gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
from test.support import verbose, TestFailed
import locale
import sys
import test.support as support
import unittest
maxsize = support.MAX_Py_ssize_t
# test string formatting operator (I am not sure if this is being tested
# elsewhere but, surely, some of the given cases are *not* tested because
# they crash python)
# test on bytes object as well
def testformat(formatstr, args, output=None, limit=None, overflowok=False):
if verbose:
if output:
print("{!a} % {!a} =? {!a} ...".format(formatstr, args, output),
end=' ')
else:
print("{!a} % {!a} works? ...".format(formatstr, args), end=' ')
try:
result = formatstr % args
except OverflowError:
if not overflowok:
raise
if verbose:
print('overflow (this is fine)')
else:
if output and limit is None and result != output:
if verbose:
print('no')
raise AssertionError("%r %% %r == %r != %r" %
(formatstr, args, result, output))
# when 'limit' is specified, it determines how many characters
# must match exactly; lengths must always match.
# ex: limit=5, '12345678' matches '12345___'
# (mainly for floating point format tests for which an exact match
# can't be guaranteed due to rounding and representation errors)
elif output and limit is not None and (
len(result)!=len(output) or result[:limit]!=output[:limit]):
if verbose:
print('no')
print("%s %% %s == %s != %s" % \
(repr(formatstr), repr(args), repr(result), repr(output)))
else:
if verbose:
print('yes')
def testcommon(formatstr, args, output=None, limit=None, overflowok=False):
    """Exercise testformat() across str, bytes, and bytearray flavors.

    A str format is tested as-is, then again after ASCII-encoding it to
    bytes and bytearray; a bytes/bytearray format is tested only in the
    two binary flavors.  The expected output is converted to match.
    """
    if isinstance(formatstr, str):
        testformat(formatstr, args, output, limit, overflowok)
        fmt_bytes = formatstr.encode('ascii')
    else:
        fmt_bytes = formatstr
    fmt_bytearray = bytearray(fmt_bytes)
    # Normalize the arguments to a tuple for the binary runs.
    arg_tuple = args if isinstance(args, tuple) else (args,)
    if output is None:
        out_bytes = out_bytearray = None
    else:
        if isinstance(output, str):
            out_bytes = output.encode('ascii')
        else:
            out_bytes = output
        out_bytearray = bytearray(out_bytes)
    testformat(fmt_bytes, arg_tuple, out_bytes, limit, overflowok)
    testformat(fmt_bytearray, arg_tuple, out_bytearray, limit, overflowok)
def test_exc(formatstr, args, exception, excmsg):
    """Check that ``formatstr % args`` raises `exception` with message `excmsg`.

    NOTE(review): when the expected exception type is raised but its message
    differs, this only prints a diagnostic and does not fail the test run;
    an unexpected exception type is re-raised, and *no* exception at all
    raises TestFailed.
    """
    try:
        testformat(formatstr, args)
    except exception as exc:
        if str(exc) == excmsg:
            if verbose:
                print("yes")
        else:
            # Right type, wrong message: report only (see NOTE above).
            if verbose: print('no')
            print('Unexpected ', exception, ':', repr(str(exc)))
    except:
        # Wrong exception type entirely: report and propagate.
        if verbose: print('no')
        print('Unexpected exception')
        raise
    else:
        raise TestFailed('did not get expected exception: %s' % excmsg)
def test_exc_common(formatstr, args, exception, excmsg):
    """Run test_exc() on both the str format and its ASCII-encoded bytes form."""
    for fmt in (formatstr, formatstr.encode('ascii')):
        test_exc(fmt, args, exception, excmsg)
class FormatTest(unittest.TestCase):
    def test_common_format(self):
        """Format codes that behave identically for str, bytes, and bytearray:
        %%, integer codes (%d/%x/%X/%o) with width/precision/sign/alt flags,
        float codes, and the shared error messages."""
        # test the format identifiers that work the same across
        # str, bytes, and bytearrays (integer, float, oct, hex)
        testcommon("%%", (), "%")
        testcommon("%.1d", (1,), "1")
        testcommon("%.*d", (sys.maxsize,1), overflowok=True) # expect overflow
        testcommon("%.100d", (1,), '00000000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '00000001', overflowok=True)
        testcommon("%#.117x", (1,), '0x00000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '0000000000000000000000000001',
                 overflowok=True)
        testcommon("%#.118x", (1,), '0x00000000000000000000000000000000000'
                 '000000000000000000000000000000000000000000000000000000'
                 '00000000000000000000000000001',
                 overflowok=True)
        testcommon("%f", (1.0,), "1.000000")
        # these are trying to test the limits of the internal magic-number-length
        # formatting buffer, if that number changes then these tests are less
        # effective
        testcommon("%#.*g", (109, -1.e+49/3.))
        testcommon("%#.*g", (110, -1.e+49/3.))
        testcommon("%#.*g", (110, -1.e+100/3.))
        # test some ridiculously large precision, expect overflow
        testcommon('%12.*f', (123456, 1.0))
        # check for internal overflow validation on length of precision
        # these tests should no longer cause overflow in Python
        # 2.7/3.1 and later.
        testcommon("%#.*g", (110, -1.e+100/3.))
        testcommon("%#.*G", (110, -1.e+100/3.))
        testcommon("%#.*f", (110, -1.e+100/3.))
        testcommon("%#.*F", (110, -1.e+100/3.))
        # Formatting of integers. Overflow is not ok
        testcommon("%x", 10, "a")
        testcommon("%x", 100000000000, "174876e800")
        testcommon("%o", 10, "12")
        testcommon("%o", 100000000000, "1351035564000")
        testcommon("%d", 10, "10")
        testcommon("%d", 100000000000, "100000000000")
        # Decimal formatting of a 30-digit int: width, sign, zero-fill,
        # precision, and combinations thereof.
        big = 123456789012345678901234567890
        testcommon("%d", big, "123456789012345678901234567890")
        testcommon("%d", -big, "-123456789012345678901234567890")
        testcommon("%5d", -big, "-123456789012345678901234567890")
        testcommon("%31d", -big, "-123456789012345678901234567890")
        testcommon("%32d", -big, " -123456789012345678901234567890")
        testcommon("%-32d", -big, "-123456789012345678901234567890 ")
        testcommon("%032d", -big, "-0123456789012345678901234567890")
        testcommon("%-032d", -big, "-123456789012345678901234567890 ")
        testcommon("%034d", -big, "-000123456789012345678901234567890")
        testcommon("%034d", big, "0000123456789012345678901234567890")
        testcommon("%0+34d", big, "+000123456789012345678901234567890")
        testcommon("%+34d", big, "    +123456789012345678901234567890")
        testcommon("%34d", big, "    123456789012345678901234567890")
        testcommon("%.2d", big, "123456789012345678901234567890")
        testcommon("%.30d", big, "123456789012345678901234567890")
        testcommon("%.31d", big, "0123456789012345678901234567890")
        testcommon("%32.31d", big, " 0123456789012345678901234567890")
        testcommon("%d", float(big), "123456________________________", 6)
        # Hex formatting of a 21-digit value: same flag matrix as above.
        big = 0x1234567890abcdef12345  # 21 hex digits
        testcommon("%x", big, "1234567890abcdef12345")
        testcommon("%x", -big, "-1234567890abcdef12345")
        testcommon("%5x", -big, "-1234567890abcdef12345")
        testcommon("%22x", -big, "-1234567890abcdef12345")
        testcommon("%23x", -big, " -1234567890abcdef12345")
        testcommon("%-23x", -big, "-1234567890abcdef12345 ")
        testcommon("%023x", -big, "-01234567890abcdef12345")
        testcommon("%-023x", -big, "-1234567890abcdef12345 ")
        testcommon("%025x", -big, "-0001234567890abcdef12345")
        testcommon("%025x", big, "00001234567890abcdef12345")
        testcommon("%0+25x", big, "+0001234567890abcdef12345")
        testcommon("%+25x", big, "  +1234567890abcdef12345")
        testcommon("%25x", big, "   1234567890abcdef12345")
        testcommon("%.2x", big, "1234567890abcdef12345")
        testcommon("%.21x", big, "1234567890abcdef12345")
        testcommon("%.22x", big, "01234567890abcdef12345")
        testcommon("%23.22x", big, " 01234567890abcdef12345")
        testcommon("%-23.22x", big, "01234567890abcdef12345 ")
        testcommon("%X", big, "1234567890ABCDEF12345")
        testcommon("%#X", big, "0X1234567890ABCDEF12345")
        testcommon("%#x", big, "0x1234567890abcdef12345")
        testcommon("%#x", -big, "-0x1234567890abcdef12345")
        testcommon("%#27x", big, "    0x1234567890abcdef12345")
        testcommon("%#-27x", big, "0x1234567890abcdef12345    ")
        testcommon("%#027x", big, "0x00001234567890abcdef12345")
        testcommon("%#.23x", big, "0x001234567890abcdef12345")
        testcommon("%#.23x", -big, "-0x001234567890abcdef12345")
        testcommon("%#27.23x", big, "  0x001234567890abcdef12345")
        testcommon("%#-27.23x", big, "0x001234567890abcdef12345  ")
        testcommon("%#027.23x", big, "0x00001234567890abcdef12345")
        testcommon("%#+.23x", big, "+0x001234567890abcdef12345")
        testcommon("%# .23x", big, " 0x001234567890abcdef12345")
        testcommon("%#+.23X", big, "+0X001234567890ABCDEF12345")
        # next one gets two leading zeroes from precision, and another from the
        # 0 flag and the width
        testcommon("%#+027.23X", big, "+0X0001234567890ABCDEF12345")
        testcommon("%# 027.23X", big, " 0X0001234567890ABCDEF12345")
        # same, except no 0 flag
        testcommon("%#+27.23X", big, " +0X001234567890ABCDEF12345")
        testcommon("%#-+27.23x", big, "+0x001234567890abcdef12345 ")
        testcommon("%#- 27.23x", big, " 0x001234567890abcdef12345 ")
        # Octal formatting of a 32-digit value: same flag matrix again.
        big = 0o12345670123456701234567012345670  # 32 octal digits
        testcommon("%o", big, "12345670123456701234567012345670")
        testcommon("%o", -big, "-12345670123456701234567012345670")
        testcommon("%5o", -big, "-12345670123456701234567012345670")
        testcommon("%33o", -big, "-12345670123456701234567012345670")
        testcommon("%34o", -big, " -12345670123456701234567012345670")
        testcommon("%-34o", -big, "-12345670123456701234567012345670 ")
        testcommon("%034o", -big, "-012345670123456701234567012345670")
        testcommon("%-034o", -big, "-12345670123456701234567012345670 ")
        testcommon("%036o", -big, "-00012345670123456701234567012345670")
        testcommon("%036o", big, "000012345670123456701234567012345670")
        testcommon("%0+36o", big, "+00012345670123456701234567012345670")
        testcommon("%+36o", big, "  +12345670123456701234567012345670")
        testcommon("%36o", big, "   12345670123456701234567012345670")
        testcommon("%.2o", big, "12345670123456701234567012345670")
        testcommon("%.32o", big, "12345670123456701234567012345670")
        testcommon("%.33o", big, "012345670123456701234567012345670")
        testcommon("%34.33o", big, " 012345670123456701234567012345670")
        testcommon("%-34.33o", big, "012345670123456701234567012345670 ")
        testcommon("%o", big, "12345670123456701234567012345670")
        testcommon("%#o", big, "0o12345670123456701234567012345670")
        testcommon("%#o", -big, "-0o12345670123456701234567012345670")
        testcommon("%#38o", big, "    0o12345670123456701234567012345670")
        testcommon("%#-38o", big, "0o12345670123456701234567012345670    ")
        testcommon("%#038o", big, "0o000012345670123456701234567012345670")
        testcommon("%#.34o", big, "0o0012345670123456701234567012345670")
        testcommon("%#.34o", -big, "-0o0012345670123456701234567012345670")
        testcommon("%#38.34o", big, "  0o0012345670123456701234567012345670")
        testcommon("%#-38.34o", big, "0o0012345670123456701234567012345670  ")
        testcommon("%#038.34o", big, "0o000012345670123456701234567012345670")
        testcommon("%#+.34o", big, "+0o0012345670123456701234567012345670")
        testcommon("%# .34o", big, " 0o0012345670123456701234567012345670")
        testcommon("%#+38.34o", big, " +0o0012345670123456701234567012345670")
        testcommon("%#-+38.34o", big, "+0o0012345670123456701234567012345670 ")
        testcommon("%#- 38.34o", big, " 0o0012345670123456701234567012345670 ")
        testcommon("%#+038.34o", big, "+0o00012345670123456701234567012345670")
        testcommon("%# 038.34o", big, " 0o00012345670123456701234567012345670")
        # next one gets one leading zero from precision
        testcommon("%.33o", big, "012345670123456701234567012345670")
        # base marker added in spite of leading zero (different to Python 2)
        testcommon("%#.33o", big, "0o012345670123456701234567012345670")
        # reduce precision, and base marker is always added
        testcommon("%#.32o", big, "0o12345670123456701234567012345670")
        # one leading zero from precision, plus two from "0" flag & width
        testcommon("%035.33o", big, "00012345670123456701234567012345670")
        # base marker shouldn't change the size
        testcommon("%0#35.33o", big, "0o012345670123456701234567012345670")
        # Some small ints, in both Python int and flavors).
        testcommon("%d", 42, "42")
        testcommon("%d", -42, "-42")
        testcommon("%d", 42.0, "42")
        testcommon("%#x", 1, "0x1")
        testcommon("%#X", 1, "0X1")
        testcommon("%#o", 1, "0o1")
        testcommon("%#o", 0, "0o0")
        testcommon("%o", 0, "0")
        testcommon("%d", 0, "0")
        testcommon("%#x", 0, "0x0")
        testcommon("%#X", 0, "0X0")
        testcommon("%x", 0x42, "42")
        testcommon("%x", -0x42, "-42")
        testcommon("%o", 0o42, "42")
        testcommon("%o", -0o42, "-42")
        # alternate float formatting
        testcommon('%g', 1.1, '1.1')
        testcommon('%#g', 1.1, '1.10000')
        # Error messages shared by str and bytes formatting.
        if verbose:
            print('Testing exceptions')
        test_exc_common('%', (), ValueError, "incomplete format")
        test_exc_common('% %s', 1, ValueError,
                        "unsupported format character '%' (0x25) at index 2")
        test_exc_common('%d', '1', TypeError,
                        "%d format: a number is required, not str")
        test_exc_common('%d', b'1', TypeError,
                        "%d format: a number is required, not bytes")
        test_exc_common('%x', '1', TypeError,
                        "%x format: an integer is required, not str")
        test_exc_common('%x', 3.14, TypeError,
                        "%x format: an integer is required, not float")
    def test_str_format(self):
        """str-only behavior: %r vs %a on printable/non-printable code points,
        and the str-specific error messages."""
        testformat("%r", "\u0378", "'\\u0378'")  # non printable
        testformat("%a", "\u0378", "'\\u0378'")  # non printable
        testformat("%r", "\u0374", "'\u0374'")   # printable
        testformat("%a", "\u0374", "'\\u0374'")  # printable
        # Test exception for unknown format characters, etc.
        if verbose:
            print('Testing exceptions')
        test_exc('abc %b', 1, ValueError,
                 "unsupported format character 'b' (0x62) at index 5")
        #test_exc(unicode('abc %\u3000','raw-unicode-escape'), 1, ValueError,
        #         "unsupported format character '?' (0x3000) at index 5")
        test_exc('%g', '1', TypeError, "must be real number, not str")
        test_exc('no format', '1', TypeError,
                 "not all arguments converted during string formatting")
        test_exc('%c', -1, OverflowError, "%c arg not in range(0x110000)")
        test_exc('%c', sys.maxunicode+1, OverflowError,
                 "%c arg not in range(0x110000)")
        #test_exc('%c', 2**128, OverflowError, "%c arg not in range(0x110000)")
        test_exc('%c', 3.14, TypeError, "%c requires int or char")
        test_exc('%c', 'ab', TypeError, "%c requires int or char")
        test_exc('%c', b'x', TypeError, "%c requires int or char")
        if maxsize == 2**31-1:
            # crashes 2.2.1 and earlier:
            try:
                "%*d"%(maxsize, -127)
            except MemoryError:
                pass
            else:
                raise TestFailed('"%*d"%(maxsize, -127) should fail')
    def test_bytes_and_bytearray_format(self):
        """bytes/bytearray-only behavior: %c, %b (buffer/__bytes__), the %s
        alias, %a/%r backslash-replaced repr, and binary error messages."""
        # %c will insert a single byte, either from an int in range(256), or
        # from a bytes argument of length 1, not from a str.
        testcommon(b"%c", 7, b"\x07")
        testcommon(b"%c", b"Z", b"Z")
        testcommon(b"%c", bytearray(b"Z"), b"Z")
        testcommon(b"%5c", 65, b"    A")
        testcommon(b"%-5c", 65, b"A    ")
        # %b will insert a series of bytes, either from a type that supports
        # the Py_buffer protocol, or something that has a __bytes__ method
        class FakeBytes(object):
            def __bytes__(self):
                return b'123'
        fb = FakeBytes()
        testcommon(b"%b", b"abc", b"abc")
        testcommon(b"%b", bytearray(b"def"), b"def")
        testcommon(b"%b", fb, b"123")
        testcommon(b"%b", memoryview(b"abc"), b"abc")
        # # %s is an alias for %b -- should only be used for Py2/3 code
        testcommon(b"%s", b"abc", b"abc")
        testcommon(b"%s", bytearray(b"def"), b"def")
        testcommon(b"%s", fb, b"123")
        testcommon(b"%s", memoryview(b"abc"), b"abc")
        # %a will give the equivalent of
        # repr(some_obj).encode('ascii', 'backslashreplace')
        testcommon(b"%a", 3.14, b"3.14")
        testcommon(b"%a", b"ghi", b"b'ghi'")
        testcommon(b"%a", "jkl", b"'jkl'")
        testcommon(b"%a", "\u0544", b"'\\u0544'")
        # %r is an alias for %a
        testcommon(b"%r", 3.14, b"3.14")
        testcommon(b"%r", b"ghi", b"b'ghi'")
        testcommon(b"%r", "jkl", b"'jkl'")
        testcommon(b"%r", "\u0544", b"'\\u0544'")
        # Test exception for unknown format characters, etc.
        if verbose:
            print('Testing exceptions')
        test_exc(b'%g', '1', TypeError, "float argument required, not str")
        test_exc(b'%g', b'1', TypeError, "float argument required, not bytes")
        test_exc(b'no format', 7, TypeError,
                 "not all arguments converted during bytes formatting")
        test_exc(b'no format', b'1', TypeError,
                 "not all arguments converted during bytes formatting")
        test_exc(b'no format', bytearray(b'1'), TypeError,
                 "not all arguments converted during bytes formatting")
        test_exc(b"%c", -1, OverflowError,
                 "%c arg not in range(256)")
        test_exc(b"%c", 256, OverflowError,
                 "%c arg not in range(256)")
        test_exc(b"%c", 2**128, OverflowError,
                 "%c arg not in range(256)")
        test_exc(b"%c", b"Za", TypeError,
                 "%c requires an integer in range(256) or a single byte")
        test_exc(b"%c", "Y", TypeError,
                 "%c requires an integer in range(256) or a single byte")
        test_exc(b"%c", 3.14, TypeError,
                 "%c requires an integer in range(256) or a single byte")
        test_exc(b"%b", "Xc", TypeError,
                 "%b requires a bytes-like object, "
                 "or an object that implements __bytes__, not 'str'")
        test_exc(b"%s", "Wd", TypeError,
                 "%b requires a bytes-like object, "
                 "or an object that implements __bytes__, not 'str'")
        # NOTE(review): this guard duplicates the one in test_str_format and
        # still formats a *str* ("%*d") inside the bytes test -- presumably a
        # copy-paste; confirm whether b"%*d" was intended.
        if maxsize == 2**31-1:
            # crashes 2.2.1 and earlier:
            try:
                "%*d"%(maxsize, -127)
            except MemoryError:
                pass
            else:
                raise TestFailed('"%*d"%(maxsize, -127) should fail')
    def test_nul(self):
        """NUL bytes pass through format strings and %s/%c arguments intact."""
        # test the null character
        testcommon("a\0b", (), 'a\0b')
        testcommon("a%cb", (0,), 'a\0b')
        testformat("a%sb", ('c\0d',), 'ac\0db')
        testcommon(b"a%sb", (b'c\0d',), b'ac\0db')
def test_non_ascii(self):
testformat("\u20ac=%f", (1.0,), "\u20ac=1.000000")
self.assertEqual(format("abc", "\u2007<5"), "abc\u2007\u2007")
self.assertEqual(format(123, "\u2007<5"), "123\u2007\u2007")
self.assertEqual(format(12.3, "\u2007<6"), "12.3\u2007\u2007")
self.assertEqual(format(0j, "\u2007<4"), "0j\u2007\u2007")
self.assertEqual(format(1+2j, "\u2007<8"), "(1+2j)\u2007\u2007")
self.assertEqual(format("abc", "\u2007>5"), "\u2007\u2007abc")
self.assertEqual(format(123, "\u2007>5"), "\u2007\u2007123")
self.assertEqual(format(12.3, "\u2007>6"), "\u2007\u200712.3")
self.assertEqual(format(1+2j, "\u2007>8"), "\u2007\u2007(1+2j)")
self.assertEqual(format(0j, "\u2007>4"), "\u2007\u20070j")
self.assertEqual(format("abc", "\u2007^5"), "\u2007abc\u2007")
self.assertEqual(format(123, "\u2007^5"), "\u2007123\u2007")
self.assertEqual(format(12.3, "\u2007^6"), "\u200712.3\u2007")
self.assertEqual(format(1+2j, "\u2007^8"), "\u2007(1+2j)\u2007")
self.assertEqual(format(0j, "\u2007^4"), "\u20070j\u2007")
    def test_locale(self):
        """The "n" format type inserts the locale's thousands separator and
        decimal point.  Skipped when no locale can be set; the original
        locale is always restored."""
        try:
            oldloc = locale.setlocale(locale.LC_ALL)
            locale.setlocale(locale.LC_ALL, '')
        except locale.Error as err:
            self.skipTest("Cannot set locale: {}".format(err))
        try:
            localeconv = locale.localeconv()
            sep = localeconv['thousands_sep']
            point = localeconv['decimal_point']
            # Stripping the separator must recover the plain digit string.
            text = format(123456789, "n")
            self.assertIn(sep, text)
            self.assertEqual(text.replace(sep, ''), '123456789')
            text = format(1234.5, "n")
            self.assertIn(sep, text)
            self.assertIn(point, text)
            self.assertEqual(text.replace(sep, ''), '1234' + point + '5')
        finally:
            locale.setlocale(locale.LC_ALL, oldloc)
@support.cpython_only
def test_optimisations(self):
text = "abcde" # 5 characters
self.assertIs("%s" % text, text)
self.assertIs("%.5s" % text, text)
self.assertIs("%.10s" % text, text)
self.assertIs("%1s" % text, text)
self.assertIs("%5s" % text, text)
self.assertIs("{0}".format(text), text)
self.assertIs("{0:s}".format(text), text)
self.assertIs("{0:.5s}".format(text), text)
self.assertIs("{0:.10s}".format(text), text)
self.assertIs("{0:1s}".format(text), text)
self.assertIs("{0:5s}".format(text), text)
self.assertIs(text % (), text)
self.assertIs(text.format(), text)
def test_precision(self):
f = 1.2
self.assertEqual(format(f, ".0f"), "1")
self.assertEqual(format(f, ".3f"), "1.200")
with self.assertRaises(ValueError) as cm:
format(f, ".%sf" % (sys.maxsize + 1))
c = complex(f)
self.assertEqual(format(c, ".0f"), "1+0j")
self.assertEqual(format(c, ".3f"), "1.200+0.000j")
with self.assertRaises(ValueError) as cm:
format(c, ".%sf" % (sys.maxsize + 1))
    @support.cpython_only
    def test_precision_c_limits(self):
        """A precision beyond the C-level INT_MAX raises ValueError for both
        float and complex (requires the _testcapi module)."""
        from _testcapi import INT_MAX
        f = 1.2
        with self.assertRaises(ValueError) as cm:
            format(f, ".%sf" % (INT_MAX + 1))
        c = complex(f)
        with self.assertRaises(ValueError) as cm:
            format(c, ".%sf" % (INT_MAX + 1))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute.plugins.v3 import agents
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
# Canned agent-build rows used by the db-layer stubs below; the tests'
# expected responses mirror these entries (with 'id' exposed as 'agent_id').
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
                     'architecture': 'x86',
                     'version': '7.0',
                     'url': 'xxx://xxxx/xxx/xxx',
                     'md5hash': 'add6bb58e139be103324d04d82d8f545',
                     'id': 1},
                    {'hypervisor': 'kvm', 'os': 'linux',
                     'architecture': 'x86',
                     'version': '16.0',
                     'url': 'xxx://xxxx/xxx/xxx1',
                     'md5hash': 'add6bb58e139be103324d04d82d8f546',
                     'id': 2},
                    {'hypervisor': 'xen', 'os': 'linux',
                     'architecture': 'x86',
                     'version': '16.0',
                     'url': 'xxx://xxxx/xxx/xxx2',
                     'md5hash': 'add6bb58e139be103324d04d82d8f547',
                     'id': 3},
                    {'hypervisor': 'xen', 'os': 'win',
                     'architecture': 'power',
                     'version': '7.0',
                     'url': 'xxx://xxxx/xxx/xxx3',
                     'md5hash': 'add6bb58e139be103324d04d82d8f548',
                     'id': 4},
                    ]
def fake_agent_build_get_all(context, hypervisor):
    """Stub for db.agent_build_get_all: return AgentBuild models built from
    fake_agents_list, optionally filtered by hypervisor (falsy = no filter)."""
    matches = []
    for entry in fake_agents_list:
        if not hypervisor or entry['hypervisor'] == hypervisor:
            build = models.AgentBuild()
            build.update(entry)
            matches.append(build)
    return matches
def fake_agent_build_update(context, agent_build_id, values):
    """No-op stub for db.agent_build_update."""
    pass
def fake_agent_build_destroy(context, agent_update_id):
    """No-op stub for db.agent_build_destroy."""
    pass
def fake_agent_build_create(context, values):
    """Stub for db.agent_build_create: echo the values back as an AgentBuild
    model with id 1 (note: mutates the caller's ``values`` dict)."""
    values['id'] = 1
    build = models.AgentBuild()
    build.update(values)
    return build
class FakeRequest(object):
    """Minimal request double carrying an admin context and no query params."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {}
class FakeRequestWithHypervisor(object):
    """Request double with a hypervisor filter in the query string."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {'hypervisor': 'kvm'}
# NOTE(review): "exited" in the name is presumably a typo for "existed";
# kept as-is since the tests reference this name.
def fake_agent_build_create_with_exited_agent(context, values):
    """Stub for db.agent_build_create that raises AgentBuildExists."""
    raise exception.AgentBuildExists(**values)
class AgentsTest(test.NoDBTestCase):
    """Tests for the v3 agents API controller, with the db layer stubbed out
    by the fake_agent_build_* functions above."""

    def setUp(self):
        # Replace every db call the controller makes with the module stubs.
        super(AgentsTest, self).setUp()
        self.stubs.Set(db, "agent_build_get_all",
                       fake_agent_build_get_all)
        self.stubs.Set(db, "agent_build_update",
                       fake_agent_build_update)
        self.stubs.Set(db, "agent_build_destroy",
                       fake_agent_build_destroy)
        self.stubs.Set(db, "agent_build_create",
                       fake_agent_build_create)
        self.context = context.get_admin_context()
        self.controller = agents.AgentController()

    def test_agents_create(self):
        """A valid create returns the agent (db 'id' exposed as 'agent_id')
        with HTTP 201."""
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'os': 'win',
                          'architecture': 'x86',
                          'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        response = {'agent': {'hypervisor': 'kvm',
                              'os': 'win',
                              'architecture': 'x86',
                              'version': '7.0',
                              'url': 'xxx://xxxx/xxx/xxx',
                              'md5hash': 'add6bb58e139be103324d04d82d8f545',
                              'agent_id': 1}}
        res_dict = self.controller.create(req, body)
        self.assertEqual(res_dict, response)
        self.assertEqual(self.controller.create.wsgi_code, 201)

    def test_agents_create_with_existed_agent(self):
        """Creating a duplicate agent maps AgentBuildExists to HTTP 409."""
        self.stubs.Set(db, 'agent_build_create',
                       fake_agent_build_create_with_exited_agent)
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'os': 'win',
                          'architecture': 'x86',
                          'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        self.assertRaises(exc.HTTPConflict, self.controller.create, req, body)

    # Each missing required field in the create body yields HTTP 400.
    def test_agents_create_without_md5hash(self):
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'os': 'win',
                          'architecture': 'x86',
                          'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_without_url(self):
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'os': 'win',
                          'architecture': 'x86',
                          'version': '7.0',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_without_version(self):
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'os': 'win',
                          'architecture': 'x86',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_without_architecture(self):
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'os': 'win',
                          'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_without_os(self):
        req = FakeRequest()
        body = {'agent': {'hypervisor': 'kvm',
                          'architecture': 'x86',
                          'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_without_hypervisor(self):
        req = FakeRequest()
        body = {'agent': {'os': 'win',
                          'architecture': 'x86',
                          'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_with_wrong_type(self):
        req = FakeRequest()
        body = {'agent': None}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_create_with_empty_type(self):
        req = FakeRequest()
        body = {}
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          req, body)

    def test_agents_delete(self):
        """Delete succeeds against the no-op destroy stub."""
        req = FakeRequest()
        self.controller.delete(req, 1)

    def test_agents_list(self):
        """index with no filter returns every entry of fake_agents_list."""
        req = FakeRequest()
        res_dict = self.controller.index(req)
        agents_list = [{'hypervisor': 'kvm', 'os': 'win',
                        'architecture': 'x86',
                        'version': '7.0',
                        'url': 'xxx://xxxx/xxx/xxx',
                        'md5hash': 'add6bb58e139be103324d04d82d8f545',
                        'agent_id': 1},
                       {'hypervisor': 'kvm', 'os': 'linux',
                        'architecture': 'x86',
                        'version': '16.0',
                        'url': 'xxx://xxxx/xxx/xxx1',
                        'md5hash': 'add6bb58e139be103324d04d82d8f546',
                        'agent_id': 2},
                       {'hypervisor': 'xen', 'os': 'linux',
                        'architecture': 'x86',
                        'version': '16.0',
                        'url': 'xxx://xxxx/xxx/xxx2',
                        'md5hash': 'add6bb58e139be103324d04d82d8f547',
                        'agent_id': 3},
                       {'hypervisor': 'xen', 'os': 'win',
                        'architecture': 'power',
                        'version': '7.0',
                        'url': 'xxx://xxxx/xxx/xxx3',
                        'md5hash': 'add6bb58e139be103324d04d82d8f548',
                        'agent_id': 4},
                       ]
        self.assertEqual(res_dict, {'agents': agents_list})

    def test_agents_list_with_hypervisor(self):
        """index with ?hypervisor=kvm returns only the kvm entries."""
        req = FakeRequestWithHypervisor()
        res_dict = self.controller.index(req)
        response = [{'hypervisor': 'kvm', 'os': 'win',
                     'architecture': 'x86',
                     'version': '7.0',
                     'url': 'xxx://xxxx/xxx/xxx',
                     'md5hash': 'add6bb58e139be103324d04d82d8f545',
                     'agent_id': 1},
                    {'hypervisor': 'kvm', 'os': 'linux',
                     'architecture': 'x86',
                     'version': '16.0',
                     'url': 'xxx://xxxx/xxx/xxx1',
                     'md5hash': 'add6bb58e139be103324d04d82d8f546',
                     'agent_id': 2},
                    ]
        self.assertEqual(res_dict, {'agents': response})

    def test_agents_update(self):
        """A valid update echoes back the updated fields plus agent_id."""
        req = FakeRequest()
        body = {'agent': {'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx',
                          'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        response = {'agent': {'agent_id': 1,
                              'version': '7.0',
                              'url': 'xxx://xxxx/xxx/xxx',
                              'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
        res_dict = self.controller.update(req, 1, body)
        self.assertEqual(res_dict, response)

    # Each missing required field in the update body yields HTTP 400.
    def test_agents_update_without_md5hash(self):
        req = FakeRequest()
        body = {'agent': {'version': '7.0',
                          'url': 'xxx://xxxx/xxx/xxx'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          req, 1, body)

    def test_agents_update_without_url(self):
        req = FakeRequest()
        body = {'agent': {'version': '7.0'}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          req, 1, body)

    def test_agents_update_without_version(self):
        req = FakeRequest()
        body = {'agent': {}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          req, 1, body)

    def test_agents_update_with_wrong_type(self):
        req = FakeRequest()
        body = {'agent': None}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          req, 1, body)

    def test_agents_update_with_empty(self):
        req = FakeRequest()
        body = {}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          req, 1, body)
|
|
#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for cluster.py."""
import unittest
import json
import cluster
import process_profiles
from test_utils import (ProfileFile,
SimpleTestSymbol,
TestProfileManager,
TestSymbolOffsetProcessor)
class ClusteringTestCase(unittest.TestCase):
    def testClusterOf(self):
        """ClusterOf yields a singleton cluster for an unseen symbol; after
        _MakeCluster, every member maps to the same cluster object."""
        clstr = cluster.Clustering()
        c = clstr.ClusterOf('a')
        self.assertEqual(['a'], c.syms)
        c = clstr._MakeCluster(['a', 'b', 'c'])
        self.assertEqual(c, clstr.ClusterOf('a'))
        self.assertEqual(c, clstr.ClusterOf('b'))
        self.assertEqual(c, clstr.ClusterOf('c'))
    def testClusterCombine(self):
        """Combine(y, x) concatenates y's symbols before x's and remaps every
        member of both inputs to the combined cluster."""
        clstr = cluster.Clustering()
        x = clstr._MakeCluster(['a', 'b'])
        self.assertEqual(x, clstr.ClusterOf('a'))
        self.assertEqual(x, clstr.ClusterOf('b'))
        y = clstr._MakeCluster(['c'])
        self.assertEqual(y, clstr.ClusterOf('c'))
        z = clstr.Combine(y, x)
        self.assertEqual(['c', 'a', 'b'], z.syms)
        self.assertEqual(z, clstr.ClusterOf('a'))
        self.assertEqual(z, clstr.ClusterOf('b'))
        self.assertEqual(z, clstr.ClusterOf('c'))
    def testClusteringDistances(self):
        """Neighbor distances computed over five symbol lists with
        NEIGHBOR_DISTANCE = 3; each (src, dst) pair appears at most once,
        and a handful of pairs are checked for their exact averaged value."""
        c = cluster.Clustering()
        c.NEIGHBOR_DISTANCE = 3
        c.AddSymbolLists([list('abcd'), list('acbe'), list('bacf'),
                          list('badf'), list('baef')])
        distances = {}
        for n in c._neighbors:
            self.assertFalse((n.src, n.dst) in distances)
            distances[(n.src, n.dst)] = n.dist
        self.assertEqual(13, len(distances))
        self.assertEqual((2 + 1 + 1 + 2000) / 5., distances[('a', 'c')])
        self.assertEqual((1 + 4000) / 5., distances[('a', 'd')])
        self.assertEqual((1 + 4000) / 5., distances[('a', 'e')])
        self.assertEqual((2 + 2 + 2 + 2000) / 5., distances[('a', 'f')])
        self.assertEqual(0, distances[('b', 'a')])
        self.assertEqual((1 + -1 + 2 + 2000) / 5., distances[('b', 'c')])
        self.assertTrue(('b', 'd') in distances)
        self.assertTrue(('b', 'e') in distances)
        self.assertTrue(('c', 'd') in distances)
        self.assertTrue(('c', 'e') in distances)
        self.assertTrue(('c', 'f') in distances)
        self.assertTrue(('d', 'f') in distances)
        self.assertTrue(('e', 'f') in distances)
    def testClusterToList(self):
        """ClusterToList on the five-list fixture yields the expected order."""
        c = cluster.Clustering()
        c.NEIGHBOR_DISTANCE = 3
        c.AddSymbolLists([list('abcd'), list('acbe'), list('bacf'),
                          list('badf'), list('baef')])
        self.assertEqual(list('bacfed'), c.ClusterToList())
    def testClusterOneList(self):
        """A single symbol list clusters to itself, order preserved."""
        c = cluster.Clustering()
        c.NEIGHBOR_DISTANCE = 3
        c.AddSymbolLists([list('fedcba')])
        self.assertEqual(list('fedcba'), c.ClusterToList())
    def testClusterShortList(self):
        """A two-symbol list survives clustering unchanged."""
        c = cluster.Clustering()
        c.NEIGHBOR_DISTANCE = 3
        c.AddSymbolLists([list('ab')])
        self.assertEqual(list('ab'), c.ClusterToList())
    def testClusterReallyShortList(self):
        """A single-symbol list produces an empty cluster list."""
        c = cluster.Clustering()
        c.NEIGHBOR_DISTANCE = 3
        c.AddSymbolLists([list('a')])
        self.assertEqual([], c.ClusterToList())
    def testSizedClusterToList(self):
        """With MAX_CLUSTER_SIZE = 1 every cluster is suppressed, so the
        size-mapped output is ordered by descending size."""
        c = cluster.Clustering()
        c.NEIGHBOR_DISTANCE = 3
        c.MAX_CLUSTER_SIZE = 1  # Will suppress all clusters
        size_map = {'a': 3,
                    'b': 4,
                    'c': 5,
                    'd': 6,
                    'e': 7,
                    'f': 8}
        c.AddSymbolLists([list('abcd'), list('acbe'), list('bacf'),
                          list('badf'), list('baef')])
        self.assertEqual(list('fedcba'), c.ClusterToList(size_map))
    def testClusterOffsets(self):
        """ClusterOffsets over a profile-manager fixture, with and without
        cluster-size limiting; expected symbol orders differ between the
        two modes."""
        processor = TestSymbolOffsetProcessor([
            SimpleTestSymbol('linker_script_start_of_text', 0, 0),
            SimpleTestSymbol('1', 1000, 999),
            SimpleTestSymbol('2', 2000, 999),
            SimpleTestSymbol('3', 3000, 999),
            SimpleTestSymbol('4', 4000, 16),
            SimpleTestSymbol('5', 5000, 16),
            SimpleTestSymbol('6', 6000, 999),
            SimpleTestSymbol('7', 7000, 16),
            SimpleTestSymbol('8', 8000, 999),
            SimpleTestSymbol('9', 9000, 16),
        ])
        mgr = TestProfileManager({
            ProfileFile(40, 0, ''): [1000, 2000, 3000],
            ProfileFile(50, 1, ''): [3000, 4000, 5000],
            ProfileFile(51, 0, 'renderer'): [2000, 3000, 6000],
            ProfileFile(51, 1, 'gpu-process'): [6000, 7000],
            ProfileFile(70, 0, ''): [1000, 2000, 6000, 8000, 9000],
            ProfileFile(70, 1, ''): [9000, 5000, 3000]})
        syms = cluster.ClusterOffsets(mgr, processor, limit_cluster_size=False)
        self.assertListEqual(list('236148957'), syms)
        syms = cluster.ClusterOffsets(mgr, processor, limit_cluster_size=True)
        self.assertListEqual(list('236489517'), syms)
def testClusteringDistancesForCallGraph(self):
    """Verify the pairwise distances computed from a symbol call graph.

    Distances are negative call counts (summed over processes), so
    more frequently called caller/callee pairs sort closer together.
    """
    c = cluster.Clustering()
    callerA = cluster.CallerInfo(caller_symbol='a', count=1)
    callerB = cluster.CallerInfo(caller_symbol='b', count=2)
    callerC = cluster.CallerInfo(caller_symbol='c', count=3)
    callerD = cluster.CallerInfo(caller_symbol='d', count=100)
    callerE = cluster.CallerInfo(caller_symbol='e', count=200)
    calleeA = cluster.CalleeInfo(index=4, callee_symbol='a', misses=0,
                                 caller_and_count=[])
    calleeB = cluster.CalleeInfo(index=8, callee_symbol='b', misses=1,
                                 caller_and_count=[callerA])
    calleeC = cluster.CalleeInfo(index=12, callee_symbol='c', misses=1,
                                 caller_and_count=[callerA, callerE])
    calleeD = cluster.CalleeInfo(index=20, callee_symbol='d', misses=1,
                                 caller_and_count=[callerB, callerC, callerE])
    calleeF = cluster.CalleeInfo(index=28, callee_symbol='f', misses=10,
                                 caller_and_count=[callerD])
    process1 = [calleeA, calleeB, calleeC, calleeD]
    process2 = [calleeA, calleeB, calleeC, calleeD, calleeF]
    call_graph = [process1, process2]
    # Whitelisted symbols ('e' among the callers) do not contribute edges.
    whitelist = ['e', 'g', 'h', 'k', 'l']
    c.AddSymbolCallGraph(call_graph, whitelist)
    distances = {}
    for n in c._neighbors:
        # Each (src, dst) pair must be reported at most once.
        self.assertNotIn((n.src, n.dst), distances)
        distances[(n.src, n.dst)] = n.dist
    self.assertEqual(5, len(distances))
    # Use assertEqual (not the deprecated assertEquals alias) to stay
    # consistent with the other tests in this class.
    self.assertEqual(-2, distances[('a', 'b')])
    self.assertEqual(-2, distances[('a', 'c')])
    self.assertEqual(-4, distances[('b', 'd')])
    self.assertEqual(-6, distances[('c', 'd')])
    self.assertEqual(-100, distances[('d', 'f')])
    self.assertEqual(list('abcdf'), c.ClusterToList())
def testClusterOffsetsFromCallGraph(self):
    # End-to-end ClusterOffsets test driven by JSON call-graph dumps.
    # Each processN blob mimics one process' dumped call graph: a list
    # of callees, each with caller offsets and call counts, plus a
    # total call count. Offsets map onto the symbol table built below.
    process1 = ('{"call_graph": [ {'
                '"callee_offset": "1000",'
                '"caller_and_count": [ {'
                '"caller_offset": "0",'
                '"count": "2"'
                '} ],'
                '"index": "61496"'
                '}, {'
                '"callee_offset": "7000",'
                '"caller_and_count": [ {'
                '"caller_offset": "1000",'
                '"count": "2"'
                '}, {'
                '"caller_offset": "7500",'
                '"count": "100"'
                '} ],'
                '"index": "61500"'
                '}, {'
                '"callee_offset": "6000",'
                '"caller_and_count": [ {'
                '"caller_offset": "1000",'
                '"count": "4"'
                '}, {'
                '"caller_offset": "7000",'
                '"count": "3"'
                '}, {'
                '"caller_offset": "7500",'
                '"count": "2"'
                '}, {'
                '"caller_offset": "0",'
                '"count": "3"'
                '} ],'
                '"index": "47860"'
                '}, {'
                '"callee_offset": "3000",'
                '"caller_and_count": [ {'
                '"caller_offset": "6000",'
                '"count": "11"'
                '} ],'
                '"index": "47900"'
                '} ],'
                '"total_calls_count": "127"'
                '}')
    process2 = ('{"call_graph": [ {'
                '"callee_offset": "1000",'
                '"caller_and_count": [ {'
                '"caller_offset": "0",'
                '"count": "2"'
                '} ],'
                '"index": "61496"'
                '}, {'
                '"callee_offset": "5000",'
                '"caller_and_count": [ {'
                '"caller_offset": "1000",'
                '"count": "20"'
                '}, {'
                '"caller_offset": "5000",'
                '"count": "100"'
                '}, {'
                '"caller_offset": "3000",'
                '"count": "40"'
                '} ],'
                '"index": "61500"'
                '}, {'
                '"callee_offset": "3000",'
                '"caller_and_count": [ {'
                '"caller_offset": "5000",'
                '"count": "10"'
                '}, {'
                '"caller_offset": "0",'
                '"count": "10"'
                '} ],'
                '"index": "47860"'
                '} ],'
                '"total_calls_count": "182"'
                '}')
    process3 = ('{"call_graph": [ {'
                '"callee_offset": "8000",'
                '"caller_and_count": [ {'
                '"caller_offset": "0",'
                '"count": "5"'
                '} ],'
                '"index": "61496"'
                '}, {'
                '"callee_offset": "2000",'
                '"caller_and_count": [ {'
                '"caller_offset": "8000",'
                '"count": "100"'
                '} ],'
                '"index": "61500"'
                '}, {'
                '"callee_offset": "4000",'
                '"caller_and_count": [ {'
                '"caller_offset": "8000",'
                '"count": "20"'
                '} ],'
                '"index": "61504"'
                '}, {'
                '"callee_offset": "9000",'
                '"caller_and_count": [ {'
                '"caller_offset": "8000",'
                '"count": "50"'
                '} ],'
                '"index": "61512"'
                '}, {'
                '"callee_offset": "7000",'
                '"caller_and_count": [ {'
                '"caller_offset": "2000",'
                '"count": "15"'
                '}, {'
                '"caller_offset": "4000",'
                '"count": "20"'
                '}, {'
                '"caller_offset": "9000",'
                '"count": "80"'
                '}, {'
                '"caller_offset": "0",'
                '"count": "400"'
                '} ],'
                '"index": "61516"'
                '} ],'
                '"total_calls_count": "690"'
                '}')
    process4 = ('{"call_graph": [ {'
                '"callee_offset": "8000",'
                '"caller_and_count": [ {'
                '"caller_offset": "0",'
                '"count": "10"'
                '} ],'
                '"index": "61496"'
                '}, {'
                '"callee_offset": "2000",'
                '"caller_and_count": [ {'
                '"caller_offset": "8000",'
                '"count": "100"'
                '} ],'
                '"index": "61500"'
                '}, {'
                '"callee_offset": "6000",'
                '"caller_and_count": [ {'
                '"caller_offset": "7000",'
                '"count": "10"'
                '} , {'
                '"caller_offset": "7500",'
                '"count": "2"'
                '} ],'
                '"index": "61504"'
                '}, {'
                '"callee_offset": "7000",'
                '"caller_and_count": [ {'
                '"caller_offset": "8000",'
                '"count": "300"'
                '}, {'
                '"caller_offset": "7500",'
                '"count": "100"'
                '}, {'
                '"caller_offset": "2000",'
                '"count": "15"'
                '}, {'
                '"caller_offset": "0",'
                '"count": "50"'
                '} ],'
                '"index": "61516"'
                '} ],'
                '"total_calls_count": "587"'
                '}')
    # Symbol table: offset 7100 is a zero-size symbol, flagged below as
    # whitelisted; caller offset 7500 falls inside symbol '7' (7000+16
    # would not cover it — presumably it resolves via the processor;
    # TODO(review): confirm how mid-symbol offsets are attributed.
    processor = TestSymbolOffsetProcessor([
        SimpleTestSymbol('linker_script_start_of_text', 0, 0),
        SimpleTestSymbol('1', 1000, 999),
        SimpleTestSymbol('2', 2000, 999),
        SimpleTestSymbol('3', 3000, 999),
        SimpleTestSymbol('4', 4000, 16),
        SimpleTestSymbol('5', 5000, 16),
        SimpleTestSymbol('6', 6000, 999),
        SimpleTestSymbol('7', 7000, 16),
        SimpleTestSymbol('8', 7100, 0),  # whitelist
        SimpleTestSymbol('9', 8000, 999),
        SimpleTestSymbol('10', 9000, 16)])
    # Each profile file carries one parsed call-graph dump.
    mgr = TestProfileManager({
        ProfileFile(40, 0, 'renderer'): json.loads(process1),
        ProfileFile(50, 1, 'renderer'): json.loads(process2),
        ProfileFile(51, 0, 'browser'): json.loads(process3),
        ProfileFile(51, 1, 'gpu-process'): json.loads(process4)})
    syms = cluster.ClusterOffsets(mgr, processor, limit_cluster_size=False,
                                  call_graph=True)
    self.assertListEqual(['7', '6', '1', '5', '3', '9', '2', '10', '4'], syms)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
|
#! /usr/bin/env python
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
'''
Little helper for the `google-play-crawler`
'''
import csv
from datetime import datetime
import json
import os
from os.path import abspath
import subprocess
import sys
import time
import traceback
from collections import OrderedDict
############################################################
# Edit ! #
############################################################
# Crawler jar name, resolved relative to the current working directory.
GOOGLE_PLAY_CRAWLER_BIN_NAME = "googleplaycrawler.jar"
# Crawler configuration (credentials, android id).
GOOGLE_PLAY_CRAWLER_CONF = os.path.abspath("conf/crawler.conf")
# Root directory all downloads are placed under.
DL_ROOT_DIR = "playstore_dl/"
# time to sleep between starting next download
DL_SLEEP_TIME = 10
############################################################
# Don't touch #
############################################################
# Base java invocation; the command templates below extend it.
BASE_EXEC = "java -jar {} --conf {}".format(abspath(GOOGLE_PLAY_CRAWLER_BIN_NAME), GOOGLE_PLAY_CRAWLER_CONF)
# %s placeholders (in order): category, subcategory, count, offset.
GET_PACKAGES = "{} list %s -s %s -n %s -o %s".format(BASE_EXEC)
# %s placeholder: package name.
APK_DOWNLOAD = "{} download %s".format(BASE_EXEC)
LIST_CATEGORIES = "{} categories".format(BASE_EXEC)
SUBCATEGORY_TOPSELLING_FREE = "apps_topselling_free"
SUBCATEGORY_TOPSELLING_NEW_FREE = "apps_topselling_new_free"
# otherwise googleplaycrawler says "oo many results requested.*"
MAX_CNT_GPC_LISTING = 100
# the maximal offset we can supply gpc
MAX_OFFSET_GPC_LISTING = 499
def print_help():
    ''' Print usage information and examples, then exit with status 1. '''
    prog_name = sys.argv[0]
    print """Usage: %s <list
|download_new_all_categories <number>
|download_top_all_categories <number>
|download <category> <subcategory> <number>
|download_pn <package_name>
>\n""" % prog_name
    print """The script relies on google-play-crawler.
Be sure you have done the following steps before using this script!
1) Get it from here: https://github.com/Akdeniz/google-play-crawler
and place this script inside the googleplay directory after it has been build.
2) Set email and password in crawler.conf
3) Get androidid via "java -jar googleplaycrawler.jar -f crawler.conf checkin" and set in in the config file
4) playstore.py benutzen ;)\n"""
    print "Example: %s download WEATHER apps_topselling_new_free 2" % prog_name
    print "Example: %s download_pn a2dp.Vol" % prog_name
    print "Example: %s list" % prog_name
    print "Example: %s download_new_all_categories 10" % prog_name
    print "Example: %s download_top_all_categories 10\n" % prog_name
    print """Possible subcategories are:
apps_topselling_paid
apps_topselling_free
apps_topgrossing
apps_topselling_new_paid
apps_topselling_new_free"""
    # Exit non-zero so callers can detect a usage error.
    sys.exit(1)
def get_cagetories():
    ''' Returns a list of categories available on the PlayStore '''
    # Run the crawler's "categories" command and parse its CSV output.
    listing = subprocess.Popen(LIST_CATEGORIES, shell = True, stdout=subprocess.PIPE)
    reader = csv.DictReader(listing.stdout, delimiter=";")
    categories = []
    for row in reader:
        categories.append(row["ID"])
    return categories
def get_package_names(category, subcategory, number = 50):
    ''' Get a set of package names for the given `category` and `subcategory`.

    gpc can only list `MAX_CNT_GPC_LISTING` items at once, so the request
    is split into several queries with increasing offsets.
    '''
    cnt_runs = number / MAX_CNT_GPC_LISTING + 1
    offset = 0
    package_names = []
    for _ in range(1, cnt_runs + 1):
        # no more items available, limited through play store
        if offset >= MAX_OFFSET_GPC_LISTING:
            break
        # Bug fix: only request what is still missing. The old code asked
        # for min(number, MAX_CNT_GPC_LISTING) on *every* page, which could
        # fetch up to a full extra page of superfluous items.
        cnt = min(number - offset, MAX_CNT_GPC_LISTING)
        if cnt <= 0:
            break
        proc = subprocess.Popen(GET_PACKAGES % (category, subcategory, cnt, offset), shell = True, stdout=subprocess.PIPE)
        csvr = csv.DictReader(proc.stdout, delimiter=";")
        package_names.extend([row["Package"] for row in csvr])
        # next query with new offset
        offset += MAX_CNT_GPC_LISTING
        if offset > MAX_OFFSET_GPC_LISTING:
            offset = MAX_OFFSET_GPC_LISTING
    # de-duplicate across pages
    return set(package_names)
def check_n_create_dl_dir(sub_dir = "."):
    ''' Ensure the download directory (plus `sub_dir`) exists.

    Parameters
    ----------
    sub_dir : str
        Subdirectory to create under the root download directory.
    '''
    target = os.path.join(DL_ROOT_DIR, sub_dir)
    try:
        # build the whole directory structure in one go if it is missing
        if not os.path.exists(target):
            os.makedirs(target)
    except OSError:
        # best-effort: report the problem but keep going
        traceback.print_exception(*sys.exc_info())
def download_apks(package_name_list, dl_root_dir = "."):
''' Download the .apk s for the given list of pacakge names to the specified `dl_dir` (default is `DL_ROOT_DIR`) '''
print "Downloading: %s" % ', '.join(package_name_list)
for pn in package_name_list:
old_cwd = os.getcwd()
check_n_create_dl_dir(dl_root_dir)
dl_dir = os.path.join(DL_ROOT_DIR, dl_root_dir)
try:
while 1:
# change do download dir
os.chdir(dl_dir)
dl = subprocess.Popen(APK_DOWNLOAD % pn, shell = True, stdout = None)
# wait for process to finish
dl.wait()
if dl.returncode == 0:
break
else:
sys.stderr.write("Could not download %s! Retrying ...")
except:
traceback.print_exception(*sys.exc_info())
finally:
# change back to old cwd
os.chdir(old_cwd)
# don't be too aggressive
print "starting next dl in %ss" % DL_SLEEP_TIME
time.sleep(DL_SLEEP_TIME)
def download_n_all_categories(subcategory, number):
''' Download `number` of apks from `subcategory` '''
filename = os.path.join(DL_ROOT_DIR, 'top_%d_%s_%s.json' % (number, subcategory, datetime.now()))
apks_dict = OrderedDict()
# create root dl dir first
check_n_create_dl_dir()
with open(filename, "w") as f:
for category in get_cagetories():
f.seek(0)
print "Downloading the %s apks from category: %s" % (subcategory, category)
package_names = get_package_names(category, subcategory, number)
apks_dict[category] = list(package_names)
json.dump(apks_dict, f, indent = 4)
f.flush()
# dl dir : subcategory/category/
dl_dir = os.path.join(subcategory, category)
download_apks(package_names, dl_dir)
print "\n" * 5
# Command-line entry point: dispatch on the first argument.
if __name__ == "__main__":
    args = sys.argv
    if len(args) < 2:
        print_help()
    else:
        args = sys.argv[1:]
        cmd = args[0]
        if cmd == "download":
            # download <category> <subcategory> <number>
            if len(args) != 4:
                print_help()
            category, subcategory, number = args[1:]
            number = int(number)
            package_names = get_package_names(category, subcategory, number)
            print "packages: %s" % ', '.join(package_names)
            dl_dir = os.path.join(category, subcategory)
            download_apks(package_names, dl_dir)
        elif cmd == "list":
            # list all available PlayStore categories
            print '\n'.join(get_cagetories())
        elif cmd == "download_pn":
            # download a single apk by package name
            if len(args) != 2:
                print_help()
            package_name = args[1]
            download_apks([package_name])
        elif cmd in ("download_new_all_categories", "download_top_all_categories"):
            if len(args) != 2:
                print_help()
            number = args[1]
            number = int(number)
            if cmd == "download_new_all_categories":
                download_n_all_categories(SUBCATEGORY_TOPSELLING_NEW_FREE, number)
            elif cmd == "download_top_all_categories":
                download_n_all_categories(SUBCATEGORY_TOPSELLING_FREE, number)
        else:
            print "Unknown command!"
            print_help()
|
|
# Copyright (c) 2010-2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from urllib import unquote
import cStringIO as StringIO
from logging.handlers import SysLogHandler
import mock
from test.unit import FakeLogger
from swift.common.utils import get_logger
from swift.common.middleware import proxy_logging
from swift.common.swob import Request, Response
from swift.common import constraints
class FakeApp(object):
    """Minimal WSGI app returning a fixed body with a computed
    Content-Length, draining the request input first."""

    def __init__(self, body=None, response_str='200 OK'):
        # Default to the canonical single-chunk payload.
        self.body = ['FAKE APP'] if body is None else body
        self.response_str = response_str

    def __call__(self, env, start_response):
        headers = [('Content-Type', 'text/plain'),
                   ('Content-Length', str(sum(len(chunk) for chunk in self.body)))]
        start_response(self.response_str, headers)
        # Consume the request body the way a real app would.
        while env['wsgi.input'].read(5):
            pass
        return self.body
class FakeAppThatExcepts(object):
    """WSGI app that always raises, for exercising error handling."""

    def __call__(self, env, start_response):
        raise Exception("We take exception to that!")
class FakeAppNoContentLengthNoTransferEncoding(object):
    """WSGI app answering 200 OK without Content-Length or
    Transfer-Encoding headers, to test byte-count fallbacks."""

    def __init__(self, body=None):
        # Same default payload as FakeApp.
        self.body = ['FAKE APP'] if body is None else body

    def __call__(self, env, start_response):
        # Deliberately only a Content-Type header.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        while env['wsgi.input'].read(5):
            pass
        return self.body
class FileLikeExceptor(object):
    """File-like object whose read operations always raise IOError.

    Simulates a client disconnecting mid-request.
    """

    def __init__(self):
        pass

    def read(self, size):
        # Parameter renamed from 'len' to avoid shadowing the builtin;
        # callers pass it positionally.
        raise IOError('of some sort')

    def readline(self, size=1024):
        raise IOError('of some sort')
class FakeAppReadline(object):
    """WSGI app that consumes exactly one line of the request body."""

    def __call__(self, env, start_response):
        headers = [('Content-Type', 'text/plain'), ('Content-Length', '8')]
        start_response('200 OK', headers)
        # Read a single line only; the rest of the input is untouched.
        env['wsgi.input'].readline()
        return ["FAKE APP"]
def start_response(*args):
    """No-op stand-in for a WSGI start_response callable."""
    return None
class TestProxyLogging(unittest.TestCase):
def _log_parts(self, app, should_be_empty=False):
    # Return the space-separated fields of the single recorded access
    # log line, or assert that no line was logged at all.
    info_calls = app.access_logger.log_dict['info']
    if should_be_empty:
        self.assertEquals([], info_calls)
    else:
        self.assertEquals(1, len(info_calls))
        # info_calls[0] is (args, kwargs); args[0] is the log line.
        return info_calls[0][0][0].split(' ')
def assertTiming(self, exp_metric, app, exp_timing=None):
    """Assert a 'timing' statsd call was recorded for exp_metric.

    If exp_timing is given, the recorded value must also match it
    (to 4 decimal places).
    """
    timing_calls = app.access_logger.log_dict['timing']
    found = False
    for timing_call in timing_calls:
        # Each call is ((metric, value), kwargs); kwargs must be empty.
        self.assertEquals({}, timing_call[1])
        self.assertEquals(2, len(timing_call[0]))
        if timing_call[0][0] == exp_metric:
            found = True
            if exp_timing is not None:
                self.assertAlmostEqual(exp_timing, timing_call[0][1],
                                       places=4)
    if not found:
        # self.fail() is the idiomatic unittest way to signal this,
        # instead of assertTrue(False, ...).
        self.fail('assertTiming: %s not found in %r' % (
            exp_metric, timing_calls))
def assertTimingSince(self, exp_metric, app, exp_start=None):
    """Assert a 'timing_since' statsd call was recorded for exp_metric.

    If exp_start is given, the recorded start time must also match it
    (to 4 decimal places).
    """
    timing_calls = app.access_logger.log_dict['timing_since']
    found = False
    for timing_call in timing_calls:
        # Each call is ((metric, start), kwargs); kwargs must be empty.
        self.assertEquals({}, timing_call[1])
        self.assertEquals(2, len(timing_call[0]))
        if timing_call[0][0] == exp_metric:
            found = True
            if exp_start is not None:
                self.assertAlmostEqual(exp_start, timing_call[0][1],
                                       places=4)
    if not found:
        # self.fail() is the idiomatic unittest way to signal this,
        # instead of assertTrue(False, ...).
        self.fail('assertTimingSince: %s not found in %r' % (
            exp_metric, timing_calls))
def assertNotTiming(self, not_exp_metric, app):
    """Assert that no 'timing' statsd call used the given metric."""
    recorded_metrics = [call[0][0]
                        for call in app.access_logger.log_dict['timing']]
    for metric in recorded_metrics:
        self.assertNotEqual(not_exp_metric, metric)
def assertUpdateStats(self, exp_metric, exp_bytes, app):
    """Assert exactly one update_stats call with the given metric/bytes."""
    calls = app.access_logger.log_dict['update_stats']
    self.assertEquals(1, len(calls))
    # calls[0] is (args, kwargs); no keyword arguments expected.
    self.assertEquals({}, calls[0][1])
    self.assertEquals((exp_metric, exp_bytes), calls[0][0])
def test_log_request_statsd_invalid_stats_types(self):
    """Paths that are not /v1/<account>... must emit no statsd metrics."""
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    for url in ['/', '/foo', '/foo/bar', '/v1']:
        req = Request.blank(url, environ={'REQUEST_METHOD': 'GET'})
        body_iter = app(req.environ, start_response)
        ''.join(body_iter)  # consume the response body
        self.assertEqual([], app.access_logger.log_dict['timing'])
        self.assertEqual([], app.access_logger.log_dict['update_stats'])
def test_log_request_stat_type_bad(self):
    """log_request() on an unrecognized path emits no statsd metrics."""
    for bad_path in ['', '/', '/bad', '/baddy/mc_badderson', '/v1',
                     '/v1/']:
        app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
        app.access_logger = FakeLogger()
        req = Request.blank(bad_path, environ={'REQUEST_METHOD': 'GET'})
        start = 10000.0
        app.log_request(req, 123, 7, 13, start, start + 2.71828182846)
        self.assertEqual([], app.access_logger.log_dict['timing'])
        self.assertEqual([], app.access_logger.log_dict['update_stats'])
def test_log_request_stat_type_good(self):
    """
    log_request() should send timing and byte-count counters for GET
    requests. Also, __call__()'s iter_response() function should
    statsd-log time to first byte (calling the passed-in start_response
    function), but only for GET requests.
    """
    # stub_times feeds deterministic values to time.time(); each request
    # pops them in order (start, then first-byte for GETs).
    stub_times = []
    def stub_time():
        return stub_times.pop(0)
    # Map request paths to the statsd metric prefix they should produce.
    path_types = {
        '/v1/a': 'account',
        '/v1/a/': 'account',
        '/v1/a/c': 'container',
        '/v1/a/c/': 'container',
        '/v1/a/c/o': 'object',
        '/v1/a/c/o/': 'object',
        '/v1/a/c/o/p': 'object',
        '/v1/a/c/o/p/': 'object',
        '/v1/a/c/o/p/p2': 'object',
    }
    with mock.patch("time.time", stub_time):
        for path, exp_type in path_types.items():
            # GET
            app = proxy_logging.ProxyLoggingMiddleware(
                FakeApp(body='7654321', response_str='321 Fubar'), {})
            app.access_logger = FakeLogger()
            req = Request.blank(path, environ={
                'REQUEST_METHOD': 'GET',
                'wsgi.input': StringIO.StringIO('4321')})
            stub_times = [18.0, 20.71828182846]
            iter_response = app(req.environ, lambda *_: None)
            self.assertEqual('7654321', ''.join(iter_response))
            # elapsed = 20.718... - 18.0, reported in milliseconds
            self.assertTiming('%s.GET.321.timing' % exp_type, app,
                              exp_timing=2.71828182846 * 1000)
            self.assertTimingSince(
                '%s.GET.321.first-byte.timing' % exp_type, app,
                exp_start=18.0)
            # xfer = request bytes (4) + response bytes (7)
            self.assertUpdateStats('%s.GET.321.xfer' % exp_type,
                                   4 + 7, app)
            # GET with swift.proxy_access_log_made already set
            app = proxy_logging.ProxyLoggingMiddleware(
                FakeApp(body='7654321', response_str='321 Fubar'), {})
            app.access_logger = FakeLogger()
            req = Request.blank(path, environ={
                'REQUEST_METHOD': 'GET',
                'swift.proxy_access_log_made': True,
                'wsgi.input': StringIO.StringIO('4321')})
            stub_times = [18.0, 20.71828182846]
            iter_response = app(req.environ, lambda *_: None)
            # already-logged requests must not emit any metrics
            self.assertEqual('7654321', ''.join(iter_response))
            self.assertEqual([], app.access_logger.log_dict['timing'])
            self.assertEqual([],
                             app.access_logger.log_dict['timing_since'])
            self.assertEqual([],
                             app.access_logger.log_dict['update_stats'])
            # PUT (no first-byte timing!)
            app = proxy_logging.ProxyLoggingMiddleware(
                FakeApp(body='87654321', response_str='314 PiTown'), {})
            app.access_logger = FakeLogger()
            req = Request.blank(path, environ={
                'REQUEST_METHOD': 'PUT',
                'wsgi.input': StringIO.StringIO('654321')})
            # (it's not a GET, so time() doesn't have a 2nd call)
            stub_times = [58.2, 58.2 + 7.3321]
            iter_response = app(req.environ, lambda *_: None)
            self.assertEqual('87654321', ''.join(iter_response))
            self.assertTiming('%s.PUT.314.timing' % exp_type, app,
                              exp_timing=7.3321 * 1000)
            self.assertNotTiming(
                '%s.GET.314.first-byte.timing' % exp_type, app)
            self.assertNotTiming(
                '%s.PUT.314.first-byte.timing' % exp_type, app)
            self.assertUpdateStats(
                '%s.PUT.314.xfer' % exp_type, 6 + 8, app)
def test_log_request_stat_method_filtering_default(self):
    # Unknown HTTP methods are bucketed as BAD_METHOD in statsd metric
    # names; the standard verbs pass through unchanged by default.
    method_map = {
        'foo': 'BAD_METHOD',
        '': 'BAD_METHOD',
        'PUTT': 'BAD_METHOD',
        'SPECIAL': 'BAD_METHOD',
        'GET': 'GET',
        'PUT': 'PUT',
        'COPY': 'COPY',
        'HEAD': 'HEAD',
        'POST': 'POST',
        'DELETE': 'DELETE',
        'OPTIONS': 'OPTIONS',
    }
    for method, exp_method in method_map.items():
        app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
        app.access_logger = FakeLogger()
        req = Request.blank('/v1/a/', environ={'REQUEST_METHOD': method})
        now = 10000.0
        app.log_request(req, 299, 11, 3, now, now + 1.17)
        # timing is reported in milliseconds
        self.assertTiming('account.%s.299.timing' % exp_method, app,
                          exp_timing=1.17 * 1000)
        self.assertUpdateStats('account.%s.299.xfer' % exp_method,
                               11 + 3, app)
def test_log_request_stat_method_filtering_custom(self):
    # A custom valid-methods list replaces the default set entirely:
    # configured methods pass through, everything else is BAD_METHOD.
    method_map = {
        'foo': 'BAD_METHOD',
        '': 'BAD_METHOD',
        'PUTT': 'BAD_METHOD',
        'SPECIAL': 'SPECIAL',  # will be configured
        'GET': 'GET',
        'PUT': 'PUT',
        'COPY': 'BAD_METHOD',  # prove no one's special
    }
    # this conf var supports optional leading access_
    for conf_key in ['access_log_statsd_valid_http_methods',
                     'log_statsd_valid_http_methods']:
        for method, exp_method in method_map.items():
            app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
                conf_key: 'SPECIAL, GET,PUT ',  # crazy spaces ok
            })
            app.access_logger = FakeLogger()
            req = Request.blank('/v1/a/c',
                                environ={'REQUEST_METHOD': method})
            now = 10000.0
            app.log_request(req, 911, 4, 43, now, now + 1.01)
            self.assertTiming('container.%s.911.timing' % exp_method, app,
                              exp_timing=1.01 * 1000)
            self.assertUpdateStats('container.%s.911.xfer' % exp_method,
                                   4 + 43, app)
def test_basic_req(self):
    """A plain GET logs method, path, protocol, status and byte count."""
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
    resp_body = ''.join(app(req.environ, start_response))
    fields = self._log_parts(app)
    self.assertEquals('GET', fields[3])
    self.assertEquals('/', fields[4])
    self.assertEquals('HTTP/1.0', fields[5])
    self.assertEquals('200', fields[6])
    self.assertEquals('FAKE APP', resp_body)
    self.assertEquals(str(len(resp_body)), fields[11])
def test_basic_req_second_time(self):
    """A request already marked as logged must not be logged again."""
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    environ = {'swift.proxy_access_log_made': True,
               'REQUEST_METHOD': 'GET'}
    req = Request.blank('/', environ=environ)
    resp_body = ''.join(app(req.environ, start_response))
    self._log_parts(app, should_be_empty=True)
    self.assertEquals('FAKE APP', resp_body)
def test_multi_segment_resp(self):
    # A multi-chunk response body must be logged with the total byte
    # count, and the swift.source value prefixes the statsd metric.
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(
        ['some', 'chunks', 'of data']), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                      'swift.source': 'SOS'})
    resp = app(req.environ, start_response)
    resp_body = ''.join(resp)
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[3], 'GET')
    self.assertEquals(log_parts[4], '/')
    self.assertEquals(log_parts[5], 'HTTP/1.0')
    self.assertEquals(log_parts[6], '200')
    self.assertEquals(resp_body, 'somechunksof data')
    self.assertEquals(log_parts[11], str(len(resp_body)))
    self.assertUpdateStats('SOS.GET.200.xfer', len(resp_body), app)
def test_log_headers(self):
    # Both conf spellings (with and without the access_ prefix) enable
    # request-header logging; headers land quoted in log field 14.
    for conf_key in ['access_log_headers', 'log_headers']:
        app = proxy_logging.ProxyLoggingMiddleware(FakeApp(),
                                                   {conf_key: 'yes'})
        app.access_logger = FakeLogger()
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        resp = app(req.environ, start_response)
        # exhaust generator
        [x for x in resp]
        log_parts = self._log_parts(app)
        headers = unquote(log_parts[14]).split('\n')
        self.assert_('Host: localhost:80' in headers)
def test_access_log_headers_only(self):
    # With access_log_headers_only set, only the listed headers are
    # logged (matching is case-insensitive); all others are dropped.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeApp(), {'log_headers': 'yes',
                    'access_log_headers_only': 'FIRST, seCond'})
    app.access_logger = FakeLogger()
    req = Request.blank('/',
                        environ={'REQUEST_METHOD': 'GET'},
                        headers={'First': '1',
                                 'Second': '2',
                                 'Third': '3'})
    resp = app(req.environ, start_response)
    # exhaust generator
    [x for x in resp]
    log_parts = self._log_parts(app)
    headers = unquote(log_parts[14]).split('\n')
    self.assert_('First: 1' in headers)
    self.assert_('Second: 2' in headers)
    self.assert_('Third: 3' not in headers)
    self.assert_('Host: localhost:80' not in headers)
def test_upload_size(self):
    # PUT request: field 10 is bytes received, field 11 bytes sent; the
    # xfer stat is their sum.
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(),
                                               {'log_headers': 'yes'})
    app.access_logger = FakeLogger()
    req = Request.blank(
        '/v1/a/c/o/foo',
        environ={'REQUEST_METHOD': 'PUT',
                 'wsgi.input': StringIO.StringIO('some stuff')})
    resp = app(req.environ, start_response)
    # exhaust generator
    [x for x in resp]
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[11], str(len('FAKE APP')))
    self.assertEquals(log_parts[10], str(len('some stuff')))
    self.assertUpdateStats('object.PUT.200.xfer',
                           len('some stuff') + len('FAKE APP'),
                           app)
def test_upload_line(self):
    # FakeAppReadline consumes one input line; only that line counts
    # toward the logged bytes-received figure.
    app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(),
                                               {'log_headers': 'yes'})
    app.access_logger = FakeLogger()
    req = Request.blank(
        '/v1/a/c',
        environ={'REQUEST_METHOD': 'POST',
                 'wsgi.input': StringIO.StringIO(
                     'some stuff\nsome other stuff\n')})
    resp = app(req.environ, start_response)
    # exhaust generator
    [x for x in resp]
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[11], str(len('FAKE APP')))
    self.assertEquals(log_parts[10], str(len('some stuff\n')))
    self.assertUpdateStats('container.POST.200.xfer',
                           len('some stuff\n') + len('FAKE APP'),
                           app)
def test_log_query_string(self):
    """The query string is appended to the logged request path."""
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                      'QUERY_STRING': 'x=3'})
    # drain the response body so the log line gets written
    list(app(req.environ, start_response))
    fields = self._log_parts(app)
    self.assertEquals('/?x=3', unquote(fields[4]))
def test_client_logging(self):
    """Without forwarding headers, client ip equals the remote addr."""
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                      'REMOTE_ADDR': '1.2.3.4'})
    # drain the response body so the log line gets written
    list(app(req.environ, start_response))
    fields = self._log_parts(app)
    self.assertEquals('1.2.3.4', fields[0])  # client ip
    self.assertEquals('1.2.3.4', fields[1])  # remote addr
def test_iterator_closing(self):
    # The middleware must call close() on the wrapped app's response
    # iterator once it is exhausted.
    class CloseableBody(object):
        def __init__(self):
            self.closed = False

        def close(self):
            self.closed = True

        def __iter__(self):
            return iter(["CloseableBody"])

    body = CloseableBody()
    # NOTE(review): no FakeLogger is installed here, unlike the other
    # tests — the middleware's own logger from the empty conf is used.
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(body), {})
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                      'REMOTE_ADDR': '1.2.3.4'})
    resp = app(req.environ, start_response)
    # exhaust generator
    [x for x in resp]
    self.assertTrue(body.closed)
def test_proxy_client_logging(self):
    # X-Forwarded-For: the first address in the list is logged as the
    # client ip, while REMOTE_ADDR stays the direct peer.
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={
        'REQUEST_METHOD': 'GET',
        'REMOTE_ADDR': '1.2.3.4',
        'HTTP_X_FORWARDED_FOR': '4.5.6.7,8.9.10.11'})
    resp = app(req.environ, start_response)
    # exhaust generator
    [x for x in resp]
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[0], '4.5.6.7')  # client ip
    self.assertEquals(log_parts[1], '1.2.3.4')  # remote addr
    # X-Cluster-Client-Ip is honored the same way.
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={
        'REQUEST_METHOD': 'GET',
        'REMOTE_ADDR': '1.2.3.4',
        'HTTP_X_CLUSTER_CLIENT_IP': '4.5.6.7'})
    resp = app(req.environ, start_response)
    # exhaust generator
    [x for x in resp]
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[0], '4.5.6.7')  # client ip
    self.assertEquals(log_parts[1], '1.2.3.4')  # remote addr
def test_facility(self):
    # 'access_log_facility' must configure the syslog facility of the
    # middleware's access logger.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeApp(),
        {'log_headers': 'yes',
         'access_log_facility': 'LOG_LOCAL7'})
    handler = get_logger.handler4logger[app.access_logger.logger]
    self.assertEquals(SysLogHandler.LOG_LOCAL7, handler.facility)
def test_filter(self):
    """The paste filter factory, and the filter it builds, are callable."""
    make_filter = proxy_logging.filter_factory({})
    self.assert_(callable(make_filter))
    wrapped = make_filter(FakeApp())
    self.assert_(callable(wrapped))
def test_unread_body(self):
    # Closing the response iterator early (client disconnect) must be
    # logged as status 499 with only the bytes actually written.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeApp(['some', 'stuff']), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
    resp = app(req.environ, start_response)
    # read first chunk
    next(resp)
    resp.close()  # raise a GeneratorExit in middleware app_iter loop
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[6], '499')
    self.assertEquals(log_parts[11], '4')  # write length
def test_disconnect_on_readline(self):
    # An IOError while reading the request line is logged as 499 with
    # an unknown ('-') read length.
    app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                      'wsgi.input': FileLikeExceptor()})
    try:
        resp = app(req.environ, start_response)
        # read body
        ''.join(resp)
    except IOError:
        pass
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[6], '499')
    self.assertEquals(log_parts[10], '-')  # read length
def test_disconnect_on_read(self):
    # An IOError while reading the request body is logged as 499 with
    # an unknown ('-') read length.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeApp(['some', 'stuff']), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                      'wsgi.input': FileLikeExceptor()})
    try:
        resp = app(req.environ, start_response)
        # read body
        ''.join(resp)
    except IOError:
        pass
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[6], '499')
    self.assertEquals(log_parts[10], '-')  # read length
def test_app_exception(self):
    # An exception raised by the wrapped app is logged as status 500.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeAppThatExcepts(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
    try:
        app(req.environ, start_response)
    except Exception:
        pass
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[6], '500')
    self.assertEquals(log_parts[10], '-')  # read length
def test_no_content_length_no_transfer_encoding_with_list_body(self):
    # Without Content-Length/Transfer-Encoding the middleware counts
    # the bytes itself; leading empty chunks must be skipped.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeAppNoContentLengthNoTransferEncoding(
            # test the "while not chunk: chunk = next(iterator)"
            body=['', '', 'line1\n', 'line2\n'],
        ), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
    resp = app(req.environ, start_response)
    resp_body = ''.join(resp)
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[3], 'GET')
    self.assertEquals(log_parts[4], '/')
    self.assertEquals(log_parts[5], 'HTTP/1.0')
    self.assertEquals(log_parts[6], '200')
    self.assertEquals(resp_body, 'line1\nline2\n')
    self.assertEquals(log_parts[11], str(len(resp_body)))
def test_no_content_length_no_transfer_encoding_with_empty_strings(self):
    # An entirely empty body (only empty chunks) logs '-' for the
    # byte count rather than 0.
    app = proxy_logging.ProxyLoggingMiddleware(
        FakeAppNoContentLengthNoTransferEncoding(
            # test the "while not chunk: chunk = next(iterator)"
            body=['', '', ''],
        ), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
    resp = app(req.environ, start_response)
    resp_body = ''.join(resp)
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[3], 'GET')
    self.assertEquals(log_parts[4], '/')
    self.assertEquals(log_parts[5], 'HTTP/1.0')
    self.assertEquals(log_parts[6], '200')
    self.assertEquals(resp_body, '')
    self.assertEquals(log_parts[11], '-')
def test_no_content_length_no_transfer_encoding_with_generator(self):
    # Byte counting must also work when the body is a generator-style
    # iterable rather than a list.
    class BodyGen(object):
        def __init__(self, data):
            self.data = data

        def __iter__(self):
            yield self.data

    app = proxy_logging.ProxyLoggingMiddleware(
        FakeAppNoContentLengthNoTransferEncoding(
            body=BodyGen('abc'),
        ), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
    resp = app(req.environ, start_response)
    resp_body = ''.join(resp)
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[3], 'GET')
    self.assertEquals(log_parts[4], '/')
    self.assertEquals(log_parts[5], 'HTTP/1.0')
    self.assertEquals(log_parts[6], '200')
    self.assertEquals(resp_body, 'abc')
    self.assertEquals(log_parts[11], '3')
def test_req_path_info_popping(self):
    # Even if an earlier middleware popped PATH_INFO segments, the
    # original full path must be what gets logged.
    app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
    app.access_logger = FakeLogger()
    req = Request.blank('/v1/something', environ={'REQUEST_METHOD': 'GET'})
    req.path_info_pop()
    self.assertEquals(req.environ['PATH_INFO'], '/something')
    resp = app(req.environ, start_response)
    resp_body = ''.join(resp)
    log_parts = self._log_parts(app)
    self.assertEquals(log_parts[3], 'GET')
    self.assertEquals(log_parts[4], '/v1/something')
    self.assertEquals(log_parts[5], 'HTTP/1.0')
    self.assertEquals(log_parts[6], '200')
    self.assertEquals(resp_body, 'FAKE APP')
    self.assertEquals(log_parts[11], str(len(resp_body)))
def test_ipv6(self):
ipv6addr = '2001:db8:85a3:8d3:1319:8a2e:370:7348'
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.remote_addr = ipv6addr
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], ipv6addr)
self.assertEquals(log_parts[1], ipv6addr)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_log_info_none(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = []
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], '-')
def test_log_info_single(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = ['one']
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], 'one')
def test_log_info_multiple(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = ['one', 'and two']
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], 'one%2Cand%20two')
def test_log_auth_token(self):
auth_token = 'b05bf940-0464-4c0e-8c70-87717d2d73e8'
# Default - reveal_sensitive_prefix is 16
# No x-auth-token header
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
# Has x-auth-token header
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], 'b05bf940-0464-4c...')
# Truncate to first 8 characters
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '8'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '8'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], 'b05bf940...')
# Token length and reveal_sensitive_prefix are same (no truncate)
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': str(len(auth_token))})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], auth_token)
# No effective limit on auth token
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': constraints.MAX_HEADER_SIZE})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], auth_token)
# Don't log x-auth-token
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '0'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '0'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '...')
# Avoids pyflakes error, "local variable 'resp_body' is assigned to
# but never used
self.assertTrue(resp_body is not None)
def test_ensure_fields(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
with mock.patch('time.time',
mock.MagicMock(
side_effect=[10000000.0, 10000001.0])):
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(len(log_parts), 21)
self.assertEquals(log_parts[0], '-')
self.assertEquals(log_parts[1], '-')
self.assertEquals(log_parts[2], '26/Apr/1970/17/46/41')
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(log_parts[7], '-')
self.assertEquals(log_parts[8], '-')
self.assertEquals(log_parts[9], '-')
self.assertEquals(log_parts[10], '-')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
self.assertEquals(log_parts[12], '-')
self.assertEquals(log_parts[13], '-')
self.assertEquals(log_parts[14], '-')
self.assertEquals(log_parts[15], '1.0000')
self.assertEquals(log_parts[16], '-')
self.assertEquals(log_parts[17], '-')
self.assertEquals(log_parts[18], '10000000.000000000')
self.assertEquals(log_parts[19], '10000001.000000000')
self.assertEquals(log_parts[20], '-')
def test_dual_logging_middlewares(self):
# Since no internal request is being made, outer most proxy logging
# middleware, log1, should have performed the logging.
app = FakeApp()
flg0 = FakeLogger()
env = {}
log0 = proxy_logging.ProxyLoggingMiddleware(app, env, logger=flg0)
flg1 = FakeLogger()
log1 = proxy_logging.ProxyLoggingMiddleware(log0, env, logger=flg1)
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = log1(req.environ, start_response)
resp_body = ''.join(resp)
self._log_parts(log0, should_be_empty=True)
log_parts = self._log_parts(log1)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
    def test_dual_logging_middlewares_w_inner(self):
        # When a middleware between two proxy-logging layers issues its own
        # internal request, the inner logger records the app's response and
        # the outer logger records the middleware's (different) response.
        class FakeMiddleware(object):
            """
            Fake middleware to make a separate internal request, but construct
            the response with different data.
            """
            def __init__(self, app, conf):
                self.app = app
                self.conf = conf

            def GET(self, req):
                # Make the internal request
                ireq = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
                resp = self.app(ireq.environ, start_response)
                resp_body = ''.join(resp)
                if resp_body != 'FAKE APP':
                    return Response(request=req,
                                    body="FAKE APP WAS NOT RETURNED",
                                    content_type="text/plain")
                # But our response is different
                return Response(request=req, body="FAKE MIDDLEWARE",
                                content_type="text/plain")

            def __call__(self, env, start_response):
                req = Request(env)
                return self.GET(req)(env, start_response)

        # Since an internal request is being made, inner most proxy logging
        # middleware, log0, should have performed the logging.
        app = FakeApp()
        flg0 = FakeLogger()
        env = {}
        log0 = proxy_logging.ProxyLoggingMiddleware(app, env, logger=flg0)
        fake = FakeMiddleware(log0, env)
        flg1 = FakeLogger()
        log1 = proxy_logging.ProxyLoggingMiddleware(fake, env, logger=flg1)
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        resp = log1(req.environ, start_response)
        resp_body = ''.join(resp)
        # Inner most logger should have logged the app's response
        log_parts = self._log_parts(log0)
        self.assertEquals(log_parts[3], 'GET')
        self.assertEquals(log_parts[4], '/')
        self.assertEquals(log_parts[5], 'HTTP/1.0')
        self.assertEquals(log_parts[6], '200')
        # Byte count (field 11) reflects the app body, not the middleware's.
        self.assertEquals(log_parts[11], str(len('FAKE APP')))
        # Outer most logger should have logged the other middleware's response
        log_parts = self._log_parts(log1)
        self.assertEquals(log_parts[3], 'GET')
        self.assertEquals(log_parts[4], '/')
        self.assertEquals(log_parts[5], 'HTTP/1.0')
        self.assertEquals(log_parts[6], '200')
        self.assertEquals(resp_body, 'FAKE MIDDLEWARE')
        self.assertEquals(log_parts[11], str(len(resp_body)))
    def test_policy_index(self):
        """The storage policy index (log field 20) may come from either
        the request headers or the response headers."""
        # Policy index can be specified by X-Backend-Storage-Policy-Index
        # in the request header for object API
        app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
        app.access_logger = FakeLogger()
        req = Request.blank('/v1/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Backend-Storage-Policy-Index': '1'})
        resp = app(req.environ, start_response)
        # Drain the response iterator so logging actually happens.
        ''.join(resp)
        log_parts = self._log_parts(app)
        self.assertEquals(log_parts[20], '1')

        # Policy index can be specified by X-Backend-Storage-Policy-Index
        # in the response header for container API
        app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
        app.access_logger = FakeLogger()
        req = Request.blank('/v1/a/c', environ={'REQUEST_METHOD': 'GET'})

        # Stand-in for FakeApp.__call__ that also emits the policy index
        # header in the response and drains the request body like a real app.
        def fake_call(app, env, start_response):
            start_response(app.response_str,
                           [('Content-Type', 'text/plain'),
                            ('Content-Length', str(sum(map(len, app.body)))),
                            ('X-Backend-Storage-Policy-Index', '1')])
            while env['wsgi.input'].read(5):
                pass
            return app.body

        with mock.patch.object(FakeApp, '__call__', fake_call):
            resp = app(req.environ, start_response)
            ''.join(resp)
            log_parts = self._log_parts(app)
            self.assertEquals(log_parts[20], '1')
# Allow running this test module directly, in addition to via a test runner.
if __name__ == '__main__':
    unittest.main()
|
|
import pickle
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.utils._encode import _unique
from sklearn.utils._encode import _encode
from sklearn.utils._encode import _check_unknown
@pytest.mark.parametrize(
    "values, expected",
    [
        (np.array([2, 1, 3, 1, 3], dtype="int64"), np.array([1, 2, 3], dtype="int64")),
        (
            np.array(["b", "a", "c", "a", "c"], dtype=object),
            np.array(["a", "b", "c"], dtype=object),
        ),
        (np.array(["b", "a", "c", "a", "c"]), np.array(["a", "b", "c"])),
    ],
    ids=["int64", "object", "str"],
)
def test_encode_util(values, expected):
    """_unique returns the sorted unique values and _encode maps each
    input value to its index among those uniques."""
    found_uniques = _unique(values)
    assert_array_equal(found_uniques, expected)
    assert_array_equal(
        _encode(values, uniques=found_uniques), np.array([1, 0, 2, 0, 2])
    )
def test_encode_with_check_unknown():
    """check_unknown=True raises on unseen values; False skips the
    check for numeric dtypes but is ignored (always raises) for
    object dtype."""
    int_uniques = np.array([1, 2, 3])
    int_values = np.array([1, 2, 3, 4])
    # Default is True: unseen value 4 raises.
    with pytest.raises(ValueError, match="y contains previously unseen labels"):
        _encode(int_values, uniques=int_uniques, check_unknown=True)
    # No error when the check is disabled.
    _encode(int_values, uniques=int_uniques, check_unknown=False)
    # For object dtype the parameter is ignored and the error still raises.
    obj_uniques = np.array(["a", "b", "c"], dtype=object)
    obj_values = np.array(["a", "b", "c", "d"], dtype=object)
    with pytest.raises(ValueError, match="y contains previously unseen labels"):
        _encode(obj_values, uniques=obj_uniques, check_unknown=False)
def _assert_check_unknown(values, uniques, expected_diff, expected_mask):
    """Assert _check_unknown results both with and without return_mask."""
    assert_array_equal(_check_unknown(values, uniques), expected_diff)
    found_diff, found_mask = _check_unknown(values, uniques, return_mask=True)
    assert_array_equal(found_diff, expected_diff)
    assert_array_equal(found_mask, expected_mask)
@pytest.mark.parametrize(
    "values, uniques, expected_diff, expected_mask",
    [
        # Plain numeric arrays: 4 is unseen.
        (np.array([1, 2, 3, 4]), np.array([1, 2, 3]), [4], [True, True, True, False]),
        (np.array([2, 1, 4, 5]), np.array([2, 5, 1]), [4], [True, True, False, True]),
        # nan in values but not in uniques: nan is reported unknown.
        (np.array([2, 1, np.nan]), np.array([2, 5, 1]), [np.nan], [True, True, False]),
        # nan present in both: only the numeric 4 is unknown.
        (
            np.array([2, 1, 4, np.nan]),
            np.array([2, 5, 1, np.nan]),
            [4],
            [True, True, False, True],
        ),
        (
            np.array([2, 1, 4, np.nan]),
            np.array([2, 5, 1]),
            [4, np.nan],
            [True, True, False, False],
        ),
        # nan only in uniques: harmless, 4 is still the only unknown.
        (
            np.array([2, 1, 4, 5]),
            np.array([2, 5, 1, np.nan]),
            [4],
            [True, True, False, True],
        ),
        # Object dtype arrays.
        (
            np.array(["a", "b", "c", "d"], dtype=object),
            np.array(["a", "b", "c"], dtype=object),
            np.array(["d"], dtype=object),
            [True, True, True, False],
        ),
        (
            np.array(["d", "c", "a", "b"], dtype=object),
            np.array(["a", "c", "b"], dtype=object),
            np.array(["d"], dtype=object),
            [False, True, True, True],
        ),
        # Native str dtype arrays.
        (
            np.array(["a", "b", "c", "d"]),
            np.array(["a", "b", "c"]),
            np.array(["d"]),
            [True, True, True, False],
        ),
        (
            np.array(["d", "c", "a", "b"]),
            np.array(["a", "c", "b"]),
            np.array(["d"]),
            [False, True, True, True],
        ),
    ],
)
def test_check_unknown(values, uniques, expected_diff, expected_mask):
    """_check_unknown reports unseen values and the valid-entry mask."""
    _assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize("missing_value", [None, np.nan, float("nan")])
@pytest.mark.parametrize("pickle_uniques", [True, False])
def test_check_unknown_missing_values(missing_value, pickle_uniques):
    """_check_unknown handles missing values in object arrays, whether
    the missing value is a known category or not, optionally after a
    pickle round-trip of the uniques."""
    cases = [
        # Missing value is itself a known category.
        (np.array(["d", "c", "a", "b", missing_value], dtype=object),
         np.array(["c", "a", "b", missing_value], dtype=object),
         ["d"], [False, True, True, True, True]),
        # Missing value is unknown alongside "d".
        (np.array(["d", "c", "a", "b", missing_value], dtype=object),
         np.array(["c", "a", "b"], dtype=object),
         ["d", missing_value], [False, True, True, True, False]),
        # Only the missing value is unknown.
        (np.array(["a", missing_value], dtype=object),
         np.array(["a", "b", "z"], dtype=object),
         [missing_value], [True, False]),
    ]
    for values, uniques, expected_diff, expected_mask in cases:
        if pickle_uniques:
            uniques = pickle.loads(pickle.dumps(uniques))
        _assert_check_unknown(values, uniques, expected_diff, expected_mask)
@pytest.mark.parametrize("missing_value", [np.nan, None, float("nan")])
@pytest.mark.parametrize("pickle_uniques", [True, False])
def test_unique_util_missing_values_objects(missing_value, pickle_uniques):
    """_unique places the missing value last for object arrays, and
    _encode maps it to the final index."""
    values = np.array(["a", "c", "c", missing_value, "b"], dtype=object)
    expected_uniques = np.array(["a", "b", "c", missing_value], dtype=object)
    uniques = _unique(values)
    if missing_value is None:
        assert_array_equal(uniques, expected_uniques)
    else:
        # nan != nan, so the trailing nan must be compared via isnan.
        assert_array_equal(uniques[:-1], expected_uniques[:-1])
        assert np.isnan(uniques[-1])
    if pickle_uniques:
        uniques = pickle.loads(pickle.dumps(uniques))
    assert_array_equal(_encode(values, uniques=uniques),
                       np.array([0, 2, 2, 3, 1]))
def test_unique_util_missing_values_numeric():
    """np.nan sorts last among numeric uniques; the inverse mapping and
    _encode agree with each other."""
    values = np.array([3, 1, np.nan, 5, 3, np.nan], dtype=float)
    expected_uniques = np.array([1, 3, 5, np.nan], dtype=float)
    expected_inverse = np.array([1, 0, 3, 2, 1, 3])
    assert_array_equal(_unique(values), expected_uniques)
    uniques, inverse = _unique(values, return_inverse=True)
    assert_array_equal(uniques, expected_uniques)
    assert_array_equal(inverse, expected_inverse)
    assert_array_equal(_encode(values, uniques=uniques), expected_inverse)
def test_unique_util_with_all_missing_values():
    """None and nan are distinct missing values for object arrays:
    None sorts before the single trailing nan in the uniques."""
    values = np.array(
        [np.nan, "a", "c", "c", None, float("nan"), None], dtype=object)
    uniques = _unique(values)
    assert_array_equal(uniques[:-1], ["a", "c", None])
    # The very last unique is nan.
    assert np.isnan(uniques[-1])
    _, inverse = _unique(values, return_inverse=True)
    assert_array_equal(inverse, [3, 0, 1, 1, 2, 3, 2])
def test_check_unknown_with_both_missing_values():
    """_check_unknown reports None and nan as separate unknown values
    (None first), with or without the mask."""
    values = np.array([np.nan, "a", "c", "c", None, np.nan, None],
                      dtype=object)
    known = np.array(["a", "c"], dtype=object)
    diff = _check_unknown(values, known_values=known)
    assert diff[0] is None
    assert np.isnan(diff[1])
    diff, valid_mask = _check_unknown(values, known_values=known,
                                      return_mask=True)
    assert diff[0] is None
    assert np.isnan(diff[1])
    assert_array_equal(valid_mask,
                       [False, True, True, True, False, False, False])
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs for statistics views."""
import ast
from core import jobs
from core.domain import calculation_registry
from core.domain import exp_services
from core.domain import interaction_registry
from core.platform import models
import feconf
(base_models, stats_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.statistics, models.NAMES.exploration
])
transaction_services = models.Registry.import_transaction_services()
# Counts contributions from all versions.
VERSION_ALL = 'all'
class InteractionAnswerSummariesMRJobManager(
        jobs.BaseMapReduceJobManagerForContinuousComputations):
    """Job to calculate interaction view statistics, e.g. most frequent answers
    of multiple-choice interactions.
    """
    @classmethod
    def _get_continuous_computation_class(cls):
        """Returns the InteractionAnswerSummariesAggregator class associated
        with this MapReduce job.
        """
        return InteractionAnswerSummariesAggregator

    @classmethod
    def entity_classes_to_map_over(cls):
        """Returns the StateAnswersModel object."""
        return [stats_models.StateAnswersModel]

    # TODO(bhenning): Update this job to persist results for all older
    # exploration versions, since those versions should never have new answers
    # submitted to them. Moreover, answers are also only added so this job might
    # be further optimized to increment on previous results, rather than
    # recomputing results from scratch each time.
    @staticmethod
    def map(item):
        """Returns the submitted answer in dict format:
        {
            'state_answers_model_id': The id of the submitted output
                answer.
            'interaction_id': The interaction id to which the submitted
                output answer belongs to.
            'exploration_version': The exploration version to which the
                submitted output answer belongs to.
        }
        Args:
            item: The submitted answer.
        Yields:
            dict(str, str). The submitted answer in dict format.
        """
        if InteractionAnswerSummariesMRJobManager._entity_created_before_job_queued(  # pylint: disable=line-too-long
                item):
            # Output answers submitted to the exploration for this exp version.
            versioned_key = u'%s:%s:%s' % (
                item.exploration_id, item.exploration_version, item.state_name)
            yield (versioned_key.encode('utf-8'), {
                'state_answers_model_id': item.id,
                'interaction_id': item.interaction_id,
                'exploration_version': item.exploration_version
            })
            # Output the same set of answers independent of the version. This
            # allows the reduce step to aggregate answers across all
            # exploration versions.
            all_versions_key = u'%s:%s:%s' % (
                item.exploration_id, VERSION_ALL, item.state_name)
            yield (all_versions_key.encode('utf-8'), {
                'state_answers_model_id': item.id,
                'interaction_id': item.interaction_id,
                'exploration_version': item.exploration_version
            })

    @staticmethod
    def reduce(key, stringified_values):
        """Calculates and saves each answer submitted for the exploration.
        Args:
            key: str. The unique key of the form:
                <exploration_id>:<exploration_version>:<state_name>
            stringified_values: list(str). A list of stringified_values of the
                submitted answers.
        Yields:
            str. One of the following strings:
                - Expected a single version when aggregating answers for:
                    Occurs when the versions list contains multiple versions
                    instead of a specific version.
                - Expected exactly one interaction ID for exploration:
                    Occurs when there is not exactly one interaction ID
                    for each exploration and version.
                - Expected at least one item ID for exploration:
                    Occurs when there is not at least one Item ID for
                    each exploration and version.
                - Ignoring answers submitted to version:
                    Occurs when version mismatches and the new
                    version has a different interaction ID.
        """
        exploration_id, exploration_version, state_name = key.split(':')
        value_dicts = [
            ast.literal_eval(stringified_value)
            for stringified_value in stringified_values]
        # Extract versions in descending order since answers are prioritized
        # based on recency.
        versions = list(set([
            int(value_dict['exploration_version'])
            for value_dict in value_dicts]))
        versions.sort(reverse=True)
        # For answers mapped to specific versions, the versions list should only
        # contain the version they correspond to. Otherwise, if they map to
        # VERSION_ALL, then multiple versions may be included.
        # NOTE: exploration_version is a string parsed out of the key, while
        # the entries of 'versions' are ints; the comparison must cast it to
        # int, otherwise this check spuriously fails for every
        # version-specific key.
        if exploration_version != VERSION_ALL and (
                len(versions) != 1 or
                versions[0] != int(exploration_version)):
            yield (
                'Expected a single version when aggregating answers for '
                'exploration %s (v=%s), but found: %s' % (
                    exploration_id, exploration_version, versions))
        # Map interaction IDs and StateAnswersModel IDs to exploration versions.
        versioned_interaction_ids = {version: set() for version in versions}
        versioned_item_ids = {version: set() for version in versions}
        for value_dict in value_dicts:
            version = value_dict['exploration_version']
            versioned_interaction_ids[version].add(value_dict['interaction_id'])
            versioned_item_ids[version].add(
                value_dict['state_answers_model_id'])
        # Convert the interaction IDs to a list so they may be easily indexed.
        versioned_interaction_ids = {
            v: list(interaction_ids)
            for v, interaction_ids in versioned_interaction_ids.iteritems()
        }
        # Verify all interaction ID and item ID containers are well-structured.
        for version, interaction_ids in versioned_interaction_ids.iteritems():
            if len(interaction_ids) != 1:
                yield (
                    'Expected exactly one interaction ID for exploration %s '
                    'and version %s, found: %s' % (
                        exploration_id, version, len(interaction_ids)))
        for version, item_ids in versioned_item_ids.iteritems():
            if not item_ids:
                yield (
                    'Expected at least one item ID for exploration %s and '
                    'version %s, found: %s' % (
                        exploration_id, version, len(item_ids)))
        # Filter out any item IDs which happen at and before a version with a
        # changed interaction ID. Start with the most recent version since it
        # will refer to the most relevant answers.
        latest_version = versions[0]
        latest_interaction_id = versioned_interaction_ids[latest_version][0]
        # Ensure the exploration corresponding to these answers exists.
        exp = exp_services.get_exploration_by_id(
            exploration_id, strict=False)
        if exp is None:
            return
        if exploration_version == VERSION_ALL:
            # If aggregating across all versions, verify that the latest answer
            # version is equal to the latest version of the exploration,
            # otherwise ignore all answers since none of them can be applied to
            # the latest version.
            if state_name in exp.states:
                loaded_interaction_id = exp.states[state_name].interaction.id
                # Only check if the version mismatches if the new version has a
                # different interaction ID.
                if latest_interaction_id != loaded_interaction_id and (
                        latest_version != exp.version):
                    yield (
                        'Ignoring answers submitted to version %s and below '
                        'since the latest exploration version is %s' % (
                            latest_version, exp.version))
                    versions = []
        # In the VERSION_ALL case, we only take into account the most recent
        # consecutive block of versions with the same interaction ID as the
        # current version, and ignore all versions prior to this block. This
        # logic isn't needed for individually-mapped versions and, in that case,
        # we skip all this code in favor of performance.
        if len(versions) > 1:
            invalid_version_indexes = [
                index for index, version in enumerate(versions)
                if versioned_interaction_ids[version][0] != (
                    latest_interaction_id)]
            earliest_acceptable_version_index = (
                invalid_version_indexes[0] - 1
                if invalid_version_indexes else len(versions) - 1)
            earliest_acceptable_version = versions[
                earliest_acceptable_version_index]
            # Trim away anything related to the versions which correspond to
            # different or since changed interaction IDs.
            ignored_versions = [
                version for version in versions
                if version < earliest_acceptable_version]
            for ignored_version in ignored_versions:
                del versioned_interaction_ids[ignored_version]
                del versioned_item_ids[ignored_version]
            versions = versions[:earliest_acceptable_version_index + 1]
        # Retrieve all StateAnswerModel entities associated with the remaining
        # item IDs which correspond to a single interaction ID shared among all
        # the versions between start_version and latest_version, inclusive.
        item_ids = set()
        for version in versions:
            item_ids.update(versioned_item_ids[version])
        # Collapse the list of answers into a single answer dict. This
        # aggregates across multiple answers if the key ends with VERSION_ALL.
        # TODO(bhenning): Find a way to iterate across all answers more
        # efficiently and by not loading all answers for a particular
        # exploration into memory.
        submitted_answer_list = []
        combined_state_answers = {
            'exploration_id': exploration_id,
            'exploration_version': exploration_version,
            'state_name': state_name,
            'interaction_id': latest_interaction_id,
            'submitted_answer_list': submitted_answer_list
        }
        # NOTE: The answers stored in submitted_answers_list must be sorted
        # according to the chronological order of their submission otherwise
        # TopNUnresolvedAnswersByFrequency calculation will output invalid
        # results.
        state_answers_models = stats_models.StateAnswersModel.get_multi(
            item_ids)
        for state_answers_model in state_answers_models:
            if state_answers_model:
                submitted_answer_list += (
                    state_answers_model.submitted_answer_list)
        # Get all desired calculations for the current interaction id.
        calc_ids = interaction_registry.Registry.get_interaction_by_id(
            latest_interaction_id).answer_calculation_ids
        calculations = [
            calculation_registry.Registry.get_calculation_by_id(calc_id)
            for calc_id in calc_ids]
        # Perform each calculation, and store the output.
        for calc in calculations:
            calc_output = calc.calculate_from_state_answers_dict(
                combined_state_answers)
            calc_output.save()
class InteractionAnswerSummariesRealtimeModel(
        jobs.BaseRealtimeDatastoreClassForContinuousComputations):
    # TODO(bhenning): Implement a real-time model for
    # InteractionAnswerSummariesAggregator.
    """Realtime model class for InteractionAnswerSummariesAggregator."""
    # Placeholder: no realtime aggregation state is stored yet; all summary
    # computation happens in the batch MapReduce job above.
    pass
class InteractionAnswerSummariesAggregator(
        jobs.BaseContinuousComputationManager):
    """Continuous computation that listens to state-answer submissions and
    keeps the StateAnswer view calculations up to date.
    """

    @classmethod
    def get_event_types_listened_to(cls):
        """Returns the event types this aggregator subscribes to.

        Returns:
            list(str). Contains only the answer-submitted event type.
        """
        return [feconf.EVENT_TYPE_ANSWER_SUBMITTED]

    @classmethod
    def _get_realtime_datastore_class(cls):
        """Returns the realtime datastore class backing this aggregator."""
        return InteractionAnswerSummariesRealtimeModel

    @classmethod
    def _get_batch_job_manager_class(cls):
        """Returns the MapReduce manager that computes the answer summaries."""
        return InteractionAnswerSummariesMRJobManager
|
|
"""
Copyright (c) 2015
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
notification.py: Parser for notification events received from Controller
"""
import os
import re
import string
import xmltodict
# Maps YANG model namespace URIs to the short prefixes used when
# interpreting element names parsed out of notification XML.
yang_namespace_to_prefix_map = {
    'urn:opendaylight:model:topology:inventory' : 'nt1',
    'urn:TBD:params:xml:ns:yang:network-topology' : 'nt2',
    'urn:opendaylight:inventory' : 'inv',
    'urn:opendaylight:flow:inventory' : 'flownode',
    'urn:opendaylight:host-tracker' : 'host-track',
}
def yang_nsname_to_prefix(nsname):
    """Return the short prefix for a YANG namespace name, or the name
    itself when no mapping entry exists."""
    return yang_namespace_to_prefix_map.get(nsname, nsname)
def yang_prefix_to_nsname(prefix, mapping=None):
    """Return the YANG namespace name for a short prefix (reverse lookup).

    Falls back to returning the prefix itself when no mapping entry
    matches.

    :param prefix: short prefix, e.g. 'inv'
    :param mapping: optional {namespace: prefix} dict; defaults to the
        module-level yang_namespace_to_prefix_map
    """
    if mapping is None:
        mapping = yang_namespace_to_prefix_map
    # Fix: the original iterated the dict directly ('for k, v in dict'),
    # which yields only keys and raised ValueError when unpacking each
    # key string into two names. Iterate key/value pairs instead.
    for nsname, pfx in mapping.items():
        if pfx == prefix:
            return nsname
    return prefix
#-------------------------------------------------------------------------------
# Class 'NetworkTopologyChangeNotification'
#-------------------------------------------------------------------------------
class NetworkTopologyChangeNotification():
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, event):
d = xmltodict.parse(event)
try:
p1 = 'notification'
notification = d[p1]
p2 = 'eventTime'
self.timestamp = notification[p2]
self.events = []
p3 = 'data-changed-notification'
p4 = 'data-change-event'
events = notification[p3][p4]
if isinstance(events, list):
for item in events:
tc_evt = TopoChangeEvent(item)
self.events.append(tc_evt)
elif isinstance(events, dict):
tc_evt = TopoChangeEvent(events)
self.events.append(tc_evt)
else:
assert(False), "TBD data=%s, type=%s" % (events, type(events))
self.added_switches = []
self.removed_switches = []
self.added_hosts = []
self.removed_hosts = []
self.added_links = []
self.removed_links = []
for event in self.events:
if event.created():
if event.is_switch():
self.added_switches.append(event.get_node_id())
elif event.is_host():
self.added_hosts.append(event.get_node_id())
elif event.is_link():
self.added_links.append(event.get_link_id())
elif event.deleted():
if event.is_switch():
self.removed_switches.append(event.get_node_id())
elif event.is_host():
self.removed_hosts.append(event.get_node_id())
elif event.is_link():
self.removed_links.append(event.get_link_id())
except() as e:
assert(False)
print "Error, %s" % e
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def get_time(self):
return self.timestamp
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def switches_added(self):
return self.added_switches
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def switches_removed(self):
return self.removed_switches
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def hosts_added(self):
return self.added_hosts
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def hosts_removed(self):
return self.removed_hosts
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def links_added(self):
return self.added_links
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def links_removed(self):
return self.removed_links
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def print_events(self):
for event in self.events:
if event.is_link():
print "\n".strip()
event.do_print()
print "\n".strip()
else:
print "\n".strip()
event.do_print()
print "\n".strip()
#-------------------------------------------------------------------------------
# Class 'TopoChangeEvent'
#-------------------------------------------------------------------------------
class TopoChangeEvent():
    """A single 'data-change-event' entry from a data-changed-notification.

    All keys of the event dict become attributes of this instance, except
    'path', which is wrapped into a PathInfo stored as 'path_info'.
    """
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def __init__(self, event):
        # Copy every key/value pair of the event dict onto this instance;
        # the 'path' entry gets special treatment via PathInfo.
        p = 'path'
        if isinstance(event, dict):
            for k,v in event.items():
                if k == p:
                    self.path_info = PathInfo(v)
                else:
                    setattr(self, k, v)
        else:
            assert (False), " TBD evt=%s, type=%s" % (event, type(event))
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def created(self):
        """Return True if this event's operation is 'created'."""
        res = False
        p = 'operation'
        if hasattr(self, p):
            attr = getattr(self, p)
            res = (attr == 'created')
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def deleted(self):
        """Return True if this event's operation is 'deleted'."""
        res = False
        p = 'operation'
        if hasattr(self, p):
            attr = getattr(self, p)
            res = (attr == 'deleted')
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def updated(self):
        """Return True if this event's operation is 'updated'."""
        res = False
        p = 'operation'
        if hasattr(self, p):
            attr = getattr(self, p)
            res = (attr == 'updated')
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def get_path(self):
        """Return the raw '#text' of the 'path' attribute, if present.

        NOTE(review): __init__ stores the 'path' entry as 'path_info' and
        never sets a 'path' attribute, so this likely always returns
        None -- confirm against callers.
        """
        path = None
        p = 'path'
        if hasattr(self, p):
            p3 = '#text'
            attr = getattr(self, p)
            path = attr[p3]
        return path
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def is_node(self):
        """Return True if the event path ends in a 'node-id' component."""
        res = False
        p = 'path_info'
        if hasattr(self, p):
            path = self.path_info.path
            basename = os.path.basename(path)
            if basename:
                p1 = '.*node-id$'
                r = re.search(p1, basename)
                if r != None:
                    res = True
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def is_switch(self):
        """Return True for node events whose id has the 'openflow' prefix."""
        res = False
        if self.is_node():
            node_id = self.get_node_id()
            if node_id and node_id.startswith('openflow'):
                res = True
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def is_host(self):
        """Return True for node events whose id has the 'host' prefix."""
        res = False
        if self.is_node():
            node_id = self.get_node_id()
            if node_id and node_id.startswith('host'):
                res = True
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def get_node_id(self):
        """Extract the 'node-id=' value embedded in the event path."""
        node_id = None
        p = 'path_info'
        if hasattr(self, p):
            path = self.path_info.path
            # repr() adds quotes; they are stripped again by translate below
            chunks = repr(path).split(']')
            if chunks:
                p = 'node-id='
                for s in chunks:
                    idx = s.find(p)
                    if(idx >= 0):
                        # NOTE(review): str.translate(None, delchars) is the
                        # Python 2 delete-characters form (removes [ ] ' ");
                        # this module is Python 2 only.
                        node_id = s[idx + len(p):].translate(None , "[]'\"")
                        break
        return node_id
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def is_link(self):
        """Return True if the event path ends in a 'link-id' component."""
        res = False
        p = 'path_info'
        if hasattr(self, p):
            path = self.path_info.path
            basename = os.path.basename(path)
            if basename:
                p1 = '.*link-id$'
                r = re.search(p1, basename)
                if r != None:
                    res = True
        return res
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def get_link_id(self):
        """Extract the 'link-id=' value embedded in the event path."""
        link_id = None
        p = 'path_info'
        if hasattr(self, p):
            path = self.path_info.path
            # same quoting/stripping scheme as get_node_id
            chunks = repr(path).split(']')
            if chunks:
                p = 'link-id='
                for s in chunks:
                    idx = s.find(p)
                    if(idx >= 0):
                        link_id = s[idx + len(p):].translate(None , "[]'\"")
                        break
        return link_id
    #---------------------------------------------------------------------------
    #
    #---------------------------------------------------------------------------
    def do_print(self):
        """Pretty-print this event (operation plus its path details)."""
        print " <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<"
        print " operation: %s" % self.operation
        self.path_info.do_print()
        print " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
#-------------------------------------------------------------------------------
# Class 'PathInfo'
#-------------------------------------------------------------------------------
class PathInfo():
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def __init__(self, info):
if isinstance(info, dict):
p1 = '#text'
p2 = '@xmlns'
try:
path = info[p1]
namespaces = []
for k, v in info.items():
if k.startswith(p2):
pfx = yang_nsname_to_prefix(v)
d = {'ns': v, 'pfx': pfx}
namespaces.append(d)
nickname = k.split(':')[-1]
path = string.replace(path, nickname, pfx)
self.namespaces = namespaces
self.path = path
except:
print "Error, unexpected data format"
else:
assert (False), " TBD evt=%s, type=%s" % (info, type(info))
#---------------------------------------------------------------------------
#
#---------------------------------------------------------------------------
def do_print(self):
for ns in self.namespaces:
print " namespace: %s (prefix: %s)" % (ns['ns'], ns['pfx'])
print " path: %s" % self.path
|
|
"""
A set of helper classes for better pipelining of data preprocessing
for machine learning and beyond.
"""
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import Lasso
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.dummy import DummyRegressor, DummyClassifier
from searchgrid import set_grid
class IOTransform(BaseEstimator):
    """
    A base class for training.
    Implements a set of useful methods and variables, such
    that preprocessing of the data can be done using scikit-learn
    like class instances.
    Parameters
    ----------
    X_prep : BaseEstimator
        Class instance that will be fitted to the input X
        for the model. This transformer is applied to the
        input X before it is fed into the model.
    Y_prep : BaseEstimator
        Class instance that will be fitted to the output values Y
        for the model. This transformer is applied to the values of
        Y when it is used for training.
    Y_post : BaseEstimator
        Class instance that will be fitted to the output values Y
        for the model. This transformer is applied after the values
        are generated.
    model : BaseEstimator
        Instance of a class that is used for mapping from inputs to
        outputs.
    metric : callable with two arguments
        Scorer that is used to evaluate predictions of the model. If
        None, the score function of the model will be used.
    augm : BaseEstimator or None
        Optional augmenter whose fit_transform(X, Y) is applied to the
        training data before all other preprocessing.
    """
    # NOTE(review): non-standard estimator tag; scikit-learn itself only
    # recognizes values like "classifier"/"regressor" -- confirm usage.
    _estimator_type = "generator"
    def __init__(self, model, metric=None, augm=None, X_prep=None, Y_prep=None, Y_post=None):
        self.X_prep = X_prep
        self.Y_prep = Y_prep
        self.Y_post = Y_post
        if not isinstance(model, BaseEstimator):
            raise TypeError('Model should be an instance of BaseEstimator, got %s' % model)
        self.model = model
        self.metric = metric
        self.augm = augm
    def set_params(self, **params):
        """
        Custom setting of parameters for generative models.
        All parameters that start with 'x_prep', 'y_prep', 'y_post',
        'augm' or 'model' are delegated to the respective sub-estimator.
        """
        elements = {'augm', 'X_prep', 'Y_prep', 'Y_post', 'model'}
        # parameters that do not target a sub-estimator are set on self
        self_params = {
            k:v for k, v in params.items()
            if not any(
                k.startswith(p.lower()) for p in elements
            )
        }
        BaseEstimator.set_params(self, **self_params)
        # set attributes of elements
        for e in elements:
            element = getattr(self, e)
            if isinstance(element, BaseEstimator):
                # strip the '<element>__' prefix (hence len(e) + 2);
                # assumes keys come in lowercase, e.g. 'x_prep__whiten'
                # -- TODO confirm callers always lowercase the prefix
                subprm = {
                    k[len(e)+2:]: v for k, v in params.items()
                    if k.startswith(e.lower())
                }
                element.set_params(**subprm)
        return self
    def _fit_preprocessors(self, X, Y):
        """Fits all preprocessors to the data.
        Order: augmentation first, then X preprocessing, then Y
        postprocessor fitting, then Y preprocessing.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        Y : {array-like, sparse matrix}, shape [n_samples, ...]
            The target values estimated by the model.
        """
        if self.augm is not None:
            X, Y = self.augm.fit_transform(X, Y)
        if self.X_prep is not None:
            X = self.X_prep.fit_transform(X, Y)
        if self.Y_post is not None:
            self.Y_post.fit(Y, X)
        if self.Y_prep is not None:
            Y = self.Y_prep.fit_transform(Y, X)
        return X, Y
    def _transform_inputs(self, X, Y=None):
        """Transforms inputs so that they can be used for estimations
        with the generative model.
        Returns X alone when Y is None, otherwise the (X, Y) pair.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        Y : {array-like, sparse matrix}, shape [n_samples, ...]
            The target values estimated by the model.
        """
        if self.X_prep is not None:
            # account for some transformers taking only single argument;
            # inspects the transform signature via __code__.co_varnames
            if 'Y' in self.X_prep.transform.__code__.co_varnames:
                X = self.X_prep.transform(X, Y)
            else:
                X = self.X_prep.transform(X)
        if Y is None:
            return X
        if self.Y_prep is not None:
            # account for some transformers taking only single argument
            if 'Y' in self.Y_prep.transform.__code__.co_varnames:
                Y = self.Y_prep.transform(Y, X)
            else:
                Y = self.Y_prep.transform(Y)
        return X, Y
    def _transform_generated_outputs(self, Y, X=None):
        """Apply output transformers to the generated values.
        Y_prep is inverted first, then Y_post is applied.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        Y : {array-like, sparse matrix}, shape [n_samples, ...]
            The target values estimated by the model.
        """
        if self.Y_prep is not None:
            if 'Y' in self.Y_prep.inverse_transform.__code__.co_varnames:
                Y = self.Y_prep.inverse_transform(Y, X)
            else:
                Y = self.Y_prep.inverse_transform(Y)
        if self.Y_post is not None:
            if 'Y' in self.Y_post.transform.__code__.co_varnames:
                Y = self.Y_post.transform(Y, X)
            else:
                Y = self.Y_post.transform(Y)
        return Y
    def fit(self, X, Y, *args, **kwargs):
        """
        Complete fitting pipeline with data preprocessing for generative
        models.
        Includes data augmentation.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        Y : {array-like, sparse matrix}, shape [n_samples, ...]
            The target values estimated by the model.
        """
        X, Y = self._fit_preprocessors(X, Y)
        self.model.fit(X, Y, *args, **kwargs)
        return self
    def predict(self, X, *args, **kwargs):
        """
        Full generation pipeline with all necessary steps such as data
        preprocessing.
        IMPORTANT: this function does not do augmentation of input
        values! Hence a particular form of X should be the one
        that self.augm returns.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        """
        X = self._transform_inputs(X)
        Y = self.model.predict(X, *args, **kwargs)
        Y = self._transform_generated_outputs(Y, X)
        return Y
    def score_no_augmentation(self, X, Y, *args, **kwargs):
        """
        Evaluates the quality of the model using comparison
        to real data.
        DOES NOT include the data augmentation.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        Y : {array-like, sparse matrix}, shape [n_samples, ...]
            The target values estimated by the model.
        Returns
        -------
        score : float
            Score from 0.0 to 1.0 that indicates quality of estimations.
        """
        if self.metric:
            Yp = self.predict(X, *args, **kwargs)
            score = self.metric(Y, Yp)
        else:
            # NOTE(review): falls back to the raw model score on
            # untransformed X/Y -- confirm this asymmetry is intended.
            score = self.model.score(X, Y)
        return score
    def score(self, X, Y, *args, **kwargs):
        """
        Evaluates the quality of the model using comparison
        to real data.
        Includes data augmentation.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, ...]
            The data used as inputs to generate model's outputs.
        Y : {array-like, sparse matrix}, shape [n_samples, ...]
            The target values estimated by the model.
        Returns
        -------
        score : float
            Score from 0.0 to 1.0 that indicates quality of estimations.
        """
        if self.augm is not None:
            X, Y = self.augm.transform(X, Y)
        return self.score_no_augmentation(X, Y, *args, **kwargs)
def make_regressors(subset=None):
    """Build a one-step Pipeline whose 'finmodel' step is grid-searched
    over a (sub)set of regressors.

    subset selects regressor names from the catalog; None means all.
    """
    catalog = {
        'gbrt': set_grid(GradientBoostingRegressor(),
                         n_estimators=[2 ** i for i in range(1, 11)],
                         learning_rate=[0.1, 0.01, 0.001],
                         ),
        'lasso': set_grid(Lasso(),
                          alpha=np.exp(np.linspace(-8, 8)),
                          )
    }
    names = list(catalog.keys()) if subset is None else subset
    candidates = [catalog[name] for name in names]
    pipe = Pipeline(
        [('finmodel', DummyRegressor())]
    )
    return set_grid(pipe, finmodel=candidates)
def grid_regressors(subset=None):
    """Create a number of regressors with corresponding
    parameter ranges that will be iterated over with
    GridSearchCV.
    Parameters
    ----------
    subset : array - like or None
        Names of the models to include; None selects every model.
    Returns
    -------
    regressors : list
        List of regressors with attached grids.
    """
    catalog = {
        'gbrt': set_grid(
            GradientBoostingRegressor(),
            n_estimators=[2 ** i for i in range(1, 11)],
            learning_rate=[0.1, 0.01, 0.001]
        ),
        'svr': set_grid(
            SVR(),
            C=np.logspace(-5, 5, 20),
            epsilon=[0.1, 0.01, 0.001],
            gamma=np.logspace(-5, 5, 20)
        ),
        'tree': set_grid(
            DecisionTreeRegressor(),
            min_samples_split=np.logspace(-5, 0, 20),
            max_depth=list(range(1, 20)),
        ),
        'lasso': set_grid(
            Lasso(),
            alpha=np.exp(np.linspace(-8, 8)),
        )
    }
    names = list(catalog.keys()) if subset is None else subset
    return [catalog[name] for name in names]
def make_dummy_regressor(subset=None):
    """Build a one-step Pipeline whose 'model' step is grid-searched over
    baseline DummyRegressor strategies.
    """
    catalog = {
        'dummy': set_grid(DummyRegressor(),
                          strategy=['mean', 'median']
                          ),
    }
    names = list(catalog.keys()) if subset is None else subset
    candidates = [catalog[name] for name in names]
    pipe = Pipeline(
        [('model', Lasso())]
    )
    return set_grid(pipe, model=candidates)
class DummyClassifierNx(DummyClassifier):
    """DummyClassifier that ignores its real inputs: X is replaced by a
    constant single-column zero matrix of matching length."""

    def __init__(self, strategy="stratified", random_state=None,
                 constant=None):
        super(DummyClassifierNx, self).__init__(
            strategy=strategy, random_state=random_state, constant=constant
        )

    def _X(self, X):
        # One zero feature per sample; only len(X) matters.
        return np.zeros((len(X), 1))

    def fit(self, X, y, sample_weight=None):
        return super(DummyClassifierNx, self).fit(
            self._X(X), y, sample_weight=sample_weight)

    def predict(self, X):
        return super(DummyClassifierNx, self).predict(self._X(X))

    def score(self, X, y, sample_weight=None):
        return super(DummyClassifierNx, self).score(
            self._X(X), y, sample_weight=sample_weight)
class DummyRegressorNx(DummyRegressor):
    """DummyRegressor that ignores its real inputs: X is replaced by a
    constant single-column zero matrix of matching length."""

    def __init__(self, strategy="mean", constant=None, quantile=None):
        # BUG FIX: was super(DummyRegressor, self).__init__(...), which
        # skipped DummyRegressor.__init__ entirely and passed the kwargs
        # to its base class instead, so strategy/quantile/constant were
        # never stored on the estimator.
        super(DummyRegressorNx, self).__init__(
            strategy=strategy, quantile=quantile, constant=constant
        )

    def _X(self, X):
        # One zero feature per sample; only len(X) matters.
        return np.zeros((len(X), 1))

    def fit(self, X, y, sample_weight=None):
        X = self._X(X)
        return super(DummyRegressorNx, self).fit(X, y, sample_weight=sample_weight)

    def predict(self, X):
        X = self._X(X)
        return super(DummyRegressorNx, self).predict(X)

    def score(self, X, y, sample_weight=None):
        X = self._X(X)
        return super(DummyRegressorNx, self).score(X, y, sample_weight=sample_weight)
|
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
from paddle.fluid.op import Operator
import paddle.fluid.layers as layers
import paddle.fluid as fluid
import random
import six
from sys import version_info
def create_tdm_travel():
    """Return the fixed per-leaf travel paths (root to leaf, 4 levels) of
    the demo TDM tree; the last path is padded with 0."""
    paths = [
        [1, 3, 7, 14], [1, 3, 7, 15], [1, 3, 8, 16], [1, 3, 8, 17],
        [1, 4, 9, 18], [1, 4, 9, 19], [1, 4, 10, 20], [1, 4, 10, 21],
        [2, 5, 11, 22], [2, 5, 11, 23], [2, 5, 12, 24], [2, 5, 12, 25],
        [2, 6, 13, 0],
    ]
    return paths
def create_tdm_layer():
    """Return the node ids of each level of the demo TDM tree,
    from the top layer down to the leaves."""
    layers = [
        [1, 2],
        [3, 4, 5, 6],
        [7, 8, 9, 10, 11, 12, 13],
        [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25],
    ]
    return layers
# Map textual dtype names to Paddle VarDesc enum values (as plain ints) so
# they can be passed in the op's 'dtype' attribute.
type_dict = {
    "int32": int(core.VarDesc.VarType.INT32),
    "int64": int(core.VarDesc.VarType.INT64)
}
class TestTDMSamplerOp(OpTest):
    """Checks tdm_sampler outputs: dtypes, per-layer uniqueness of samples,
    sample legality against the layer node sets, labels and padding mask,
    and that the positive samples trace the input's travel path."""

    def setUp(self):
        self.__class__.op_type = "tdm_sampler"
        self.config()
        self.tree_travel = create_tdm_travel()
        self.tree_layer = create_tdm_layer()
        # output width: one positive per layer plus the negatives
        output_0 = self.x_shape[0]
        output_1 = len(self.neg_samples_num_list) + \
            np.sum(self.neg_samples_num_list)
        self.output_shape = (output_0, output_1)
        self.layer_sample_nums = [1 + i for i in self.neg_samples_num_list]
        layer_node_num_list = [len(i) for i in self.tree_layer]
        # flatten the layers and record their offsets as an LoD
        tree_layer_offset_lod = [0]
        tree_layer_flat = []
        node_nums = 0
        for layer_idx, layer_node in enumerate(layer_node_num_list):
            tree_layer_flat += self.tree_layer[layer_idx]
            node_nums += layer_node
            tree_layer_offset_lod.append(node_nums)
        travel_np = np.array(self.tree_travel).astype(self.tree_dtype)
        layer_np = np.array(tree_layer_flat).astype(self.tree_dtype)
        layer_np = layer_np.reshape([-1, 1])
        self.x_np = np.random.randint(
            low=0, high=13, size=self.x_shape).astype(self.x_type)
        # placeholder outputs: only shapes/dtypes matter, values are
        # recomputed by the op and checked in test_check_output
        out = np.random.random(self.output_shape).astype(self.out_dtype)
        label = np.random.random(self.output_shape).astype(self.out_dtype)
        mask = np.random.random(self.output_shape).astype(self.out_dtype)
        self.attrs = {
            'neg_samples_num_list': self.neg_samples_num_list,
            'output_positive': True,
            'layer_offset_lod': tree_layer_offset_lod,
            'seed': 0,
            'dtype': type_dict[self.out_dtype]
        }
        self.inputs = {'X': self.x_np, 'Travel': travel_np, 'Layer': layer_np}
        self.outputs = {'Out': out, 'Labels': label, 'Mask': mask}

    def config(self):
        """set test shape & type"""
        self.neg_samples_num_list = [0, 0, 0, 0]
        self.x_shape = (10, 1)
        self.x_type = 'int32'
        self.tree_dtype = 'int32'
        self.out_dtype = 'int32'

    def test_check_output(self):
        places = self._get_places()
        for place in places:
            outs, fetch_list = self._calc_output(place)
            self.out = [np.array(out) for out in outs]
            x_res = self.out[fetch_list.index('Out')]
            label_res = self.out[fetch_list.index('Labels')]
            mask_res = self.out[fetch_list.index('Mask')]
            # check dtype
            if self.out_dtype == 'int32':
                assert x_res.dtype == np.int32
                assert label_res.dtype == np.int32
                assert mask_res.dtype == np.int32
            elif self.out_dtype == 'int64':
                assert x_res.dtype == np.int64
                assert label_res.dtype == np.int64
                assert mask_res.dtype == np.int64
            x_res = x_res.reshape(self.output_shape)
            label_res = label_res.reshape(self.output_shape)
            mask_res = mask_res.reshape(self.output_shape)
            layer_nums = len(self.neg_samples_num_list)
            for batch_ids, x_batch in enumerate(x_res):
                start_offset = 0
                positive_travel = []
                for layer_idx in range(layer_nums):
                    end_offset = start_offset + self.layer_sample_nums[layer_idx]
                    sampling_res = x_batch[start_offset:end_offset]
                    sampling_res_list = sampling_res.tolist()
                    positive_travel.append(sampling_res_list[0])
                    label_sampling_res = label_res[batch_ids][start_offset:
                                                             end_offset]
                    mask_sampling_res = mask_res[batch_ids][start_offset:end_offset]
                    # check unique
                    if sampling_res_list[0] != 0:
                        assert len(set(sampling_res_list)) == len(
                            sampling_res_list
                        ), "len(set(sampling_res_list)): {}, len(sampling_res_list): {} , sample_res: {}, label_res:{}, mask_res: {}".format(
                            len(set(sampling_res_list)),
                            len(sampling_res_list), sampling_res,
                            label_sampling_res, mask_sampling_res)
                    # check legal
                    # BUG FIX: build a fresh list instead of appending 0 to
                    # self.tree_layer[layer_idx] in place, which grew the
                    # shared tree by one 0 per batch per layer.
                    layer_node = self.tree_layer[layer_idx] + [0]
                    for sample in sampling_res_list:
                        assert (
                            sample in layer_node
                        ), "sample: {}, layer_node: {} , sample_res: {}, label_res: {}, mask_res:{}".format(
                            sample, layer_node, sampling_res, label_sampling_res,
                            mask_sampling_res)
                    # check label
                    label_flag = 1
                    if sampling_res[0] == 0:
                        label_flag = 0
                    assert label_sampling_res[0] == label_flag
                    # check mask
                    padding_index = np.where(sampling_res == 0)
                    assert not np.sum(
                        mask_sampling_res[padding_index]
                    ), "np.sum(mask_sampling_res[padding_index]): {} ".format(
                        np.sum(mask_sampling_res[padding_index]))
                    start_offset = end_offset
                # check travel legal
                assert self.tree_travel[int(self.x_np[
                    batch_ids])] == positive_travel
class TestCase1(TestTDMSamplerOp):
    def config(self):
        """test input int64: int64 input/tree tensors, int32 outputs"""
        self.neg_samples_num_list = [0, 0, 0, 0]
        self.x_shape = (10, 1)
        self.x_type = 'int64'
        self.tree_dtype = 'int64'
        self.out_dtype = 'int32'
class TestCase2(TestTDMSamplerOp):
    def config(self):
        """test dtype int64: int32 inputs/tree, int64 outputs"""
        self.neg_samples_num_list = [0, 0, 0, 0]
        self.x_shape = (10, 1)
        self.x_type = 'int32'
        self.tree_dtype = 'int32'
        self.out_dtype = 'int64'
class TestCase3(TestTDMSamplerOp):
    def config(self):
        """test all dtype int64: input, tree and output all int64"""
        self.neg_samples_num_list = [0, 0, 0, 0]
        self.x_shape = (10, 1)
        self.x_type = 'int64'
        self.tree_dtype = 'int64'
        self.out_dtype = 'int64'
class TestCase4(TestTDMSamplerOp):
    def config(self):
        """test one neg: one negative sample per layer"""
        self.neg_samples_num_list = [1, 1, 1, 1]
        self.x_shape = (10, 1)
        self.x_type = 'int64'
        self.tree_dtype = 'int32'
        self.out_dtype = 'int64'
class TestCase5(TestTDMSamplerOp):
    def config(self):
        """test normal neg: increasing negatives per layer"""
        self.neg_samples_num_list = [1, 2, 3, 4]
        self.x_shape = (10, 1)
        self.x_type = 'int64'
        self.tree_dtype = 'int32'
        self.out_dtype = 'int64'
class TestCase6(TestTDMSamplerOp):
    def config(self):
        """test huge batchsize: batch of 100 samples"""
        self.neg_samples_num_list = [1, 2, 3, 4]
        self.x_shape = (100, 1)
        self.x_type = 'int64'
        self.tree_dtype = 'int32'
        self.out_dtype = 'int64'
class TestCase7(TestTDMSamplerOp):
    def config(self):
        """test full neg: negatives equal to layer size minus the positive"""
        self.neg_samples_num_list = [1, 3, 6, 11]
        self.x_shape = (10, 1)
        self.x_type = 'int64'
        self.tree_dtype = 'int32'
        self.out_dtype = 'int64'
class TestTDMSamplerShape(unittest.TestCase):
    """Smoke test: build and run the fluid tdm_sampler layer end to end."""

    def test_shape(self):
        # lod_level=1 input holding leaf item ids
        x = fluid.layers.data(name='x', shape=[1], dtype='int32', lod_level=1)
        tdm_tree_travel = create_tdm_travel()
        tdm_tree_layer = create_tdm_layer()
        layer_node_num_list = [len(i) for i in tdm_tree_layer]
        # flatten the per-layer node ids into one array for the layer param
        tree_layer_flat = []
        for layer_idx, layer_node in enumerate(layer_node_num_list):
            tree_layer_flat += tdm_tree_layer[layer_idx]
        travel_array = np.array(tdm_tree_travel).astype('int32')
        layer_array = np.array(tree_layer_flat).astype('int32')
        neg_samples_num_list = [1, 2, 3, 4]
        leaf_node_num = 13
        # travel/layer tables are injected as parameter initializers
        sample, label, mask = fluid.contrib.layers.tdm_sampler(
            x,
            neg_samples_num_list,
            layer_node_num_list,
            leaf_node_num,
            tree_travel_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    travel_array)),
            tree_layer_attr=fluid.ParamAttr(
                initializer=fluid.initializer.NumpyArrayInitializer(
                    layer_array)),
            output_positive=True,
            output_list=True,
            seed=0,
            tree_dtype='int32',
            dtype='int32')
        place = fluid.CPUPlace()
        exe = fluid.Executor(place=place)
        exe.run(fluid.default_startup_program())
        # feed one row per leaf id; just checking the program executes
        feed = {
            'x': np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9],
                           [10], [11], [12]]).astype('int32')
        }
        exe.run(feed=feed)
# Standard unittest entry point when the file is run as a script.
if __name__ == "__main__":
    unittest.main()
|
|
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from owslib import crs
from pycsw.core import util
from pycsw.core.etree import etree
LOGGER = logging.getLogger(__name__)
TYPES = ['gml:Point', 'gml:LineString', 'gml:Polygon', 'gml:Envelope']
DEFAULT_SRS = crs.Crs('urn:x-ogc:def:crs:EPSG:6.11:4326')
def _poslist2wkt(poslist, axisorder, geomtype):
"""Repurpose gml:posList into WKT aware list"""
tmp = poslist.split()
poslist2 = []
if geomtype == 'polygon':
len_ = 8
elif geomtype == 'line':
len_ = 4
if len(tmp) < len_:
msg = 'Invalid number of coordinates in geometry'
LOGGER.error(msg)
raise RuntimeError(msg)
xlist = tmp[::2]
ylist = tmp[1::2]
if axisorder == 'yx':
for i, j in zip(ylist, xlist):
poslist2.append('%s %s' % (i, j))
else:
for i, j in zip(xlist, ylist):
poslist2.append('%s %s' % (i, j))
return ', '.join(poslist2)
class Geometry(object):
    """base geometry class

    Parses a GML geometry element (Point, LineString, Polygon or Envelope)
    into OGC WKT, detects its CRS, and reprojects to EPSG:4326 when the
    source CRS differs.
    """
    def __init__(self, element, nsmap):
        """initialize geometry parser

        :param element: lxml element containing one supported GML geometry
        :param nsmap: namespace prefix map used for XPath evaluation
        :raises RuntimeError: unsupported geometry type or reprojection error
        """
        self.nsmap = nsmap
        self.type = None
        self.wkt = None
        self.crs = None
        self._exml = element

        # return OGC WKT for GML geometry
        operand = element.xpath(
            '|'.join(TYPES),
            namespaces={'gml': 'http://www.opengis.net/gml'})[0]

        if 'srsName' in operand.attrib:
            LOGGER.debug('geometry srsName detected')
            self.crs = crs.Crs(operand.attrib['srsName'])
        else:
            LOGGER.debug('setting default geometry srsName %s', DEFAULT_SRS)
            self.crs = DEFAULT_SRS

        self.type = etree.QName(operand).localname

        if self.type == 'Point':
            self._get_point()
        elif self.type == 'LineString':
            self._get_linestring()
        elif self.type == 'Polygon':
            self._get_polygon()
        elif self.type == 'Envelope':
            self._get_envelope()
        else:
            raise RuntimeError('Unsupported geometry type (Must be one of %s)'
                               % ','.join(TYPES))

        # reproject data if needed
        if self.crs is not None and self.crs.code not in [4326, 'CRS84']:
            LOGGER.info('transforming geometry to 4326')
            try:
                self.wkt = self.transform(self.crs.code, DEFAULT_SRS.code)
            except Exception:
                LOGGER.exception('Coordinate transformation error')
                raise RuntimeError('Reprojection error: Invalid srsName')

    def _get_point(self):
        """Parse gml:Point into POINT WKT (honouring axis order)."""
        tmp = self._exml.find(util.nspath_eval('gml:Point/gml:pos',
                                               self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:Point geometry. Missing gml:pos')
        else:
            xypoint = tmp.text.split()
            if self.crs.axisorder == 'yx':
                self.wkt = 'POINT(%s %s)' % (xypoint[1], xypoint[0])
            else:
                self.wkt = 'POINT(%s %s)' % (xypoint[0], xypoint[1])

    def _get_linestring(self):
        """Parse gml:LineString into LINESTRING WKT."""
        tmp = self._exml.find(util.nspath_eval('gml:LineString/gml:posList',
                                               self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:LineString geometry.\
            Missing gml:posList')
        else:
            self.wkt = 'LINESTRING(%s)' % _poslist2wkt(tmp.text,
                                                       self.crs.axisorder,
                                                       'line')

    def _get_polygon(self):
        """Parse gml:Polygon into POLYGON WKT (exterior ring only)."""
        tmp = self._exml.find('.//%s' % util.nspath_eval('gml:posList',
                                                         self.nsmap))
        if tmp is None:
            # BUG FIX: error message previously said 'gml:LineString'
            raise RuntimeError('Invalid gml:Polygon geometry.\
            Missing gml:posList')
        else:
            self.wkt = 'POLYGON((%s))' % _poslist2wkt(tmp.text,
                                                      self.crs.axisorder,
                                                      'polygon')

    def _get_envelope(self):
        """Parse gml:Envelope (lower/upper corners) into a bbox polygon."""
        tmp = self._exml.find(util.nspath_eval('gml:Envelope/gml:lowerCorner',
                                               self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:Envelope geometry.\
            Missing gml:lowerCorner')
        else:
            lower_left = tmp.text

        tmp = self._exml.find(util.nspath_eval('gml:Envelope/gml:upperCorner',
                                               self.nsmap))
        if tmp is None:
            raise RuntimeError('Invalid gml:Envelope geometry.\
            Missing gml:upperCorner')
        else:
            upper_right = tmp.text

        llmin = lower_left.split()
        urmax = upper_right.split()

        if len(llmin) < 2 or len(urmax) < 2:
            raise RuntimeError('Invalid gml:Envelope geometry. \
            gml:lowerCorner and gml:upperCorner must hold at least x and y')

        if self.crs.axisorder == 'yx':
            self.wkt = util.bbox2wktpolygon('%s,%s,%s,%s' % (llmin[1],
                llmin[0], urmax[1], urmax[0]))
        else:
            self.wkt = util.bbox2wktpolygon('%s,%s,%s,%s' % (llmin[0],
                llmin[1], urmax[0], urmax[1]))

    def transform(self, src, dest):
        """transform coordinates from one CRS to another

        :param src: source EPSG code
        :param dest: destination EPSG code
        :returns: WKT of the reprojected geometry
        :raises RuntimeError: invalid projection or unsupported geometry
        """
        import pyproj
        from shapely.geometry import Point, LineString, Polygon
        from shapely.wkt import loads

        LOGGER.info('Transforming geometry from %s to %s', src, dest)

        vertices = []

        # BUG FIX: bare 'except:' clauses narrowed to 'except Exception:'
        try:
            proj_src = pyproj.Proj(init='epsg:%s' % src)
        except Exception:
            raise RuntimeError('Invalid source projection')

        try:
            proj_dst = pyproj.Proj(init='epsg:%s' % dest)
        except Exception:
            raise RuntimeError('Invalid destination projection')

        geom = loads(self.wkt)

        if geom.type == 'Point':
            newgeom = Point(pyproj.transform(proj_src, proj_dst,
                                             geom.x, geom.y))
            wkt2 = newgeom.wkt
        elif geom.type == 'LineString':
            for vertice in list(geom.coords):
                newgeom = pyproj.transform(proj_src, proj_dst,
                                           vertice[0], vertice[1])
                vertices.append(newgeom)
            linestring = LineString(vertices)
            wkt2 = linestring.wkt
        elif geom.type == 'Polygon':
            for vertice in list(geom.exterior.coords):
                newgeom = pyproj.transform(proj_src, proj_dst,
                                           vertice[0], vertice[1])
                vertices.append(newgeom)
            polygon = Polygon(vertices)
            wkt2 = polygon.wkt
        else:
            # BUG FIX: previously fell through and raised UnboundLocalError
            # on 'wkt2' for any other geometry type
            raise RuntimeError('Unsupported geometry type for transform: %s'
                               % geom.type)
        return wkt2
|
|
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
from neo4j._async.io._bolt4 import AsyncBolt4x4
from neo4j.conf import PoolConfig
from ...._async_compat import mark_async_test
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_stale(fake_socket, set_stale):
    # A zero max_connection_lifetime makes every connection immediately
    # stale, whether or not set_stale() was called.
    address = ("127.0.0.1", 7687)
    connection = AsyncBolt4x4(address, fake_socket(address), 0)
    if set_stale:
        connection.set_stale()
    assert connection.stale() is True
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale_if_not_enabled(fake_socket, set_stale):
    # A negative max lifetime disables the age-based check; only an
    # explicit set_stale() can make the connection report stale.
    address = ("127.0.0.1", 7687)
    connection = AsyncBolt4x4(address, fake_socket(address), -1)
    if set_stale:
        connection.set_stale()
    assert connection.stale() is set_stale
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale(fake_socket, set_stale):
    # With a very long max lifetime the age-based check never fires;
    # staleness is then driven solely by set_stale().
    address = ("127.0.0.1", 7687)
    connection = AsyncBolt4x4(address, fake_socket(address), 999999999)
    if set_stale:
        connection.set_stale()
    assert connection.stale() is set_stale
@pytest.mark.parametrize(("args", "kwargs", "expected_fields"), (
    (("", {}), {"db": "something"}, ({"db": "something"},)),
    (("", {}), {"imp_user": "imposter"}, ({"imp_user": "imposter"},)),
    (
        ("", {}),
        {"db": "something", "imp_user": "imposter"},
        ({"db": "something", "imp_user": "imposter"},)
    ),
))
@mark_async_test
async def test_extra_in_begin(fake_socket, args, kwargs, expected_fields):
    # BEGIN messages (signature 0x11) must carry the db/imp_user extras
    # through to the wire unchanged.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.begin(*args, **kwargs)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x11"
    assert tuple(fields) == expected_fields
@pytest.mark.parametrize(("args", "kwargs", "expected_fields"), (
    (("", {}), {"db": "something"}, ("", {}, {"db": "something"})),
    (("", {}), {"imp_user": "imposter"}, ("", {}, {"imp_user": "imposter"})),
    (
        ("", {}),
        {"db": "something", "imp_user": "imposter"},
        ("", {}, {"db": "something", "imp_user": "imposter"})
    ),
))
@mark_async_test
async def test_extra_in_run(fake_socket, args, kwargs, expected_fields):
    # RUN messages (signature 0x10) must carry the db/imp_user extras
    # alongside the query and parameters.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.run(*args, **kwargs)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x10"
    assert tuple(fields) == expected_fields
@mark_async_test
async def test_n_extra_in_discard(fake_socket):
    # DISCARD (signature 0x2F) forwards an explicitly given record count n.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.discard(n=666)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x2F"
    assert len(fields) == 1
    assert fields[0] == {"n": 666}
@pytest.mark.parametrize(
    "test_input, expected",
    [
        (666, {"n": -1, "qid": 666}),
        (-1, {"n": -1}),
    ]
)
@mark_async_test
async def test_qid_extra_in_discard(fake_socket, test_input, expected):
    # DISCARD with only qid set defaults n to -1; a qid of -1 (meaning
    # "last query") is omitted from the extras entirely.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.discard(qid=test_input)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x2F"
    assert len(fields) == 1
    assert fields[0] == expected
@pytest.mark.parametrize(
    "test_input, expected",
    [
        (777, {"n": 666, "qid": 777}),
        (-1, {"n": 666}),
    ]
)
@mark_async_test
async def test_n_and_qid_extras_in_discard(fake_socket, test_input, expected):
    # DISCARD with both n and qid: n is always sent, qid only when != -1.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.discard(n=666, qid=test_input)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x2F"
    assert len(fields) == 1
    assert fields[0] == expected
@pytest.mark.parametrize(
    "test_input, expected",
    [
        (666, {"n": 666}),
        (-1, {"n": -1}),
    ]
)
@mark_async_test
async def test_n_extra_in_pull(fake_socket, test_input, expected):
    # PULL (signature 0x3F) forwards the requested record count n as-is.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.pull(n=test_input)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x3F"
    assert len(fields) == 1
    assert fields[0] == expected
@pytest.mark.parametrize(
    "test_input, expected",
    [
        (777, {"n": -1, "qid": 777}),
        (-1, {"n": -1}),
    ]
)
@mark_async_test
async def test_qid_extra_in_pull(fake_socket, test_input, expected):
    # PULL with only qid set defaults n to -1; qid == -1 is omitted.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.pull(qid=test_input)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x3F"
    assert len(fields) == 1
    assert fields[0] == expected
@mark_async_test
async def test_n_and_qid_extras_in_pull(fake_socket):
    # PULL with both n and qid set must forward both extras.
    address = ("127.0.0.1", 7687)
    sock = fake_socket(address)
    connection = AsyncBolt4x4(address, sock, PoolConfig.max_connection_lifetime)
    connection.pull(n=666, qid=777)
    await connection.send_all()
    tag, fields = await sock.pop_message()
    assert tag == b"\x3F"
    assert len(fields) == 1
    assert fields[0] == {"n": 666, "qid": 777}
@mark_async_test
async def test_hello_passes_routing_metadata(fake_socket_pair):
    # The routing context configured on the connection must be echoed
    # inside the HELLO (0x01) extras as the "routing" entry.
    address = ("127.0.0.1", 7687)
    sockets = fake_socket_pair(address)
    await sockets.server.send_message(0x70, {"server": "Neo4j/4.4.0"})
    connection = AsyncBolt4x4(
        address, sockets.client, PoolConfig.max_connection_lifetime,
        routing_context={"foo": "bar"}
    )
    await connection.hello()
    msg_tag, msg_fields = await sockets.server.pop_message()
    assert msg_tag == 0x01
    assert len(msg_fields) == 1
    assert msg_fields[0]["routing"] == {"foo": "bar"}
@pytest.mark.parametrize(("hints", "valid"), (
    ({"connection.recv_timeout_seconds": 1}, True),
    ({"connection.recv_timeout_seconds": 42}, True),
    ({}, True),
    ({"whatever_this_is": "ignore me!"}, True),
    ({"connection.recv_timeout_seconds": -1}, False),
    ({"connection.recv_timeout_seconds": 0}, False),
    ({"connection.recv_timeout_seconds": 2.5}, False),
    ({"connection.recv_timeout_seconds": None}, False),
    ({"connection.recv_timeout_seconds": False}, False),
    ({"connection.recv_timeout_seconds": "1"}, False),
))
@mark_async_test
async def test_hint_recv_timeout_seconds(
    fake_socket_pair, hints, valid, caplog, mocker
):
    # The server can suggest a receive timeout via HELLO hints: valid
    # values must be applied to the socket, invalid ones ignored and logged.
    address = ("127.0.0.1", 7687)
    sockets = fake_socket_pair(address)
    sockets.client.settimeout = mocker.MagicMock()
    await sockets.server.send_message(
        0x70, {"server": "Neo4j/4.3.4", "hints": hints}
    )
    connection = AsyncBolt4x4(
        address, sockets.client, PoolConfig.max_connection_lifetime
    )
    with caplog.at_level(logging.INFO):
        await connection.hello()
    if valid:
        if "connection.recv_timeout_seconds" in hints:
            sockets.client.settimeout.assert_called_once_with(
                hints["connection.recv_timeout_seconds"]
            )
        else:
            sockets.client.settimeout.assert_not_called()
        assert not any(
            "recv_timeout_seconds" in msg and "invalid" in msg
            for msg in caplog.messages
        )
    else:
        sockets.client.settimeout.assert_not_called()
        hint_repr = repr(hints["connection.recv_timeout_seconds"])
        assert any(
            hint_repr in msg
            and "recv_timeout_seconds" in msg
            and "invalid" in msg
            for msg in caplog.messages
        )
|
|
import os
import platform
import unittest
from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient, GenConanfile
tool_conanfile = """
import os
from conans import ConanFile
class Tool(ConanFile):
name = "Tool"
version = "0.1"
exports_sources = "mytool*"
def package(self):
self.copy("mytool*")
def package_info(self):
self.env_info.PATH.append(self.package_folder)
"""
python_conanfile = """
import os
from conans import ConanFile
class Tool(ConanFile):
name = "PythonTool"
version = "0.1"
exports_sources = "mypythontool.py"
def package(self):
self.copy("mypythontool.py")
def package_info(self):
self.env_info.PYTHONPATH.append(self.package_folder)
"""
lib_conanfile = """
import os
from conans import ConanFile, tools
class MyLib(ConanFile):
name = "MyLib"
version = "0.1"
def build(self):
self.run("mytool")
import mypythontool
self.output.info(mypythontool.tool_hello_world())
"""
profile = """
[build_requires]
Tool/0.1@lasote/stable, PythonTool/0.1@lasote/stable
nonexistingpattern*: SomeTool/1.2@user/channel
"""
profile2 = """
[build_requires]
Tool/0.1@lasote/stable
PythonTool/0.1@lasote/stable
nonexistingpattern*: SomeTool/1.2@user/channel
"""
class BuildRequiresTest(unittest.TestCase):
    """Integration tests for [build_requires] handling in Conan profiles."""
    def test_duplicated_build_requires(self):
        """A build-require reachable from several graph nodes is installed once
        but applied to every node (package, dependency and test package)."""
        client = TestClient()
        build_require = """from conans import ConanFile
class Pkg(ConanFile):
    pass
"""
        client.save({"conanfile.py": build_require})
        client.run("create . build_require/0.1@user/testing")
        conanfile = """from conans import ConanFile
class Pkg(ConanFile):
    pass
"""
        client.save({"conanfile.py": conanfile})
        client.run("export . MyLib/0.1@user/testing")
        profile = """[build_requires]
build_require/0.1@user/testing
"""
        conanfile = """from conans import ConanFile
class Pkg(ConanFile):
    requires = "MyLib/0.1@user/testing"
"""
        test_conanfile = GenConanfile().with_test("pass")
        client.save({"conanfile.py": conanfile,
                     "test_package/conanfile.py": test_conanfile,
                     "myprofile": profile})
        client.run("create . Pkg/0.1@user/testing -pr=myprofile --build=missing")
        # retrieved from the cache exactly once ...
        self.assertEqual(1, str(client.out).count("build_require/0.1@user/testing "
                                                  "from local cache"))
        self.assertIn("build_require/0.1@user/testing: Already installed!", client.out)
        # ... but applied to each node of the dependency graph
        self.assertIn("Pkg/0.1@user/testing (test package): Applying build-requirement: "
                      "build_require/0.1@user/testing", client.out)
        self.assertIn("Pkg/0.1@user/testing: Applying build-requirement: "
                      "build_require/0.1@user/testing", client.out)
        self.assertIn("MyLib/0.1@user/testing: Applying build-requirement: "
                      "build_require/0.1@user/testing", client.out)
    def test_recursive_build_requires(self):
        """Profile build-requires apply to the consumed package only, not
        recursively to the build-requires themselves."""
        client = TestClient()
        profile = """[build_requires]
build1/0.1@user/testing
build2/0.1@user/testing
"""
        client.save({"conanfile.py": GenConanfile(),
                     "myprofile": profile})
        client.run("create . build1/0.1@user/testing")
        client.run("create . build2/0.1@user/testing")
        client.run("create . MyLib/0.1@user/testing -pr=myprofile --build")
        self.assertEqual(2, str(client.out).count(
            "Applying build-requirement"))
        self.assertEqual(1, str(client.out).count(
            "MyLib/0.1@user/testing: Applying build-requirement: build1/0.1@user/testing"))
        self.assertEqual(1, str(client.out).count(
            "MyLib/0.1@user/testing: Applying build-requirement: build2/0.1@user/testing"))
        client.run("info MyLib/0.1@user/testing -pr=myprofile --dry-build")
        # Only 1 node has build requires
        self.assertEqual(1, str(client.out).count("Build Requires"))
    def _create(self, client):
        """Export the Tool and PythonTool packages used by the tests below.

        Creates a platform-appropriate executable stub for 'mytool' and a
        python module providing tool_hello_world().
        """
        name = "mytool.bat" if platform.system() == "Windows" else "mytool"
        client.save({CONANFILE: tool_conanfile,
                     name: "echo Hello World!"}, clean_first=True)
        os.chmod(os.path.join(client.current_folder, name), 0o777)
        client.run("export . lasote/stable")
        client.save({CONANFILE: python_conanfile,
                     "mypythontool.py": """def tool_hello_world():
    return 'Hello world from python tool!'"""}, clean_first=True)
        client.run("export . lasote/stable")
    def test_profile_requires(self):
        """Profile build-requires are usable while building a cached package,
        with both comma-separated and per-line profile syntax."""
        client = TestClient()
        self._create(client)
        client.save({CONANFILE: lib_conanfile,
                     "profile.txt": profile,
                     "profile2.txt": profile2}, clean_first=True)
        client.run("export . lasote/stable")
        client.run("install MyLib/0.1@lasote/stable --profile ./profile.txt --build missing")
        self.assertIn("Hello World!", client.out)
        self.assertIn("MyLib/0.1@lasote/stable: Hello world from python tool!", client.out)
        client.run("install MyLib/0.1@lasote/stable --profile ./profile2.txt --build")
        self.assertIn("Hello World!", client.out)
        self.assertIn("MyLib/0.1@lasote/stable: Hello world from python tool!", client.out)
    def test_profile_open_requires(self):
        """Profile build-requires are injected when building a local
        (consumer) conanfile with `conan build`."""
        client = TestClient()
        self._create(client)
        client.save({CONANFILE: lib_conanfile,
                     "profile.txt": profile}, clean_first=True)
        client.run("install . --profile ./profile.txt --build missing")
        # the tool only runs at build() time, not at install time
        self.assertNotIn("Hello World!", client.out)
        client.run("build .")
        self.assertIn("Hello World!", client.out)
        self.assertIn("conanfile.py (MyLib/0.1): Hello world from python tool!",
                      client.out)
    def test_build_mode_requires(self):
        """--build patterns control which build-requires are built from
        sources; missing prebuilt packages are reported otherwise."""
        client = TestClient()
        self._create(client)
        client.save({CONANFILE: lib_conanfile,
                     "profile.txt": profile}, clean_first=True)
        client.run("install . --profile ./profile.txt", assert_error=True)
        self.assertIn("ERROR: Missing prebuilt package for "
                      "'PythonTool/0.1@lasote/stable', 'Tool/0.1@lasote/stable'", client.out)
        client.run("install . --profile ./profile.txt --build=PythonTool", assert_error=True)
        self.assertIn("ERROR: Missing prebuilt package for 'Tool/0.1@lasote/stable'", client.out)
        client.run("install . --profile ./profile.txt --build=*Tool")
        self.assertIn("Tool/0.1@lasote/stable: Generated conaninfo.txt", client.out)
        self.assertIn("PythonTool/0.1@lasote/stable: Generated conaninfo.txt", client.out)
        # now remove packages, ensure --build=missing also creates them
        client.run('remove "*" -p -f')
        client.run("install . --profile ./profile.txt --build=missing")
        self.assertIn("Tool/0.1@lasote/stable: Generated conaninfo.txt", client.out)
        self.assertIn("PythonTool/0.1@lasote/stable: Generated conaninfo.txt", client.out)
    def test_profile_test_requires(self):
        """Profile build-requires also apply to the test_package build."""
        client = TestClient()
        self._create(client)
        test_conanfile = """
import os
from conans import ConanFile, tools
class TestMyLib(ConanFile):
    requires = "MyLib/0.1@lasote/stable"
    def build(self):
        self.run("mytool")
        import mypythontool
        self.output.info(mypythontool.tool_hello_world())
    def test(self):
        pass
"""
        client.save({CONANFILE: lib_conanfile,
                     "test_package/conanfile.py": test_conanfile,
                     "profile.txt": profile,
                     "profile2.txt": profile2}, clean_first=True)
        client.run("create . lasote/stable --profile ./profile.txt --build missing")
        # once for the package build, once for the test_package build
        self.assertEqual(2, str(client.out).splitlines().count("Hello World!"))
        self.assertIn("MyLib/0.1@lasote/stable: Hello world from python tool!", client.out)
        self.assertIn("MyLib/0.1@lasote/stable (test package): Hello world from python tool!",
                      client.out)
    def test_consumer_patterns(self):
        """'&:' targets the consumer package, '&!:' targets everything but
        the consumer; unmatched patterns are ignored."""
        client = TestClient()
        self._create(client)
        test_conanfile = """
import os
from conans import ConanFile, tools
class TestMyLib(ConanFile):
    requires = "MyLib/0.1@lasote/stable"
    def build(self):
        self.run("mytool")
    def test(self):
        pass
"""
        lib_conanfile = """
import os
from conans import ConanFile, tools
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    def build(self):
        import mypythontool
        self.output.info(mypythontool.tool_hello_world())
"""
        profile_patterns = """
[build_requires]
&: Tool/0.1@lasote/stable
&!: PythonTool/0.1@lasote/stable
nonexistingpattern*: SomeTool/1.2@user/channel
"""
        client.save({CONANFILE: lib_conanfile,
                     "test_package/conanfile.py": test_conanfile,
                     "profile.txt": profile_patterns}, clean_first=True)
        client.run("create . lasote/stable --profile=./profile.txt --build=missing")
        self.assertEqual(1, str(client.out).splitlines().count("Hello World!"))
        self.assertIn("MyLib/0.1@lasote/stable: Hello world from python tool!", client.out)
        self.assertNotIn("Project: Hello world from python tool!", client.out)
    def test_build_requires_options(self):
        """Consumer options (scoped or not) don't break build-requires
        resolution and reuse."""
        client = TestClient()
        client.save({CONANFILE: GenConanfile("MyTool", "0.1")})
        client.run("export . lasote/stable")
        conanfile = """
from conans import ConanFile, tools
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    build_requires = "MyTool/0.1@lasote/stable"
    options = {"coverage": [True, False]}
    def build(self):
        self.output.info("Coverage %s" % self.options.coverage)
"""
        client.save({CONANFILE: conanfile}, clean_first=True)
        client.run("install . -o MyLib:coverage=True --build missing")
        self.assertIn("MyTool/0.1@lasote/stable from local cache", client.out)
        self.assertIn("MyTool/0.1@lasote/stable: Calling build()", client.out)
        client.run("build .")
        self.assertIn("conanfile.py (MyLib/0.1): Coverage True", client.out)
        client.save({CONANFILE: conanfile}, clean_first=True)
        client.run("install . -o coverage=True")
        self.assertIn("MyTool/0.1@lasote/stable from local cache", client.out)
        self.assertIn("MyTool/0.1@lasote/stable: Already installed!", client.out)
        client.run("build .")
        self.assertIn("conanfile.py (MyLib/0.1): Coverage True", client.out)
|
|
"""
Copyright (c) 2015, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals, absolute_import
import json
import os
from osbs.exceptions import OsbsResponseException
from atomic_reactor.plugins.pre_reactor_config import get_openshift_session, get_koji
from atomic_reactor.plugins.pre_fetch_sources import PLUGIN_FETCH_SOURCES_KEY
from atomic_reactor.constants import (PLUGIN_KOJI_UPLOAD_PLUGIN_KEY,
PLUGIN_VERIFY_MEDIA_KEY,
PLUGIN_RESOLVE_REMOTE_SOURCE,
SCRATCH_FROM)
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.util import get_build_json
class StoreMetadataInOSv3Plugin(ExitPlugin):
    # Exit plugin that writes the build's collected metadata (annotations
    # and labels) back onto the OpenShift build object.
    key = "store_metadata_in_osv3"
    is_allowed_to_fail = False
    def __init__(self, tasker, workflow, url=None, verify_ssl=True, use_auth=True):
        """
        constructor

        :param tasker: ContainerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param url: str, URL to OSv3 instance
        :param use_auth: bool, initiate authentication with openshift?
        """
        # call parent constructor
        super(StoreMetadataInOSv3Plugin, self).__init__(tasker, workflow)
        # fallback connection parameters, used only when the reactor config
        # does not supply an openshift session configuration
        self.openshift_fallback = {
            'url': url,
            'insecure': not verify_ssl,
            'auth': {'enable': use_auth}
        }
        # a source-container build is recognized by the fetch_sources plugin
        # having left a prebuild result
        self.source_build = PLUGIN_FETCH_SOURCES_KEY in self.workflow.prebuild_results
    def get_result(self, result):
        # failed plugins store their exception as the result; flatten to ''
        if isinstance(result, Exception):
            result = ''
        return result
    def get_pre_result(self, key):
        # prebuild plugin result, or '' when missing or failed
        return self.get_result(self.workflow.prebuild_results.get(key, ''))
    def get_post_result(self, key):
        # postbuild plugin result, or '' when missing or failed
        return self.get_result(self.workflow.postbuild_results.get(key, ''))
    def get_exit_result(self, key):
        # exit plugin result, or '' when missing or failed
        return self.get_result(self.workflow.exit_results.get(key, ''))
    def get_config_map(self):
        # annotations produced by the koji_upload plugin, or {} if it
        # did not run / failed
        annotations = self.get_post_result(PLUGIN_KOJI_UPLOAD_PLUGIN_KEY)
        if not annotations:
            return {}
        return annotations
    def get_digests(self):
        """
        Returns a map of repositories to digests
        """
        digests = {}  # repository -> digest
        for registry in self.workflow.push_conf.docker_registries:
            for image in self.workflow.tag_conf.images:
                image_str = image.to_str()
                if image_str in registry.digests:
                    digest = registry.digests[image_str]
                    digests[image.to_str(registry=False)] = digest
        return digests
    def _get_registries(self):
        """
        Return a list of registries that this build updated
        """
        return self.workflow.push_conf.all_registries
    def get_repositories(self):
        """Return a dict of primary/unique/floating repository strings."""
        # usually repositories formed from NVR labels
        # these should be used for pulling and layering
        primary_repositories = []
        for registry in self._get_registries():
            for image in self.workflow.tag_conf.primary_images:
                registry_image = image.copy()
                registry_image.registry = registry.uri
                primary_repositories.append(registry_image.to_str())
        # unique unpredictable repositories
        unique_repositories = []
        for registry in self._get_registries():
            for image in self.workflow.tag_conf.unique_images:
                registry_image = image.copy()
                registry_image.registry = registry.uri
                unique_repositories.append(registry_image.to_str())
        # floating repositories
        # these should be used for pulling and layering
        floating_repositories = []
        for registry in self._get_registries():
            for image in self.workflow.tag_conf.floating_images:
                registry_image = image.copy()
                registry_image.registry = registry.uri
                floating_repositories.append(registry_image.to_str())
        return {
            "primary": primary_repositories,
            "unique": unique_repositories,
            "floating": floating_repositories,
        }
    def get_pullspecs(self, digests):
        """Build one pullspec entry per (registry, image, digest version).

        :param digests: dict mapping image string to digest object
        """
        # v2 registry digests
        pullspecs = []
        for registry in self._get_registries():
            for image in self.workflow.tag_conf.images:
                image_str = image.to_str()
                if image_str in digests:
                    digest = digests[image_str]
                    for digest_version in digest.content_type:
                        if digest_version not in digest:
                            continue
                        pullspecs.append({
                            "registry": registry.uri,
                            "repository": image.to_str(registry=False, tag=False),
                            "tag": image.tag,
                            "digest": digest[digest_version],
                            "version": digest_version
                        })
        return pullspecs
    def get_plugin_metadata(self):
        # errors/timestamps/durations recorded by the plugin runner
        return {
            "errors": self.workflow.plugins_errors,
            "timestamps": self.workflow.plugins_timestamps,
            "durations": self.workflow.plugins_durations,
        }
    def get_filesystem_metadata(self):
        # best effort: filesystem usage stats must never fail the build
        data = {}
        try:
            data = self.workflow.fs_watcher.get_usage_data()
            self.log.debug("filesystem metadata: %s", data)
        except Exception:
            self.log.exception("Error getting filesystem stats")
        return data
    def _update_labels(self, labels, updates):
        # label values must be plain strings
        if updates:
            updates = {key: str(value) for key, value in updates.items()}
            labels.update(updates)
    def make_labels(self):
        """Collect labels from the workflow and from the build result."""
        labels = {}
        self._update_labels(labels, self.workflow.labels)
        self._update_labels(labels, self.workflow.build_result.labels)
        return labels
    def set_koji_task_annotations_whitelist(self, annotations):
        """Whitelist annotations to be included in koji task output

        Allow annotations whose names are listed in task_annotations_whitelist
        koji's configuration to be included in the build_annotations.json file,
        which will be attached in the koji task output.
        """
        koji_config = get_koji(self.workflow)
        whitelist = koji_config.get('task_annotations_whitelist')
        if whitelist:
            annotations['koji_task_annotations_whitelist'] = json.dumps(whitelist)
    def _update_annotations(self, annotations, updates):
        # annotation values are stored JSON-encoded
        if updates:
            updates = {key: json.dumps(value) for key, value in updates.items()}
            annotations.update(updates)
    def apply_build_result_annotations(self, annotations):
        self._update_annotations(annotations, self.workflow.build_result.annotations)
    def apply_plugin_annotations(self, annotations):
        self._update_annotations(annotations, self.workflow.annotations)
    def apply_remote_source_annotations(self, annotations):
        # TypeError covers the plugin not having run (result is ''),
        # KeyError covers a result without an 'annotations' entry
        try:
            rs_annotations = self.get_pre_result(PLUGIN_RESOLVE_REMOTE_SOURCE)['annotations']
        except (TypeError, KeyError):
            return
        annotations.update(rs_annotations)
    def run(self):
        """Assemble all metadata and store it on the OpenShift build.

        :return: dict with the stored 'annotations' and 'labels', or None
                 when the build json is malformed
        """
        metadata = get_build_json().get("metadata", {})
        try:
            build_id = metadata["name"]
        except KeyError:
            self.log.error("malformed build json")
            return
        self.log.info("build id = %s", build_id)
        osbs = get_openshift_session(self.workflow, self.openshift_fallback)
        if not self.source_build:
            try:
                commit_id = self.workflow.source.commit_id
            except AttributeError:
                commit_id = ""
            # for early flatpak failure before it creates Dockerfile and creates dockerfile_images
            if self.workflow.builder.dockerfile_images is None:
                base_image_name = ""
                base_image_id = ""
                parent_images_strings = {}
            else:
                base_image = self.workflow.builder.dockerfile_images.original_base_image
                if (base_image is not None and
                        not self.workflow.builder.dockerfile_images.base_from_scratch):
                    base_image_name = base_image
                    try:
                        base_image_id = self.workflow.builder.base_image_inspect['Id']
                    except KeyError:
                        base_image_id = ""
                else:
                    # FROM scratch (or no base): no base image metadata
                    base_image_name = ""
                    base_image_id = ""
                parent_images_strings = self.workflow.builder.parent_images_to_str()
                if self.workflow.builder.dockerfile_images.base_from_scratch:
                    parent_images_strings[SCRATCH_FROM] = SCRATCH_FROM
            try:
                with open(self.workflow.builder.df_path) as f:
                    dockerfile_contents = f.read()
            except AttributeError:
                dockerfile_contents = ""
        annotations = {
            'repositories': json.dumps(self.get_repositories()),
            'digests': json.dumps(self.get_pullspecs(self.get_digests())),
            'plugins-metadata': json.dumps(self.get_plugin_metadata()),
            'filesystem': json.dumps(self.get_filesystem_metadata()),
        }
        if self.source_build:
            annotations['image-id'] = ''
            if self.workflow.koji_source_manifest:
                annotations['image-id'] = self.workflow.koji_source_manifest['config']['digest']
        else:
            annotations['dockerfile'] = dockerfile_contents
            annotations['commit_id'] = commit_id
            annotations['base-image-id'] = base_image_id
            annotations['base-image-name'] = base_image_name
            annotations['image-id'] = self.workflow.builder.image_id or ''
            annotations['parent_images'] = json.dumps(parent_images_strings)
        media_types = []
        media_results = self.workflow.exit_results.get(PLUGIN_VERIFY_MEDIA_KEY)
        if isinstance(media_results, Exception):
            media_results = None
        if media_results:
            media_types += media_results
        if media_types:
            annotations['media-types'] = json.dumps(sorted(list(set(media_types))))
        tar_path = tar_size = tar_md5sum = tar_sha256sum = None
        if len(self.workflow.exported_image_sequence) > 0:
            tar_path = self.workflow.exported_image_sequence[-1].get("path")
            tar_size = self.workflow.exported_image_sequence[-1].get("size")
            tar_md5sum = self.workflow.exported_image_sequence[-1].get("md5sum")
            tar_sha256sum = self.workflow.exported_image_sequence[-1].get("sha256sum")
        # looks like that openshift can't handle value being None (null in json)
        if tar_size is not None and tar_md5sum is not None and tar_sha256sum is not None and \
                tar_path is not None:
            annotations["tar_metadata"] = json.dumps({
                "size": tar_size,
                "md5sum": tar_md5sum,
                "sha256sum": tar_sha256sum,
                "filename": os.path.basename(tar_path),
            })
        self.apply_remote_source_annotations(annotations)
        annotations.update(self.get_config_map())
        self.apply_plugin_annotations(annotations)
        self.apply_build_result_annotations(annotations)
        self.set_koji_task_annotations_whitelist(annotations)
        try:
            osbs.update_annotations_on_build(build_id, annotations)
        except OsbsResponseException:
            # log the payload before re-raising so failures are debuggable
            self.log.debug("annotations: %r", annotations)
            raise
        labels = self.make_labels()
        if labels:
            try:
                osbs.update_labels_on_build(build_id, labels)
            except OsbsResponseException:
                self.log.debug("labels: %r", labels)
                raise
        return {"annotations": annotations, "labels": labels}
|
|
# coding=utf-8
import numpy as np
import scipy.sparse as sprs
from scipy.interpolate import BarycentricInterpolator
def next_neighbors_periodic(p, ps, k):
    """
    Function to find the next neighbors for a periodic setup

    This function gives for a value p the k points next to it which are found in
    in the vector ps and the points which are found periodically.

    Args:
        p: the current point
        ps (np.ndarray): the grid with the potential neighbors
        k (int): number of neighbors to find
    Returns:
        list: the k next neighbors (indices into ps, ascending)
    """
    # map p into the unit period [0, 1) (the grid period is 1.0)
    p_bar = p - np.floor(p / 1.0) * 1.0
    # shift the grid so it starts at 0
    ps = ps - ps[0]
    # periodic distance: the closest of the three period-shifted copies,
    # computed vectorized instead of a per-element lambda
    distance_to_p = np.minimum(np.abs(ps + 1 - p_bar),
                               np.minimum(np.abs(ps - p_bar),
                                          np.abs(ps - 1 - p_bar)))
    # take the k indices with the least distance; the stable sort keeps the
    # original tie-breaking (lower index wins), then report them ascending
    return sorted(int(i) for i in np.argsort(distance_to_p, kind='stable')[:k])
def next_neighbors(p, ps, k):
    """
    Function to find the next neighbors for a non-periodic setup

    This function gives for a value p the k points next to it which are found in
    in the vector ps

    Args:
        p: the current point
        ps (np.ndarray): the grid with the potential neighbors
        k (int): number of neighbors to find
    Returns:
        list: the k next neighbors (indices into ps, ascending)
    """
    distance_to_p = np.abs(ps - p)
    # the k indices with the least distance; the stable sort keeps the
    # original tie-breaking (lower index wins), then report them ascending
    return sorted(int(i) for i in np.argsort(distance_to_p, kind='stable')[:k])
def continue_periodic_array(arr, nn):
    """
    Function to append an array for nn neighbors for periodicity

    Args:
        arr (np.ndarray): the input array
        nn (list): the neighbors
    Returns:
        np.ndarray: the continued array
    """
    indices = np.asarray(nn)
    gaps = np.diff(indices)
    if np.all(gaps == 1):
        # contiguous neighborhood: plain fancy indexing is enough
        return arr[indices]
    # a gap in the indices marks the periodic wrap-around; every value
    # after it is shifted down by one period (1.0) so the result stays
    # monotonic
    values = [arr[indices[0]]]
    offset = 0.
    for idx, gap in zip(indices[1:], gaps):
        if gap != 1:
            offset = -1
        values.append(arr[idx] + offset)
    return np.asarray(values)
def restriction_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1):
    """
    Function to contruct the restriction matrix in 1d using barycentric interpolation

    Args:
        fine_grid (np.ndarray): a one dimensional 1d array containing the nodes of the fine grid
        coarse_grid (np.ndarray): a one dimensional 1d array containing the nodes of the coarse grid
        k (int): order of the restriction
        periodic (bool): flag to indicate periodicity
        pad (int): padding parameter for boundaries
    Returns:
        sprs.csc_matrix: restriction matrix
    """
    n_g = coarse_grid.size
    # seed vector for the k Lagrange basis polynomials; it is independent of
    # the grid point, so build it once instead of once per row
    circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
    if periodic:
        M = np.zeros((coarse_grid.size, fine_grid.size))
        for i, p in zip(range(n_g), coarse_grid):
            nn = next_neighbors_periodic(p, fine_grid, k)
            cont_arr = continue_periodic_array(fine_grid, nn)
            # shift the neighborhood by one period when p lies outside it
            if p > np.mean(coarse_grid) and not (cont_arr[0] <= p <= cont_arr[-1]):
                cont_arr += 1
            bary_pol = []
            for m in range(k):
                bary_pol.append(BarycentricInterpolator(cont_arr, np.roll(circulating_one, m)))
            M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
    else:
        M = np.zeros((coarse_grid.size, fine_grid.size + 2 * pad))
        # the padded fine grid is loop-invariant: hoisted out of the loop
        # (it used to be recomputed for every coarse grid point)
        padded_f_grid = border_padding(fine_grid, pad, pad)
        for i, p in zip(range(n_g), coarse_grid):
            nn = next_neighbors(p, padded_f_grid, k)
            # construct the lagrange polynomials for the k neighbors
            bary_pol = []
            for m in range(k):
                bary_pol.append(BarycentricInterpolator(padded_f_grid[nn], np.roll(circulating_one, m)))
            M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
        if pad > 0:
            # drop the padding columns again
            M = M[:, pad:-pad]
    return sprs.csc_matrix(M)
def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1, equidist_nested=True):
    """
    Function to contruct the interpolation matrix in 1d using barycentric interpolation

    (the docstring used to say "restriction matrix" — this builds the
    coarse-to-fine interpolation)

    Args:
        fine_grid (np.ndarray): a one dimensional 1d array containing the nodes of the fine grid
        coarse_grid (np.ndarray): a one dimensional 1d array containing the nodes of the coarse grid
        k (int): order of the restriction
        periodic (bool): flag to indicate periodicity
        pad (int): padding parameter for boundaries
        equidist_nested (bool): shortcut possible, if nodes are equidistant and nested
    Returns:
        sprs.csc_matrix: interpolation matrix
    """
    n_f = fine_grid.size
    # seed vector for the k Lagrange basis polynomials; loop-invariant, so
    # build it once up front instead of inside each inner loop
    circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
    if periodic:
        M = np.zeros((fine_grid.size, coarse_grid.size))
        if equidist_nested:
            for i, p in zip(range(n_f), fine_grid):
                if i % 2 == 0:
                    # even fine nodes coincide with coarse nodes: direct copy
                    M[i, int(i / 2)] = 1.0
                else:
                    nn = []
                    cpos = int(i / 2)
                    offset = int(k / 2)
                    for j in range(k):
                        nn.append(cpos - offset + 1 + j)
                        if nn[-1] < 0:
                            nn[-1] += coarse_grid.size
                        elif nn[-1] > coarse_grid.size - 1:
                            nn[-1] -= coarse_grid.size
                    nn = sorted(nn)
                    if len(nn) > 0:
                        cont_arr = continue_periodic_array(coarse_grid, nn)
                    else:
                        cont_arr = coarse_grid
                    # shift the neighborhood by one period when p lies outside
                    if p > np.mean(fine_grid) and not (cont_arr[0] <= p <= cont_arr[-1]):
                        cont_arr += 1
                    bary_pol = []
                    for m in range(k):
                        bary_pol.append(BarycentricInterpolator(cont_arr, np.roll(circulating_one, m)))
                    M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
        else:
            for i, p in zip(range(n_f), fine_grid):
                nn = next_neighbors_periodic(p, coarse_grid, k)
                cont_arr = continue_periodic_array(coarse_grid, nn)
                if p > np.mean(fine_grid) and not (cont_arr[0] <= p <= cont_arr[-1]):
                    cont_arr += 1
                bary_pol = []
                for m in range(k):
                    bary_pol.append(BarycentricInterpolator(cont_arr, np.roll(circulating_one, m)))
                M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
    else:
        M = np.zeros((fine_grid.size, coarse_grid.size + 2 * pad))
        padded_c_grid = border_padding(coarse_grid, pad, pad)
        if equidist_nested:
            for i, p in zip(range(n_f), fine_grid):
                if i % 2 != 0:
                    # odd fine nodes coincide with (padded) coarse nodes
                    M[i, int((i - 1) / 2) + 1] = 1.0
                else:
                    nn = []
                    cpos = int(i / 2)
                    offset = int(k / 2)
                    for j in range(k):
                        nn.append(cpos - offset + 1 + j)
                        if nn[-1] < 0:
                            nn[-1] += k
                        elif nn[-1] > coarse_grid.size + 1:
                            nn[-1] -= k
                    nn = sorted(nn)
                    # construct the lagrange polynomials for the k neighbors
                    bary_pol = []
                    for m in range(k):
                        bary_pol.append(BarycentricInterpolator(padded_c_grid[nn], np.roll(circulating_one, m)))
                    M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
        else:
            for i, p in zip(range(n_f), fine_grid):
                nn = next_neighbors(p, padded_c_grid, k)
                # construct the lagrange polynomials for the k neighbors
                bary_pol = []
                for m in range(k):
                    bary_pol.append(BarycentricInterpolator(padded_c_grid[nn], np.roll(circulating_one, m)))
                M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
        if pad > 0:
            # drop the padding columns again
            M = M[:, pad:-pad]
    return sprs.csc_matrix(M)
def border_padding(grid, l, r, pad_type='mirror'):
    """
    Pad/embed a 1D array at its boundaries.

    Args:
        grid (np.ndarray): the 1D input array
        l (int): number of padding points on the left boundary
        r (int): number of padding points on the right boundary
        pad_type (str): type of padding; only 'mirror' is implemented, which
            reflects the grid about its first/last entry (any other value
            leaves the pad entries at zero)

    Returns:
        np.ndarray: array of size ``grid.size + l + r`` with the original
        grid in the middle and the padding values on both sides
    """
    assert l < grid.size and r < grid.size
    padded_arr = np.zeros(grid.size + l + r)
    if pad_type == 'mirror':
        # Reflect about the first/last grid point, vectorized:
        #   left:  p[i]      = 2*g[0]  - g[l-i]   for i in [0, l)
        #   right: p[-j-1]   = 2*g[-1] - g[-r+j-1] for j in [0, r)
        # Guards are required: arr[-0:] would select the whole array.
        if l > 0:
            padded_arr[:l] = 2 * grid[0] - grid[l:0:-1]
        if r > 0:
            padded_arr[-r:] = 2 * grid[-1] - grid[-2:-r - 2:-1]
    padded_arr[l:l + grid.size] = grid
    return padded_arr
|
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for weekly per project aggregation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests for weekly per project aggregation of
aggregator.projectcounts.
"""
import aggregator
import testcases
import os
import datetime
class WeeklyProjectAggregationTestCase(testcases.ProjectcountsDataTestCase):
    """TestCase for 'weekly' project aggregation functions.

    Daily rows look like 'YYYY-MM-DD,total,a,b,c'; weekly rows look like
    'YYYYWnn,total,a,b,c'.  From the expected values below, the weekly
    total column equals the sum of the other three weekly columns, and
    each weekly column is the sum over the 7 days of the ISO week
    (2014W27 = Mon 2014-06-30 .. Sun 2014-07-06).
    """
    def test_weekly_csv_non_existing_csv(self):
        """A fresh weekly CSV is created from a full week of daily data."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        # 2014-06-29 and 2014-07-07 fall outside ISO week 2014W27 and must
        # not contribute to the aggregate.
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date)
        # 28000 = 1000+...+7000, 28 = 1+...+7, 7 = 7*1; 28035 = 28000+28+7.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W27,28035,28000,28,7',
        ])
    def test_weekly_csv_existing_csv_existing_week(self):
        """Without force_recomputation, existing weekly rows are kept as-is."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date)
        # The pre-existing 2014W27 row wins over the freshly computed one.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_existing_week_force(self):
        """With force_recomputation=True, the existing weekly row is replaced."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date,
                                     force_recomputation=True)
        # Only 2014W27 is inside [first_date, last_date]; W26/W28 stay put.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,28035,28000,28,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_bad_dates_existing_week(self):
        """Bad dates are compensated by scaling the good days up to 7 days."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        # Note: 2014-07-03 is absent from csv_data as well as marked bad.
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        # 5 good days sum to 19000/19/5; scaled by 7/5 -> 26600/26/7,
        # and 26633 = 26600+26+7.  Existing W27 gets overwritten because
        # its computation involved bad dates.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,26633,26600,26,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_bad_sunday(self):
        """Scaling also works when the bad dates include the week's Sunday."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
            datetime.date(2014, 7, 6),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        # 4 good days sum to 12000/12/4; scaled by 7/4 -> 21000/21/7,
        # and 21028 = 21000+21+7.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,21028,21000,21,7',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_only_bad_dates_no_existing_data(self):
        """A week consisting solely of bad dates produces no weekly row."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        # All 7 days of 2014W27 are marked bad.
        bad_dates = [
            datetime.date(2014, 6, 30),
            datetime.date(2014, 7, 1),
            datetime.date(2014, 7, 2),
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
            datetime.date(2014, 7, 5),
            datetime.date(2014, 7, 6),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_existing_csv_only_bad_dates_existing_data(self):
        """An existing weekly row is dropped when all of its days are bad."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        self.create_file(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W27,4,5,6,7',
            '2014W28,8,9,10,11',
        ])
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01,2003,2000,2,1',
            '2014-07-02': '2014-07-02,3004,3000,3,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        bad_dates = [
            datetime.date(2014, 6, 30),
            datetime.date(2014, 7, 1),
            datetime.date(2014, 7, 2),
            datetime.date(2014, 7, 3),
            datetime.date(2014, 7, 4),
            datetime.date(2014, 7, 5),
            datetime.date(2014, 7, 6),
        ]
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date, bad_dates)
        # The stale 2014W27 row disappears from the output.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W26,1,2,3,4',
            '2014W28,8,9,10,11',
        ])
    def test_weekly_csv_zero_and_missing_data(self):
        """Zero cells count as zero; blank cells are skipped per column."""
        enwiki_file_abs = os.path.join(self.weekly_dir_abs, 'enwiki.csv')
        first_date = datetime.date(2014, 7, 1)
        last_date = datetime.date(2014, 7, 7)
        # 2014-07-01 has an explicit ' 0'; 2014-07-02 has a blank cell.
        csv_data = {
            '2014-06-29': '2014-06-29,1,2,3,4',
            '2014-06-30': '2014-06-30,1002,1000,1,1',
            '2014-07-01': '2014-07-01, 3, 0,2,1',
            '2014-07-02': '2014-07-02,3001,3000, ,1',
            '2014-07-03': '2014-07-03,4005,4000,4,1',
            '2014-07-04': '2014-07-04,5006,5000,5,1',
            '2014-07-05': '2014-07-05,6007,6000,6,1',
            '2014-07-06': '2014-07-06,7008,7000,7,1',
            '2014-07-07': '2014-07-07,5,6,7,8',
        }
        aggregator.update_weekly_csv(self.data_dir_abs, 'enwiki', csv_data,
                                     first_date, last_date)
        # Second column: 26000 (the explicit 0 contributes 0).  Third
        # column: 6 present days sum to 25, scaled 7/6 -> 29.  Total is
        # again the sum of the other columns: 26000+29+7 = 26036.
        self.assert_file_content_equals(enwiki_file_abs, [
            '2014W27,26036,26000,29,7',
        ])
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.pubsub_v1.types import pubsub
from .base import PublisherTransport, DEFAULT_CLIENT_INFO
from .grpc import PublisherGrpcTransport
class PublisherGrpcAsyncIOTransport(PublisherTransport):
    """gRPC AsyncIO backend transport for Publisher.
    The service that an application uses to manipulate topics,
    and to send messages to a topic.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # The underlying asyncio gRPC channel; created lazily in __init__.
    _grpc_channel: aio.Channel
    # Cache of generated stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "pubsub.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "pubsub.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[aio.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel given: derive the SSL credentials to use, honoring
            # the deprecated api_mtls_endpoint/client_cert_source path first.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                    ("grpc.keepalive_time_ms", 30000),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def create_topic(self) -> Callable[[pubsub.Topic], Awaitable[pubsub.Topic]]:
        r"""Return a callable for the create topic method over gRPC.
        Creates the given topic with the given name. See the [resource
        name rules]
        (https://cloud.google.com/pubsub/docs/admin#resource_names).
        Returns:
            Callable[[~.Topic],
                    Awaitable[~.Topic]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_topic" not in self._stubs:
            self._stubs["create_topic"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/CreateTopic",
                request_serializer=pubsub.Topic.serialize,
                response_deserializer=pubsub.Topic.deserialize,
            )
        return self._stubs["create_topic"]
    @property
    def update_topic(
        self,
    ) -> Callable[[pubsub.UpdateTopicRequest], Awaitable[pubsub.Topic]]:
        r"""Return a callable for the update topic method over gRPC.
        Updates an existing topic. Note that certain
        properties of a topic are not modifiable.
        Returns:
            Callable[[~.UpdateTopicRequest],
                    Awaitable[~.Topic]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_topic" not in self._stubs:
            self._stubs["update_topic"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/UpdateTopic",
                request_serializer=pubsub.UpdateTopicRequest.serialize,
                response_deserializer=pubsub.Topic.deserialize,
            )
        return self._stubs["update_topic"]
    @property
    def publish(
        self,
    ) -> Callable[[pubsub.PublishRequest], Awaitable[pubsub.PublishResponse]]:
        r"""Return a callable for the publish method over gRPC.
        Adds one or more messages to the topic. Returns ``NOT_FOUND`` if
        the topic does not exist.
        Returns:
            Callable[[~.PublishRequest],
                    Awaitable[~.PublishResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "publish" not in self._stubs:
            self._stubs["publish"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/Publish",
                request_serializer=pubsub.PublishRequest.serialize,
                response_deserializer=pubsub.PublishResponse.deserialize,
            )
        return self._stubs["publish"]
    @property
    def get_topic(self) -> Callable[[pubsub.GetTopicRequest], Awaitable[pubsub.Topic]]:
        r"""Return a callable for the get topic method over gRPC.
        Gets the configuration of a topic.
        Returns:
            Callable[[~.GetTopicRequest],
                    Awaitable[~.Topic]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_topic" not in self._stubs:
            self._stubs["get_topic"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/GetTopic",
                request_serializer=pubsub.GetTopicRequest.serialize,
                response_deserializer=pubsub.Topic.deserialize,
            )
        return self._stubs["get_topic"]
    @property
    def list_topics(
        self,
    ) -> Callable[[pubsub.ListTopicsRequest], Awaitable[pubsub.ListTopicsResponse]]:
        r"""Return a callable for the list topics method over gRPC.
        Lists matching topics.
        Returns:
            Callable[[~.ListTopicsRequest],
                    Awaitable[~.ListTopicsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_topics" not in self._stubs:
            self._stubs["list_topics"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/ListTopics",
                request_serializer=pubsub.ListTopicsRequest.serialize,
                response_deserializer=pubsub.ListTopicsResponse.deserialize,
            )
        return self._stubs["list_topics"]
    @property
    def list_topic_subscriptions(
        self,
    ) -> Callable[
        [pubsub.ListTopicSubscriptionsRequest],
        Awaitable[pubsub.ListTopicSubscriptionsResponse],
    ]:
        r"""Return a callable for the list topic subscriptions method over gRPC.
        Lists the names of the attached subscriptions on this
        topic.
        Returns:
            Callable[[~.ListTopicSubscriptionsRequest],
                    Awaitable[~.ListTopicSubscriptionsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_topic_subscriptions" not in self._stubs:
            self._stubs["list_topic_subscriptions"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/ListTopicSubscriptions",
                request_serializer=pubsub.ListTopicSubscriptionsRequest.serialize,
                response_deserializer=pubsub.ListTopicSubscriptionsResponse.deserialize,
            )
        return self._stubs["list_topic_subscriptions"]
    @property
    def list_topic_snapshots(
        self,
    ) -> Callable[
        [pubsub.ListTopicSnapshotsRequest], Awaitable[pubsub.ListTopicSnapshotsResponse]
    ]:
        r"""Return a callable for the list topic snapshots method over gRPC.
        Lists the names of the snapshots on this topic. Snapshots are
        used in
        `Seek <https://cloud.google.com/pubsub/docs/replay-overview>`__
        operations, which allow you to manage message acknowledgments in
        bulk. That is, you can set the acknowledgment state of messages
        in an existing subscription to the state captured by a snapshot.
        Returns:
            Callable[[~.ListTopicSnapshotsRequest],
                    Awaitable[~.ListTopicSnapshotsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_topic_snapshots" not in self._stubs:
            self._stubs["list_topic_snapshots"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/ListTopicSnapshots",
                request_serializer=pubsub.ListTopicSnapshotsRequest.serialize,
                response_deserializer=pubsub.ListTopicSnapshotsResponse.deserialize,
            )
        return self._stubs["list_topic_snapshots"]
    @property
    def delete_topic(
        self,
    ) -> Callable[[pubsub.DeleteTopicRequest], Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete topic method over gRPC.
        Deletes the topic with the given name. Returns ``NOT_FOUND`` if
        the topic does not exist. After a topic is deleted, a new topic
        may be created with the same name; this is an entirely new topic
        with none of the old configuration or subscriptions. Existing
        subscriptions to this topic are not deleted, but their ``topic``
        field is set to ``_deleted-topic_``.
        Returns:
            Callable[[~.DeleteTopicRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_topic" not in self._stubs:
            self._stubs["delete_topic"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/DeleteTopic",
                request_serializer=pubsub.DeleteTopicRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_topic"]
    @property
    def detach_subscription(
        self,
    ) -> Callable[
        [pubsub.DetachSubscriptionRequest], Awaitable[pubsub.DetachSubscriptionResponse]
    ]:
        r"""Return a callable for the detach subscription method over gRPC.
        Detaches a subscription from this topic. All messages retained
        in the subscription are dropped. Subsequent ``Pull`` and
        ``StreamingPull`` requests will return FAILED_PRECONDITION. If
        the subscription is a push subscription, pushes to the endpoint
        will stop.
        Returns:
            Callable[[~.DetachSubscriptionRequest],
                    Awaitable[~.DetachSubscriptionResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "detach_subscription" not in self._stubs:
            self._stubs["detach_subscription"] = self.grpc_channel.unary_unary(
                "/google.pubsub.v1.Publisher/DetachSubscription",
                request_serializer=pubsub.DetachSubscriptionRequest.serialize,
                response_deserializer=pubsub.DetachSubscriptionResponse.deserialize,
            )
        return self._stubs["detach_subscription"]
    @property
    def set_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
        r"""Return a callable for the set iam policy method over gRPC.
        Sets the IAM access control policy on the specified
        function. Replaces any existing policy.
        Returns:
            Callable[[~.SetIamPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "set_iam_policy" not in self._stubs:
            self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/SetIamPolicy",
                request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["set_iam_policy"]
    @property
    def get_iam_policy(
        self,
    ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
        r"""Return a callable for the get iam policy method over gRPC.
        Gets the IAM access control policy for a function.
        Returns an empty policy if the function exists and does
        not have a policy set.
        Returns:
            Callable[[~.GetIamPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_iam_policy" not in self._stubs:
            self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/GetIamPolicy",
                request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
                response_deserializer=policy_pb2.Policy.FromString,
            )
        return self._stubs["get_iam_policy"]
    @property
    def test_iam_permissions(
        self,
    ) -> Callable[
        [iam_policy_pb2.TestIamPermissionsRequest],
        Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
    ]:
        r"""Return a callable for the test iam permissions method over gRPC.
        Tests the specified permissions against the IAM access control
        policy for a function. If the function does not exist, this will
        return an empty set of permissions, not a NOT_FOUND error.
        Returns:
            Callable[[~.TestIamPermissionsRequest],
                    Awaitable[~.TestIamPermissionsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "test_iam_permissions" not in self._stubs:
            self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
                "/google.iam.v1.IAMPolicy/TestIamPermissions",
                request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
                response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
            )
        return self._stubs["test_iam_permissions"]
    def close(self):
        """Close the transport's gRPC channel.

        Returns the result of ``aio.Channel.close()``; for asyncio
        channels this is awaitable, so callers may await completion.
        """
        return self.grpc_channel.close()
__all__ = ("PublisherGrpcAsyncIOTransport",)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cqlhandling import CqlParsingRuleSet, Hint
from cassandra.metadata import maybe_escape_name
from cassandra.metadata import escape_name
# Scalar CQL3 type names offered for completion of type positions.
simple_cql_types = set(('ascii', 'bigint', 'blob', 'boolean', 'counter', 'decimal', 'double', 'float', 'inet', 'int',
                        'text', 'timestamp', 'timeuuid', 'uuid', 'varchar', 'varint'))
# Defensively drop collection type names; NOTE(review): none of these
# appear in the set above, so this call is currently a no-op.
simple_cql_types.difference_update(('set', 'map', 'list'))
from . import helptopics
# Help-topic provider backing the HELP command (CQL3 flavor).
cqldocs = helptopics.CQL3HelpTopics()
# json is stdlib on modern Pythons; fall back to simplejson otherwise.
try:
    import json
except ImportError:
    import simplejson as json
class UnexpectedTableStructure(UserWarning):
    """Warning raised when a table's on-disk schema looks unfamiliar.

    Carries a short message describing the surprise; the rendered text
    prefixes it with a fixed explanation about CQL translation.
    """

    def __init__(self, msg):
        # Keep the raw message available as an attribute for callers.
        self.msg = msg

    def __str__(self):
        prefix = 'Unexpected table structure; may not translate correctly to CQL. '
        return prefix + self.msg
# Keyspaces managed internally by Cassandra.
SYSTEM_KEYSPACES = ('system', 'system_traces', 'system_auth')
# Keyspaces that must not be ALTERed.  BUG FIX: the original wrote
# ('system') which is just the string 'system' (parentheses without a
# trailing comma do not make a tuple), so membership tests like
# `ks in NONALTERBALE_KEYSPACES` matched any substring of 'system'
# (e.g. 'sys', 'ste').  A one-element tuple gives exact-name matching.
NONALTERBALE_KEYSPACES = ('system',)
class Cql3ParsingRuleSet(CqlParsingRuleSet):
    """CQL3 dialect rules for cqlsh completion: keyword lists, table
    option metadata, and quoting/escaping helpers."""
    # All keywords recognized by the CQL3 completer.
    keywords = set((
        'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
        'limit', 'using', 'use', 'count', 'set',
        'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
        'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
        'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type',
        'compact', 'storage', 'order', 'by', 'asc', 'desc', 'clustering',
        'token', 'writetime', 'map', 'list', 'to', 'custom', 'if', 'not'
    ))
    # Keywords that may still be used as unquoted identifiers.
    unreserved_keywords = set((
        'key', 'clustering', 'ttl', 'compact', 'storage', 'type', 'values', 'custom', 'exists'
    ))
    # (CQL3 option name, schema_columnfamilies column name or None if same)
    columnfamily_layout_options = (
        ('bloom_filter_fp_chance', None),
        ('comment', None),
        ('dclocal_read_repair_chance', 'local_read_repair_chance'),
        ('gc_grace_seconds', None),
        ('min_index_interval', None),
        ('max_index_interval', None),
        ('read_repair_chance', None),
        ('default_time_to_live', None),
        ('speculative_retry', None),
        ('memtable_flush_period_in_ms', None),
    )
    columnfamily_layout_map_options = (
        # (CQL3 option name, schema_columnfamilies column name (or None if same),
        # list of known map keys)
        ('compaction', 'compaction_strategy_options',
            ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction')),
        ('compression', 'compression_parameters',
            ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
        ('caching', None,
            ('rows_per_partition', 'keys')),
    )
    # Options no longer supported by this CQL version (none currently).
    obsolete_cf_options = ()
    # Consistency levels offered for completion.
    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM',
        'SERIAL'
    )
    # Re-expose the driver's identifier-escaping helpers on the rule set.
    maybe_escape_name = staticmethod(maybe_escape_name)
    escape_name = staticmethod(escape_name)
    @classmethod
    def escape_value(cls, value):
        """Render a Python value as a CQL literal (quoting strings,
        doubling embedded single quotes)."""
        if value is None:
            return 'NULL' # this totally won't work
        if isinstance(value, bool):
            # booleans are lowered to 'true'/'false' and then quoted below
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        return "'%s'" % value.replace("'", "''")
    @staticmethod
    def dequote_name(name):
        """Strip surrounding double quotes from an identifier and
        collapse doubled inner quotes."""
        name = name.strip()
        if name == '':
            return name
        if name[0] == '"' and name[-1] == '"':
            name = name[1:-1].replace('""', '"')
        return name
    @staticmethod
    def dequote_value(cqlword):
        """Strip surrounding single quotes from a string literal and
        collapse doubled inner quotes."""
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword
# Singleton rule-set instance used throughout the rest of this module.
CqlRuleSet = Cql3ParsingRuleSet()

# convenience for remainder of module: lift frequently-used rule-set methods
# into module-level names, so e.g. dequote_name(x) works without the
# CqlRuleSet. prefix.
shorthands = ('completer_for', 'explain_completion',
              'dequote_value', 'dequote_name',
              'escape_value',
              'maybe_escape_name')

for shorthand in shorthands:
    globals()[shorthand] = getattr(CqlRuleSet, shorthand)
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS

# Core lexical and grammatical rules shared by every CQL3 statement.  The
# bracketed names inside the rules (e.g. [propname]=<cident>) create
# completion-time bindings that the completer functions below read back via
# ctxt.get_binding().
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<blobLiteral> ::= /0x[0-9a-f]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*].*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
| <blobLiteral>
| <collectionLiteral>
| <functionName> <functionArguments>
;
<functionArguments> ::= "(" ( <term> ( "," <term> )* )? ")"
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <term>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<functionName> ::= ( <identifier> ":" ":" )? <identifier>
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <createUserTypeStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <dropUserTypeStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
| <alterUserTypeStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
;
<authorizationStatement> ::= <grantStatement>
| <revokeStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | <K_TIMESTAMP> ) ;
<userType> ::= utname=<cfOrKsName> ;
<storageType> ::= <simpleStorageType> | <collectionType> | <userType> ;
<collectionType> ::= "map" "<" <simpleStorageType> "," ( <simpleStorageType> | <userType> ) ">"
| "list" "<" ( <simpleStorageType> | <userType> ) ">"
| "set" "<" ( <simpleStorageType> | <userType> ) ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<userTypeName> ::= ( ksname=<cfOrKsName> dot="." )? utname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( <K_KEY>
| <K_CLUSTERING>
# | <K_COUNT> -- to get count(*) completion, treat count as reserved
| <K_TTL>
| <K_COMPACT>
| <K_STORAGE>
| <K_TYPE>
| <K_VALUES> )
;
<property> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
'''
def prop_equals_completer(ctxt, cass):
    """Complete the '=' after a property name.

    Suppressed at table level when the word in the property-name slot is
    COMPACT or CLUSTERING: those begin the keyword clauses "COMPACT STORAGE"
    and "CLUSTERING ORDER BY", which take no equals sign.
    """
    if not working_on_keyspace(ctxt):
        last_prop = ctxt.get_binding('propname')[-1].upper()
        if last_prop in ('COMPACT', 'CLUSTERING'):
            return ()
    return ['=']

completer_for('property', 'propeq')(prop_equals_completer)
@completer_for('property', 'propname')
def prop_name_completer(ctxt, cass):
    """Dispatch property-name completion to the keyspace or table variant."""
    handler = ks_prop_name_completer if working_on_keyspace(ctxt) else cf_prop_name_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'propsimpleval')
def prop_val_completer(ctxt, cass):
    """Dispatch simple property-value completion to ks/cf variant."""
    handler = ks_prop_val_completer if working_on_keyspace(ctxt) else cf_prop_val_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'propmapkey')
def prop_val_mapkey_completer(ctxt, cass):
    """Dispatch map-key completion inside a property value to ks/cf variant."""
    handler = ks_prop_val_mapkey_completer if working_on_keyspace(ctxt) else cf_prop_val_mapkey_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'propmapval')
def prop_val_mapval_completer(ctxt, cass):
    """Dispatch map-value completion inside a property value to ks/cf variant."""
    handler = ks_prop_val_mapval_completer if working_on_keyspace(ctxt) else cf_prop_val_mapval_completer
    return handler(ctxt, cass)

@completer_for('propertyValue', 'ender')
def prop_val_mapender_completer(ctxt, cass):
    """Dispatch map-terminator (',' or '}') completion to ks/cf variant."""
    handler = ks_prop_val_mapender_completer if working_on_keyspace(ctxt) else cf_prop_val_mapender_completer
    return handler(ctxt, cass)
def ks_prop_name_completer(ctxt, cass):
    """Suggest keyspace property names: 'replication' first (it is
    mandatory), then 'durable_writes'."""
    props_so_far = ctxt.get_binding('propname', ())
    if 'replication' in props_so_far:
        return ["durable_writes"]
    return ['replication']
def ks_prop_val_completer(ctxt, cass):
    """Suggest an opening value for the keyspace property being set."""
    prop = ctxt.get_binding('propname')[-1]
    if prop == 'replication':
        return ["{'class': '"]
    if prop == 'durable_writes':
        return ["'true'", "'false'"]
    return ()
def ks_prop_val_mapkey_completer(ctxt, cass):
    """Complete keys inside the replication-options map of a keyspace.

    Returns () for any property other than 'replication'.  Offers 'class'
    until a strategy class has been chosen; afterwards offers the remaining
    keys appropriate to that strategy ('replication_factor' for the
    simple-factor strategies, a datacenter-name hint for
    NetworkTopologyStrategy).
    """
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    else:
        # no strategy class chosen yet; it must come first
        return ["'class'"]
    if repclass in CqlRuleSet.replication_factor_strategies:
        opts = set(('replication_factor',))
    elif repclass == 'NetworkTopologyStrategy':
        return [Hint('<dc_name>')]
    else:
        # Bug fix: an unrecognized strategy class previously fell through
        # with `opts` unbound, raising NameError.  Offer nothing instead.
        opts = set()
    return map(escape_value, opts.difference(keysseen))
def ks_prop_val_mapval_completer(ctxt, cass):
    """Suggest a value for the current key of the replication map."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return ()
    current_key = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if current_key == 'class':
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<term>')]
def ks_prop_val_mapender_completer(ctxt, cass):
    """Decide whether ',' (more entries expected) or '}' should follow the
    current entry of the replication map."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return [',']
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    repclass = None
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    if repclass is None:
        # strategy class not given yet, so at least one more entry is needed
        return [',']
    if repclass in CqlRuleSet.replication_factor_strategies:
        if 'replication_factor' not in keysseen:
            return [',']
    if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
        # NTS needs at least one datacenter entry besides 'class'
        return [',']
    return ['}']
def cf_prop_name_completer(ctxt, cass):
    """List every known table-level property name (scalar and map-valued)."""
    scalar_opts = CqlRuleSet.columnfamily_layout_options
    map_opts = CqlRuleSet.columnfamily_layout_map_options
    return [opt[0] for opt in scalar_opts + map_opts]
def cf_prop_val_completer(ctxt, cass):
    """Suggest an opening value or hint for the table property being set."""
    this_opt = ctxt.get_binding('propname')[-1]
    # map-valued options open with their most important key pre-typed
    map_openers = {
        'compression': ["{'sstable_compression': '"],
        'compaction': ["{'class': '"],
        'caching': ["{'keys': '"],
    }
    if this_opt in map_openers:
        return map_openers[this_opt]
    if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
                    'dclocal_read_repair_chance'):
        return [Hint('<float_between_0_and_1>')]
    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
                    'gc_grace_seconds', 'min_index_interval', 'max_index_interval'):
        return [Hint('<integer>')]
    return [Hint('<option_value>')]
def cf_prop_val_mapkey_completer(ctxt, cass):
    """Complete sub-option keys for map-valued table properties.

    Handles 'compaction', 'compression' and 'caching': offers the known
    keys not already present, plus strategy-specific compaction options
    once the 'class' key has been chosen.
    """
    optname = ctxt.get_binding('propname')[-1]
    for cql3option, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
        if optname == cql3option:
            break
    else:
        # not a map-valued option; nothing to offer
        return ()
    # NOTE: relies on Python 2 map() returning a list, so keysseen can be
    # consumed by zip() below and then reused in the difference() calls.
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    pairsseen = dict(zip(keysseen, valsseen))
    if optname == 'compression':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'caching':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'compaction':
        opts = set(subopts)
        try:
            csc = pairsseen['class']
        except KeyError:
            # no strategy chosen yet; 'class' must come first
            return ["'class'"]
        csc = csc.split('.')[-1]
        if csc == 'SizeTieredCompactionStrategy':
            opts.add('min_sstable_size')
            opts.add('min_threshold')
            opts.add('bucket_high')
            opts.add('bucket_low')
            opts.add('cold_reads_to_omit')
        elif csc == 'LeveledCompactionStrategy':
            opts.add('sstable_size_in_mb')
        return map(escape_value, opts)
    return ()
def cf_prop_val_mapval_completer(ctxt, cass):
    """Suggest a value for the current sub-option key of a map-valued
    table property (compaction/compression/caching)."""
    opt = ctxt.get_binding('propname')[-1]
    key = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if opt == 'compaction':
        if key == 'class':
            return map(escape_value, CqlRuleSet.available_compaction_classes)
        return [Hint('<option_value>')]
    if opt == 'compression':
        if key == 'sstable_compression':
            return map(escape_value, CqlRuleSet.available_compression_classes)
        return [Hint('<option_value>')]
    if opt == 'caching':
        if key == 'rows_per_partition':
            return ["'ALL'", "'NONE'", Hint('#rows_per_partition')]
        if key == 'keys':
            return ["'ALL'", "'NONE'"]
    return ()
def cf_prop_val_mapender_completer(ctxt, cass):
    """A table-property map entry may always continue (',') or close ('}')."""
    return [',', '}']
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
    """Offer the opening of a token(...) call in a term position."""
    return ['token(']

@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
    """Offer the simple (non-collection) CQL type names."""
    return simple_cql_types

@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Offer every keyspace name."""
    return map(maybe_escape_name, cass.get_keyspace_names())

# NOTE(review): the next two completers deliberately reuse the name
# ks_name_completer; each function is captured by its decorator before
# being shadowed by the next definition, so all three registrations work.
@completer_for('nonSystemKeyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Offer keyspace names, excluding the built-in system keyspaces."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
    return map(maybe_escape_name, ksnames)

@completer_for('alterableKeyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Offer keyspace names that may legally be ALTERed."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in NONALTERBALE_KEYSPACES]
    return map(maybe_escape_name, ksnames)

def cf_ks_name_completer(ctxt, cass):
    """Offer 'keyspace.' prefixes for qualified table references."""
    return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]

completer_for('columnFamilyName', 'ksname')(cf_ks_name_completer)

def cf_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator once a valid keyspace name has been typed."""
    name = dequote_name(ctxt.get_binding('ksname'))
    if name in cass.get_keyspace_names():
        return ['.']
    return []

completer_for('columnFamilyName', 'dot')(cf_ks_dot_completer)

@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Complete table names, scoped to the bound keyspace when one is given."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        cfnames = cass.get_columnfamily_names(ks)
    except Exception:
        # with no keyspace bound, a lookup failure just means nothing to
        # offer; otherwise surface the error
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, cfnames)
# user-type names are qualified exactly like table names
completer_for('userTypeName', 'ksname')(cf_ks_name_completer)
completer_for('userTypeName', 'dot')(cf_ks_dot_completer)

def ut_name_completer(ctxt, cass):
    """Complete user-defined type names, scoped to the bound keyspace."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        utnames = cass.get_usertype_names(ks)
    except Exception:
        # with no keyspace bound, a lookup failure just means nothing to offer
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, utnames)

completer_for('userTypeName', 'utname')(ut_name_completer)
completer_for('userType', 'utname')(ut_name_completer)

@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
    # we never want to provide completions through this production;
    # this is always just to allow use of some keywords as column
    # names, CF names, property values, etc.
    return ()
def get_table_meta(ctxt, cass):
    """Look up metadata for the table currently bound as ksname/cfname."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    cf = dequote_name(ctxt.get_binding('cfname'))
    return cass.get_table_meta(ks, cf)

def get_ut_layout(ctxt, cass):
    """Look up the field layout of the user type bound as ksname/utname."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    ut = dequote_name(ctxt.get_binding('utname'))
    return cass.get_usertype_layout(ks, ut)
def working_on_keyspace(ctxt):
    """True when the current statement targets a keyspace, i.e. the bound
    'wat' keyword is KEYSPACE or its legacy synonym SCHEMA."""
    return ctxt.get_binding('wat').upper() in ('KEYSPACE', 'SCHEMA')
# Grammar for USE and SELECT statements.
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" <selectClause>
"FROM" cf=<columnFamilyName>
( "WHERE" <whereClause> )?
( "ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
( "LIMIT" limit=<wholenumber> )?
( "ALLOW" "FILTERING" )?
;
<whereClause> ::= <relation> ( "AND" <relation> )*
;
<relation> ::= [rel_lhs]=<cident> ( "=" | "<" | ">" | "<=" | ">=" | "CONTAINS" ) <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=" | "CONTAINS") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= "DISTINCT"? <selector> ("AS" <cident>)? ("," <selector> ("AS" <cident>)?)*
| "*"
| "COUNT" "(" star=( "*" | "1" ) ")" ("AS" <cident>)?
;
<selector> ::= [colname]=<cident>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| <functionName> <selectionFunctionArguments>
;
<selectionFunctionArguments> ::= "(" ( <selector> ( "," <selector> )* )? ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
'''
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
    """Offer the next clustering column eligible for ORDER BY, in declared
    clustering order."""
    prev_order_cols = ctxt.get_binding('ordercol', ())
    keyname = ctxt.get_binding('keyname')
    if keyname is None:
        keyname = ctxt.get_binding('rel_lhs', ())
        if not keyname:
            return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
    layout = get_table_meta(ctxt, cass)
    order_by_candidates = [col.name for col in layout.clustering_key]
    if len(order_by_candidates) > len(prev_order_cols):
        return [maybe_escape_name(order_by_candidates[len(prev_order_cols)])]
    return [Hint('No more orderable columns here.')]

@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
    """Offer the opening of a TOKEN( restriction."""
    return ['TOKEN(']

@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
    """TOKEN() may only wrap the partition-key columns."""
    layout = get_table_meta(ctxt, cass)
    return [key.name for key in layout.partition_key]

@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Offer columns that can legally be restricted in this WHERE clause.

    Partition-key and clustering columns become filterable left-to-right as
    their predecessors get restricted; secondary-indexed columns are always
    offered.
    """
    layout = get_table_meta(ctxt, cass)
    filterable = set((layout.partition_key[0].name, layout.clustering_key[0].name))
    already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs', ()))
    for num in range(1, len(layout.partition_key)):
        if layout.partition_key[num - 1].name in already_filtered_on:
            filterable.add(layout.partition_key[num].name)
        else:
            break
    for num in range(1, len(layout.clustering_key)):
        if layout.clustering_key[num - 1].name in already_filtered_on:
            filterable.add(layout.clustering_key[num].name)
        else:
            break
    for cd in layout.columns.values():
        if cd.index:
            filterable.add(cd.name)
    return map(maybe_escape_name, filterable)

@completer_for('selectClause', 'star')
def select_count_star_completer(ctxt, cass):
    """COUNT(*) is the suggested form (COUNT(1) is also accepted)."""
    return ['*']

explain_completion('selector', 'colname')
# Grammar for INSERT statements and the shared USING options.
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
"(" [colname]=<cident> "," [colname]=<cident>
( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<term> valcomma="," [newval]=<term>
( valcomma="," [newval]=<term> )* valcomma=")"
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
def regular_column_names(table_meta):
    """Return the names of the non-primary-key ("regular") columns.

    Args:
      table_meta: table metadata object exposing .columns (a dict keyed by
        column name), and .partition_key / .clustering_key (sequences of
        column objects with a .name attribute); may be None.

    Returns:
      A list of column names belonging to neither key; order unspecified
      (callers use it for completion sets).  Empty list when table_meta is
      falsy or has no columns.
    """
    if not table_meta or not table_meta.columns:
        return []
    key_names = set(key.name for key in table_meta.partition_key)
    key_names.update(key.name for key in table_meta.clustering_key)
    # (also fixes the local-variable typo 'regular_coulmns' from the original)
    return list(set(table_meta.columns.keys()) - key_names)
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
    """Offer primary-key columns first (in declared order), then any
    remaining regular columns not already listed."""
    layout = get_table_meta(ctxt, cass)
    colnames = set(map(dequote_name, ctxt.get_binding('colname', ())))
    keycols = layout.primary_key
    for k in keycols:
        if k.name not in colnames:
            return [maybe_escape_name(k.name)]
    normalcols = set(regular_column_names(layout)) - colnames
    return map(maybe_escape_name, normalcols)

@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
    """Suggest an opening token or hint for the value of the next column in
    the VALUES list, based on that column's CQL type."""
    layout = get_table_meta(ctxt, cass)
    # NOTE: relies on Python 2 map() returning a list (indexed below)
    insertcols = map(dequote_name, ctxt.get_binding('colname'))
    valuesdone = ctxt.get_binding('newval', ())
    if len(valuesdone) >= len(insertcols):
        return []
    curcol = insertcols[len(valuesdone)]
    cqltype = layout.columns[curcol].data_type
    coltype = cqltype.typename
    if coltype in ('map', 'set'):
        return ['{']
    if coltype == 'list':
        return ['[']
    if coltype == 'boolean':
        return ['true', 'false']
    return [Hint('<value for %s (%s)>' % (maybe_escape_name(curcol),
                                          cqltype.cql_parameterized_type()))]

@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
    """Offer ',' while values remain to be supplied, then ')'."""
    layout = get_table_meta(ctxt, cass)
    numcols = len(ctxt.get_binding('colname', ()))
    numvals = len(ctxt.get_binding('newval', ()))
    if numcols > numvals:
        return [',']
    return [')']

@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """Offer the USING options (TIMESTAMP/TTL) not already given."""
    opts = set('TIMESTAMP TTL'.split())
    for opt in ctxt.get_binding('insertopt', ()):
        opts.discard(opt.split()[0])
    return opts
# Grammar for UPDATE statements and the SET-clause assignments.
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
;
<assignment> ::= updatecol=<cident>
( "=" update_rhs=( <value> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )
| indexbracket="[" <term> "]" "=" <term> )
;
'''
@completer_for('updateStatement', 'updateopt')
def insert_option_completer(ctxt, cass):
    """Offer the USING options (TIMESTAMP/TTL) not already given.

    NOTE(review): reuses the name insert_option_completer from the INSERT
    section; the decorator captures this function before it shadows that one.
    """
    opts = set('TIMESTAMP TTL'.split())
    for opt in ctxt.get_binding('updateopt', ()):
        opts.discard(opt.split()[0])
    return opts

@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Only non-primary-key columns can appear on the left of SET."""
    layout = get_table_meta(ctxt, cass)
    return map(maybe_escape_name, regular_column_names(layout))

@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """Suggest the right-hand-side opener based on the column's CQL type."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    cqltype = layout.columns[curcol].data_type
    coltype = cqltype.typename
    if coltype == 'counter':
        # counters may only be updated as "col = col + n" / "col = col - n"
        return [maybe_escape_name(curcol)]
    if coltype in ('map', 'set'):
        return ["{"]
    if coltype == 'list':
        return ["["]
    return [Hint('<term (%s)>' % cqltype.cql_parameterized_type())]

@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """The '+'/'-' operators apply only to counter columns."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return ['+', '-'] if layout.columns[curcol].data_type.typename == 'counter' else []

@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
    """Hint at the increment amount, but only for counter columns."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    if layout.columns[curcol].data_type.typename == 'counter':
        return [Hint('<wholenumber>')]
    return []

@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
    """Offer '+' to continue a list-prepend assignment (col = [x] + col)."""
    rhs = ctxt.get_binding('update_rhs')
    if rhs.startswith('['):
        return ['+']
    return []

@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
    """After 'col = [x] +', the only valid operand is the column itself."""
    rhs = ctxt.get_binding('update_rhs')
    if rhs.startswith('['):
        colname = dequote_name(ctxt.get_binding('updatecol'))
        return [maybe_escape_name(colname)]
    return []

@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
    """Offer '[' for element assignment on map/list columns."""
    layout = get_table_meta(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    coltype = layout.columns[curcol].data_type.typename
    if coltype in ('map', 'list'):
        return ['[']
    return []
# Grammar and completers for DELETE statements.
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> )?
"WHERE" <whereClause>
;
<deleteSelector> ::= delcol=<cident> ( memberbracket="[" memberselector=<term> "]" )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''

@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    """Offer the USING options not already given (only TIMESTAMP here)."""
    opts = set('TIMESTAMP'.split())
    for opt in ctxt.get_binding('delopt', ()):
        opts.discard(opt.split()[0])
    return opts

@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
    """Only non-primary-key columns can be deleted individually."""
    layout = get_table_meta(ctxt, cass)
    return map(maybe_escape_name, regular_column_names(layout))
# Grammar and completer for BEGIN ... APPLY BATCH, plus TRUNCATE.
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"?
( [batchstmt]=<batchStatementMember> ";"? )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''

@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    """Offer the USING options not already given (only TIMESTAMP here)."""
    opts = set('TIMESTAMP'.split())
    for opt in ctxt.get_binding('batchopt', ()):
        opts.discard(opt.split()[0])
    return opts

syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
;
'''
# Grammar and completer for CREATE KEYSPACE.
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "NOT" "EXISTS")? ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''

@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
    """Prefer KEYSPACE; only offer the legacy SCHEMA spelling when the user
    has already started typing something."""
    # would prefer to get rid of the "schema" nomenclature in cql3
    if ctxt.get_binding('partial', '') == '':
        return ['KEYSPACE']
    return ['KEYSPACE', 'SCHEMA']
# Grammar for CREATE TABLE/COLUMNFAMILY, covering both single-column and
# composite primary-key forms.
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "NOT" "EXISTS")?
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <simpleStorageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <simpleStorageType>
"," [newcolname]=<cident> <storageType> ( "static" )?
( "," [newcolname]=<cident> <storageType> ( "static" )? )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
    """Offer the columns declared so far as CLUSTERING ORDER BY candidates."""
    colnames = map(dequote_name, ctxt.get_binding('newcolname', ()))
    # Definitely some of these aren't valid for ordering, but I'm not sure
    # precisely which are. This is good enough for now
    return colnames

@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
    """Prefer TABLE; only offer the legacy COLUMNFAMILY spelling once the
    user has started typing something."""
    # would prefer to get rid of the "columnfamily" nomenclature in cql3
    if ctxt.get_binding('partial', '') == '':
        return ['TABLE']
    return ['TABLE', 'COLUMNFAMILY']

explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')

@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator once a valid keyspace name has been typed."""
    ks = dequote_name(ctxt.get_binding('ks'))
    if ks in cass.get_keyspace_names():
        return ['.']
    return []

@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
    """Offer the next declared column as a composite-partition-key part."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('ptkey', ())
    pieces_already = map(dequote_name, pieces_already)
    # skip columns already used as key parts
    while cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    # at least one declared column must remain for the non-key position
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]

@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
    """Offer the next declared column as a primary-key component."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ())
    pieces_already = map(dequote_name, pieces_already)
    while cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]

@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
    """Offer KEY together with its opening paren."""
    return ['KEY (']

@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
    """Offer the opening paren of the PRIMARY KEY column list."""
    return ['(']

@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
    """Offer ',' only while declared columns remain usable as key parts."""
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('pkey', ())
    if len(pieces_already) >= len(cols_declared) - 1:
        return ()
    return [',']
# Grammar for CREATE INDEX and CREATE TYPE.
syntax_rules += r'''
<createIndexStatement> ::= "CREATE" "CUSTOM"? "INDEX" ("IF" "NOT" "EXISTS")? indexname=<identifier>? "ON"
cf=<columnFamilyName> "(" col=<cident> ")"
( "USING" <stringLiteral> ( "WITH" "OPTIONS" "=" <mapLiteral> )? )?
;
<createUserTypeStatement> ::= "CREATE" "TYPE" ( ks=<nonSystemKeyspaceName> dot="." )? typename=<cfOrKsName> "(" newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )*
")"
;
'''

explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
explain_completion('createUserTypeStatement', 'typename', '<new_type_name>')
explain_completion('createUserTypeStatement', 'newcol', '<new_field_name>')

@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    """Only columns that do not already have an index can get one."""
    layout = get_table_meta(ctxt, cass)
    colnames = [cd.name for cd in layout.columns.values() if not cd.index]
    return map(maybe_escape_name, colnames)
# Grammar for the DROP statements and (possibly qualified) index names.
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ("IF" "EXISTS")? ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
;
<indexName> ::= ( ksname=<idxOrKsName> dot="." )? idxname=<idxOrKsName> ;
<idxOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<dropIndexStatement> ::= "DROP" "INDEX" ("IF" "EXISTS")? idx=<indexName>
;
<dropUserTypeStatement> ::= "DROP" "TYPE" ut=<userTypeName>
;
'''

@completer_for('indexName', 'ksname')
def idx_ks_name_completer(ctxt, cass):
    """Offer 'keyspace.' prefixes for qualified index references."""
    return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]

@completer_for('indexName', 'dot')
def idx_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator once a valid keyspace name has been typed."""
    name = dequote_name(ctxt.get_binding('ksname'))
    if name in cass.get_keyspace_names():
        return ['.']
    return []

@completer_for('indexName', 'idxname')
def idx_ks_idx_name_completer(ctxt, cass):
    """Complete index names, scoped to the bound keyspace when given."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        idxnames = cass.get_index_names(ks)
    except Exception:
        # with no keyspace bound, a lookup failure just means nothing to offer
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, idxnames)
# Grammar and completers for ALTER TABLE and ALTER TYPE.
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType> ("static")?
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
<alterUserTypeStatement> ::= "ALTER" "TYPE" ut=<userTypeName>
<alterTypeInstructions>
;
<alterTypeInstructions> ::= "RENAME" "TO" typename=<cfOrKsName>
| "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType>
| "RENAME" existcol=<cident> "TO" newcol=<cident>
;
'''

@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    """Offer the existing column names of the table being altered."""
    layout = get_table_meta(ctxt, cass)
    # iterating layout.columns yields its keys, i.e. the column names
    cols = [str(md) for md in layout.columns]
    return map(maybe_escape_name, cols)

@completer_for('alterTypeInstructions', 'existcol')
def alter_type_field_completer(ctxt, cass):
    """Offer the existing field names of the user type being altered."""
    layout = get_ut_layout(ctxt, cass)
    # NOTE(review): the loop variable shadows the builtin `tuple`
    fields = [tuple[0] for tuple in layout]
    return map(maybe_escape_name, fields)

explain_completion('alterInstructions', 'newcol', '<new_column_name>')
explain_completion('alterTypeInstructions', 'typename', '<new_type_name>')
explain_completion('alterTypeInstructions', 'newcol', '<new_field_name>')
# Grammar for ALTER KEYSPACE and the user-management statements.
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" ( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <property> ( "AND" <property> )*
;
'''

syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" ( "IF" "NOT" "EXISTS" )? <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" ( "IF" "EXISTS" )? <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
# Grammar for GRANT/REVOKE/LIST PERMISSIONS and the resources they target.
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <username>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <username>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <username> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
'''
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
    """Complete usernames by querying LIST USERS; quoting names that are not
    valid bare CQL identifiers."""
    def maybe_quote(name):
        if CqlRuleSet.is_valid_cql3_name(name):
            return name
        return "'%s'" % name

    # disable completion for CREATE USER.
    if ctxt.matched[0][0] == 'K_CREATE':
        return [Hint('<username>')]

    session = cass.session
    return [maybe_quote(row[0].replace("'", "''")) for row in session.execute("LIST USERS")]

# END SYNTAX/COMPLETION RULE DEFINITIONS

# register everything accumulated above with the shared parser rule set
CqlRuleSet.append_rules(syntax_rules)
from cassandra.cqltypes import lookup_casstype
class UserTypesMeta(object):
    """Registry of user-defined types, keyed by keyspace then type name."""

    _meta = {}

    def __init__(self, meta):
        self._meta = meta

    @classmethod
    def from_layout(cls, layout):
        """Build the registry from system-table rows describing user types;
        each row carries keyspace_name, type_name, field_names, field_types."""
        by_keyspace = {}
        for row in layout:
            types_in_ks = by_keyspace.setdefault(row.keyspace_name, {})
            types_in_ks[row.type_name] = zip(row.field_names, row.field_types)
        return cls(meta=by_keyspace)

    def get_usertypes_names(self, keyspace):
        """All type names defined in the given keyspace."""
        return map(str, self._meta.get(keyspace, {}).keys())

    def get_field_names(self, keyspace, type):
        """Field names of one type, in declaration order."""
        return [field[0] for field in self._meta.get(keyspace, {}).get(type, [])]

    def get_fields_with_types(self, ksname, typename):
        """(field name, parameterized CQL type string) pairs for one type."""
        entries = self._meta.get(ksname, {}).get(typename, [])
        return [(name, lookup_casstype(casstype).cql_parameterized_type())
                for name, casstype in entries]
|
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to plot the results and diagnostics of a TrimmedMatch design."""
from typing import Dict, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from trimmed_match.design import common_classes
from trimmed_match.design import util
TimeWindow = common_classes.TimeWindow
def plot_candidate_design_rmse(
    response: str, num_pairs: int,
    results: pd.DataFrame) -> Dict[Tuple[float, float], plt.Figure]:
  """Plot the RMSE curve for a set of candidate designs.

  Args:
    response: str, primary response variable used for the design.
    num_pairs: int, total number of pairs in the design.
    results: pd.DataFrame, with columns (pair_index,
      experiment_response, experiment_spend, spend_response_ratio, budget,
      iroas, rmse, rmse_cost_adjusted, proportion_cost_in_experiment).

  Returns:
    axes_dict: a dictionary with keys (budget, iroas) whose values are the
      matplotlib Figures showing the RMSE values (plain and cost adjusted)
      for the design with corresponding budget and iROAS as a function of
      the number of excluded pairs.
  """
  budget_list = results['budget'].unique()
  iroas_list = results['iroas'].unique()
  axes_dict = {}
  # One figure per (budget, iroas) combination.
  for budget in budget_list:
    for iroas in iroas_list:
      result = results[(results['budget'] == budget)
                       & (results['iroas'] == iroas)].reset_index(drop=True)
      # Integer-valued horizontal grid lines spanning the full RMSE range.
      hlines = range(
          0,
          int(max(max(result['rmse']), max(result['rmse_cost_adjusted']))) + 1)
      # 5% of the RMSE range, used as axis padding and label offset.
      delta = (max(result['rmse']) - min(result['rmse'])) * 0.05
      fig = plt.figure(figsize=(20, 10))
      ax = fig.add_subplot(1, 1, 1)
      ax.plot(
          result['pair_index'], result['rmse'], 'blue', label='RMSE')
      ax.plot(
          result['pair_index'],
          result['rmse_cost_adjusted'],
          'red',
          label='Cost adjusted RMSE')
      ax.set_xlim(
          min(result['pair_index']) - 1,
          max(result['pair_index']) + 1)
      ax.set_ylim(
          min(result.rmse) - delta,
          max(result.rmse_cost_adjusted) + delta)
      ax.legend()
      ax.hlines(
          y=hlines,
          xmin=min(result['pair_index']),
          xmax=max(result['pair_index']),
          colors='gray',
          linestyles='dashed')
      # Annotate each point with the experiment response in readable form.
      for _, row in result.iterrows():
        ax.text(
            row.pair_index + 1, row.rmse + delta,
            '{}'.format(
                util.human_readable_number(row.experiment_response)))
      ax.set_xlabel('Pairing number')
      ax.set_ylabel('RMSE')
      ax.set_title(
          f'''RMSE of iROAS w.r.t. {response} (total pairs: {num_pairs})''')
      axes_dict[(budget, iroas)] = fig
  # NOTE(review): this closes only the most recently created figure; earlier
  # figures stay registered with pyplot. Confirm whether plt.close('all') or
  # closing each fig inside the loop was intended.
  plt.close()
  return axes_dict
def _scatter_group_comparison(ax, treatment_values, control_values, title):
  """Square-root scatter of treatment vs. control with a y=x reference line."""
  sns.regplot(
      x=np.sqrt(treatment_values),
      y=np.sqrt(control_values),
      ax=ax,
      fit_reg=False)
  ax.set_title(title + ' (in square root)')
  ax.set_xlabel('treatment')
  ax.set_ylabel('control')
  # Common axis limits (sqrt scale) padded by 3% on each side.
  lim = np.sqrt([
      min([min(control_values),
           min(treatment_values)]) * 0.97,
      max([max(control_values),
           max(treatment_values)]) * 1.03
  ])
  ax.plot(lim, lim, linestyle='--', color='gray')
  ax.set_xlim(lim)
  ax.set_ylim(lim)


def _timeseries_group_comparison(ax, treatment_ts, control_ts, column,
                                 eval_window):
  """Treatment vs. control time series with the evaluation window marked."""
  treatment_ts.plot(
      x='date',
      y=column,
      color='black',
      label='treatment',
      ax=ax)
  control_ts.plot(
      x='date', y=column, color='red', label='control', ax=ax)
  ax.axvline(
      x=eval_window.first_day,
      color='blue',
      ls='-',
      label='evaluation window')
  ax.axvline(x=eval_window.last_day, color='blue', ls='-')
  ax.legend()
  ax.set_ylabel(column)
  ax.set_xlabel('date')


def output_chosen_design(
    pretest_data: pd.DataFrame,
    geo_level_eval_data: pd.DataFrame,
    response: str,
    spend_proxy: str,
    time_window_for_eval: TimeWindow,
    group_control: int = common_classes.GeoAssignment.CONTROL,
    group_treatment: int = common_classes.GeoAssignment.TREATMENT
) -> np.ndarray:
  """Plot the comparison between treatment and control of a candidate design.

  Produces a 2x2 grid: scatterplots of treatment vs. control (response and
  spend, in square root) on top, and the corresponding group-level time
  series with the evaluation window marked on the bottom.

  Args:
    pretest_data: pd.DataFrame (date, geo, ...).
    geo_level_eval_data: a pd.DataFrame with columns (geo, response, spend,
      pair, assignment).
    response: str, column name used as response in the design.
    spend_proxy: str, column used as spend proxy in the design.
    time_window_for_eval: TimeWindow, representing the time period of pretest
      data used for evaluation of RMSE in estimating iROAS.
    group_control: value representing the control group in the data.
    group_treatment: value representing the treatment group in the data.

  Returns:
    an array of subplots containing the scatterplot and time series comparison
    for the response and spend of the two groups.
  """
  assignment = geo_level_eval_data['assignment']
  geo_treatment = geo_level_eval_data[assignment == group_treatment]
  geo_control = geo_level_eval_data[assignment == group_control]

  def group_time_series(geo_ids):
    # Aggregate the pretest series over all geos in one assignment group.
    subset = pretest_data[pretest_data['geo'].isin(geo_ids)]
    return subset.groupby('date', as_index=False)[[response, spend_proxy]].sum()

  treatment_time_series = group_time_series(geo_treatment['geo'].to_list())
  control_time_series = group_time_series(geo_control['geo'].to_list())

  _, axes = plt.subplots(2, 2, figsize=(15, 7.5))
  _scatter_group_comparison(axes[0, 0], geo_treatment['response'],
                            geo_control['response'], response)
  _scatter_group_comparison(axes[0, 1], geo_treatment['spend'],
                            geo_control['spend'], spend_proxy)
  _timeseries_group_comparison(axes[1, 0], treatment_time_series,
                               control_time_series, response,
                               time_window_for_eval)
  _timeseries_group_comparison(axes[1, 1], treatment_time_series,
                               control_time_series, spend_proxy,
                               time_window_for_eval)
  return axes
def plot_paired_comparison(
    pretest_data: pd.DataFrame, geo_level_eval_data: pd.DataFrame,
    response: str,
    time_window_for_design: TimeWindow,
    time_window_for_eval: TimeWindow,
    group_control: int = common_classes.GeoAssignment.CONTROL,
    group_treatment: int = common_classes.GeoAssignment.TREATMENT,
    legend_location: str = 'best'
) -> sns.FacetGrid:
  """Plot the time series of the response variable for each pair.

  Args:
    pretest_data: pd.DataFrame (date, geo, ...).
    geo_level_eval_data: a pd.DataFrame with columns (geo, response, spend,
      pair, assignment).
    response: str, column name used as response in the design.
    time_window_for_design: TimeWindow, representing the time period of
      pretest data used for the design (training + eval).
    time_window_for_eval: TimeWindow, representing the time period of pretest
      data used for evaluation of RMSE in estimating iROAS.
    group_control: value representing the control group in the data.
    group_treatment: value representing the treatment group in the data.
    legend_location: location to place the legend in each plot. Acceptable
      values are of the form 'upper left', 'lower right', etc.; see the
      documentation at
      https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.legend.html#matplotlib.pyplot.legend.

  Returns:
    g: sns.FacetGrid with one axis per geo pair, each showing the response
      time series of the treated geo vs. the control geo, with the training
      and evaluation windows marked.
  """
  geos_assigned = geo_level_eval_data[['geo', 'pair', 'assignment']]
  # Restrict the pretest data to experiment geos within the design window.
  in_experiment = pretest_data['geo'].isin(geo_level_eval_data['geo'].to_list())
  in_design_window = pretest_data['date'].between(
      time_window_for_design.first_day, time_window_for_design.last_day)
  data_to_plot = pd.merge(
      pretest_data[in_experiment & in_design_window],
      geos_assigned,
      on='geo',
      how='left')
  g = sns.FacetGrid(
      data_to_plot,
      col='pair',
      hue='assignment',
      col_wrap=2,
      sharey=False,
      sharex=False,
      legend_out=False,
      height=3,
      aspect=2)
  g = (g.map(plt.plot, 'date', response).add_legend())
  pair_list = sorted(geos_assigned['pair'].unique())
  for ind, ax in enumerate(g.axes):
    in_pair = geos_assigned['pair'] == pair_list[ind]
    control_geo = geos_assigned[
        in_pair
        & (geos_assigned['assignment'] == group_control)]['geo'].values[0]
    treatment_geo = geos_assigned[
        in_pair
        & (geos_assigned['assignment'] == group_treatment)]['geo'].values[0]
    # Mark the evaluation (black, solid) and training (red, dashed) windows.
    ax.axvline(x=time_window_for_eval.last_day, color='black', ls='-')
    ax.axvline(x=time_window_for_design.last_day, color='red', ls='--')
    ax.axvline(x=time_window_for_eval.first_day, color='black', ls='-')
    ax.axvline(x=time_window_for_design.first_day, color='red', ls='--')
    ax.legend([
        'control' + ' (geo {})'.format(control_geo), 'treatment' +
        ' (geo {})'.format(treatment_geo), 'Evaluation period',
        'Training period'
    ],
              loc=legend_location)
  return g
|
|
import datetime
import re
from string import capwords
from typing import List, Optional, Tuple, Union
from loguru import logger
from flexget.utils.qualities import Quality
# Module-scoped logger, namespaced for this parser module.
logger = logger.bind(name='parser')
# Identifier schemes a series release may be identified by.
SERIES_ID_TYPES = ['ep', 'date', 'sequence', 'id']
def clean_value(name: str) -> str:
    """Normalize a release name by turning separator punctuation into spaces.

    The characters ``[]()_,.`` always become spaces; dashes are treated as
    separators only when the name contains no spaces at all (so hyphenated
    titles keep their dashes). Whitespace runs are collapsed to single
    spaces and the ends are trimmed.
    """
    # One C-level translate pass instead of six chained str.replace calls.
    name = name.translate(str.maketrans('[]()_,.', '       '))
    # Dashes only act as separators when nothing else produced a space.
    if ' ' not in name:
        name = name.replace('-', ' ')
    # MovieParser.strip_spaces: collapse whitespace runs, trim the ends.
    return ' '.join(name.split())
def old_assume_quality(guessed_quality: Quality, assumed_quality: Quality) -> Quality:
    """Overlay every component set on `assumed_quality` onto `guessed_quality`.

    Returns the guessed quality with any resolution/source/codec/audio that
    the assumed quality specifies copied over; if nothing was guessed, the
    assumed quality is returned as-is.
    """
    if not assumed_quality:
        return guessed_quality
    if not guessed_quality:
        return assumed_quality
    # Copy each component the assumed quality actually specifies.
    for component in ('resolution', 'source', 'codec', 'audio'):
        assumed_value = getattr(assumed_quality, component)
        if assumed_value:
            setattr(guessed_quality, component, assumed_value)
    return guessed_quality
def remove_dirt(name: str) -> str:
    """Collapse separator punctuation and whitespace to single spaces, lowercase."""
    if not name:
        # Preserve falsy inputs (None, '') unchanged.
        return name
    return re.sub(r'[_.,\[\]\(\): ]+', ' ', name).strip().lower()
def normalize_name(name: str) -> str:
    """Title-case `name`: capitalize each space-separated word."""
    return capwords(name)
class MovieParseResult:
    """Result of parsing a release title as a movie."""

    def __init__(
        self,
        data: str = None,
        name: str = None,
        year: Optional[int] = None,
        quality: Quality = None,
        proper_count: int = 0,
        release_group: Optional[str] = None,
        valid: bool = True,
    ) -> None:
        self.name: str = name
        self.data: str = data
        self.year: Optional[int] = year
        self.quality: Quality = Quality() if quality is None else quality
        self.proper_count: int = proper_count
        self.release_group: Optional[str] = release_group
        self.valid: bool = valid

    @property
    def identifier(self) -> str:
        """Lowercased 'name year' (or just the name) used as the movie id."""
        if not self.name:
            return None
        if self.year:
            return f'{self.name} {self.year}'.strip().lower()
        return self.name.lower()

    @property
    def proper(self) -> bool:
        return self.proper_count > 0

    @property
    def fields(self) -> dict:
        """Return a dict of all parser fields."""
        return {
            'id': self.identifier,
            'movie_parser': self,
            'movie_name': self.name,
            'movie_year': self.year,
            'proper': self.proper,
            'proper_count': self.proper_count,
            'release_group': self.release_group,
        }

    def __str__(self) -> str:
        status = 'OK' if self.valid else 'INVALID'
        return (
            f'<MovieParseResult(data={self.data},name={self.name},year={self.year},'
            f'id={self.identifier},quality={self.quality},proper={self.proper_count},'
            f'release_group={self.release_group},status={status})>'
        )
class SeriesParseResult:
    """Result of parsing a release title as a series episode/season."""

    def __init__(
        self,
        data: str = None,
        name: str = None,
        identified_by: str = None,
        id_type: str = None,
        id: Union[Tuple[int, int], str, int, datetime.date] = None,
        episodes: int = 1,
        season_pack: bool = False,
        strict_name: bool = False,
        quality: Quality = None,
        proper_count: int = 0,
        special: bool = False,
        group: Optional[str] = None,
        valid: bool = True,
    ) -> None:
        self.name: str = name
        self.data: str = data
        self.episodes: int = episodes
        self.season_pack: bool = season_pack
        self.identified_by: str = identified_by
        self.id: Union[Tuple[int, int], str, int, datetime.date] = id
        self.id_type: str = id_type
        self.quality: Quality = Quality() if quality is None else quality
        self.proper_count: int = proper_count
        self.special: bool = special
        self.group: Optional[str] = group
        self.valid: bool = valid
        self.strict_name: bool = strict_name

    @property
    def proper(self) -> bool:
        return self.proper_count > 0

    @property
    def season(self) -> Optional[int]:
        """Season number derived from the id, where the id type allows it."""
        if self.id_type == 'ep':
            return self.id[0]
        if self.id_type == 'date':
            return self.id.year
        if self.id_type == 'sequence':
            return 0
        return None

    @property
    def episode(self) -> Optional[int]:
        """Episode number derived from the id, where the id type allows it."""
        if self.id_type == 'ep':
            return self.id[1]
        if self.id_type == 'sequence':
            return self.id
        return None

    @property
    def identifiers(self) -> List[str]:
        """Return all identifiers this parser represents. (for packs)"""
        # Currently 'ep' is the only id type that supports packs.
        if not self.valid:
            raise Exception('Series flagged invalid')
        if self.id_type == 'ep':
            if self.season_pack:
                return ['S%02d' % self.season]
            return [
                'S%02dE%02d' % (self.season, self.episode + offset)
                for offset in range(self.episodes)
            ]
        if self.id_type == 'date':
            return [self.id.strftime('%Y-%m-%d')]
        if self.id is None:
            raise Exception('Series is missing identifier')
        return [self.id]

    @property
    def identifier(self) -> str:
        """Return String identifier for parsed episode, eg. S01E02
        (will be the first identifier if this is a pack)
        """
        return self.identifiers[0]

    @property
    def pack_identifier(self) -> str:
        """Return a combined identifier for the whole pack if this has more than one episode."""
        # Currently only supports ep mode.
        if self.id_type == 'ep' and self.episodes > 1:
            return 'S%02dE%02d-E%02d' % (
                self.season,
                self.episode,
                self.episode + self.episodes - 1,
            )
        return self.identifier

    def __str__(self) -> str:
        status = 'OK' if self.valid else 'INVALID'
        return (
            f'<SeriesParseResult(data={self.data},name={self.name},id={self.id},'
            f'season={self.season},season_pack={self.season_pack},'
            f'episode={self.episode},quality={self.quality},'
            f'proper={self.proper_count},special={self.special},status={status})>'
        )
|
|
# 3D display code for stereo camera preview
# Written by Hugh Fisher, CECS ANU, 2011
# Distributed under MIT/X11 license: see file COPYING
# The renderer displays the two video sources.
# Current options are side by side, overlaid at
# 50% alpha each, or anaglyph (red-blue) stereo
# Real quad buffered stereo might be nice, but
# this app was designed for use in the field on
# a laptop.
from __future__ import division, print_function
import sys, math
import wx
from wx.glcanvas import *
import pygst, gst
import OpenGL
from OpenGL import GL
from OpenGL.GL import *
from canvas3d import Canvas3D
import app
from app import _
import gpu, gstvideo, videotexture
from videotexture import *
# Menu command ids for the view menu.
# Because these integers get stored in app prefs,
# never add new ids before them.
MYID_SPLIT = wx.ID_HIGHEST + 1
MYID_BLENDED = MYID_SPLIT + 1
MYID_ANAGLYPH = MYID_BLENDED + 1
MYID_FULLSCREEN = MYID_ANAGLYPH + 1
MYID_SHOW_LEFT = MYID_FULLSCREEN + 1
MYID_SHOW_RIGHT = MYID_SHOW_LEFT + 1
class StereoFrame(Canvas3D):
    """Display frame stereo image pair.

    Renders one or two GStreamer video streams into an OpenGL canvas,
    either side by side, alpha-blended, or as a red-blue anaglyph.
    """

    def __init__(self, parent, size = None, attribs = None, id = wx.ID_ANY):
        Canvas3D.__init__(self, parent, size, attribs, id)
        self.window = parent
        self.prevKey = None
        # Streams, to be supplied
        self.left = None
        self.right = None
        self.pipeline= None
        # Single, side by side or overlay view
        self.mono = True    # Automatic if only one stream, no preference
        # The pref holds repr() of an int (0 / MYID_BLENDED / MYID_ANAGLYPH),
        # written by stopVideo. Parse with int() rather than eval(): same
        # result for valid values, but no execution of arbitrary config text.
        self.overlay = int(app.config.Read("overlay", "0"))
        self.bkColor = (0.0, 0.0, 0.0)  # Background color
        # Internal layout
        self.BORDER = 0.1

    def stopVideo(self):
        """Shut down GStreamer, save prefs"""
        if self.left:
            self.left.stop()
        if self.right:
            self.right.stop()
        app.config.Write("overlay", repr(self.overlay))

    def addMenuItems(self, menu):
        """Our view controls"""
        self.menu = menu
        menu.Append(MYID_FULLSCREEN, _("Full screen\tctrl+f"))
        menu.AppendSeparator()
        menu.AppendCheckItem(MYID_BLENDED, _("Blended view\tctrl+b"))
        menu.AppendCheckItem(MYID_SPLIT, _("Side by side\tctrl+s"))
        menu.AppendCheckItem(MYID_ANAGLYPH, _("Anaglyph view\tctrl+a"))
        menu.AppendCheckItem(MYID_SHOW_LEFT, _("Left eye\tctrl+l"))
        menu.AppendCheckItem(MYID_SHOW_RIGHT, _("Right eye\tctrl+r"))
        self.window.Bind(wx.EVT_MENU, self.OnFullScreen, id=MYID_FULLSCREEN)
        self.window.Bind(wx.EVT_MENU, self.OnSplit, id=MYID_SPLIT)
        self.window.Bind(wx.EVT_MENU, self.OnMerge, id=MYID_BLENDED)
        self.window.Bind(wx.EVT_MENU, self.OnAnaglyph, id=MYID_ANAGLYPH)
        self.window.Bind(wx.EVT_MENU, self.OnShowLeft, id=MYID_SHOW_LEFT)
        self.window.Bind(wx.EVT_MENU, self.OnShowRight, id=MYID_SHOW_RIGHT)
        # Immediate update, wx always checks first item
        self.OnUpdateMenu(None)

    def setVideoStreams(self, left, right, pipeline):
        """Create video streams from chooser dialog values"""
        # If only one stream (mono), make it the left
        self.left = VideoTexture(left, pipeline)
        if not right:
            self.mono = True
        else:
            self.mono = False
            self.right = VideoTexture(right, pipeline)
        self.positionStreams()
        # The video streams update the GL textures automatically,
        # but don't force window updates. We'll draw at normal
        # speed rather than try to synch to the video frame rate.
        self.animate()
        self.OnUpdateMenu(None)

    def OnFullScreen(self, event):
        """Change to fullscreen mode"""
        self.window.ShowFullScreen(not self.window.IsFullScreen(), style=wx.FULLSCREEN_ALL)

    def OnSplit(self, event):
        """Switch to split view, with animation"""
        self.overlay = 0
        self.positionStreams()
        self.OnUpdateMenu(None)

    def OnMerge(self, event):
        """Switch to blended overlay view"""
        self.overlay = MYID_BLENDED
        self.positionStreams()
        self.OnUpdateMenu(None)

    def OnAnaglyph(self, event):
        """Switch to red-blue overlay view"""
        self.overlay = MYID_ANAGLYPH
        self.positionStreams()
        self.OnUpdateMenu(None)

    def OnShowLeft(self, event):
        """Toggle left eye visibility"""
        if self.left:
            self.left.setVisible(not self.left.visible)
        self.OnUpdateMenu(None)

    def OnShowRight(self, event):
        """Toggle right eye"""
        if self.right:
            self.right.setVisible(not self.right.visible)
        self.OnUpdateMenu(None)

    def OnUpdateMenu(self, event):
        """Auto update of menu status"""
        # Stereo-only items are disabled while showing a single stream.
        if self.mono:
            self.menu.Enable(MYID_SPLIT, False)
            self.menu.Enable(MYID_BLENDED, False)
            self.menu.Enable(MYID_ANAGLYPH, False)
            self.menu.Enable(MYID_SHOW_LEFT, False)
            self.menu.Enable(MYID_SHOW_RIGHT, False)
        else:
            self.menu.Enable(MYID_SPLIT, True)
            self.menu.Enable(MYID_BLENDED, True)
            self.menu.Enable(MYID_ANAGLYPH, True)
            self.menu.Enable(MYID_SHOW_LEFT, True)
            self.menu.Enable(MYID_SHOW_RIGHT, True)
        self.menu.Check(MYID_SPLIT, not self.overlay)
        self.menu.Check(MYID_BLENDED, self.overlay == MYID_BLENDED)
        self.menu.Check(MYID_ANAGLYPH, self.overlay == MYID_ANAGLYPH)
        # NOTE(review): these pass None to Check() when a stream is unset;
        # confirm the wx build in use accepts a non-bool here.
        self.menu.Check(MYID_SHOW_LEFT, self.left and self.left.visible)
        self.menu.Check(MYID_SHOW_RIGHT, self.right and self.right.visible)

    def key(self, event):
        """Quit on ESC, others ignored"""
        # Requires two consecutive ESC presses to close the window.
        ch = event.GetKeyCode()
        if ch == wx.WXK_ESCAPE:
            if self.prevKey == ch:
                self.window.Close()
        else:
            event.Skip()
        self.prevKey = ch

    def OnSize(self, event):
        Canvas3D.OnSize(self, event)
        self.positionStreams(True)

    def initGL(self):
        Canvas3D.initGL(self)
        glClearColor(self.bkColor[0], self.bkColor[1], self.bkColor[2], 0)
        glDisable(GL_DEPTH_TEST)
        glEnableClientState(GL_VERTEX_ARRAY)
        self.initShaders()

    def initShaders(self):
        """Compile all shader program variants used by the draw modes."""
        gpu.init()
        # Flat shader used for overlays
        stdVert = gpu.loadShaderFile(GL_VERTEX_SHADER, "std_vert.glsl",
                        ["#version 120"])
        flatFrag = gpu.loadShaderFile(GL_FRAGMENT_SHADER, "flat_frag.glsl")
        self.flatShader = gpu.newProgram(stdVert, flatFrag)
        # The video shaders vary depending on the source
        # format (Bayer or non) and output (RGB or red-blue)
        # Could recompile shaders on the fly as the output
        # changes, but easier to generate all variations here
        ## RGB shader just uses image as texture
        rgbFrag = gpu.loadShaderFile(
                        GL_FRAGMENT_SHADER, "video_frag.glsl",
                        ["#version 120"])
        self.rgbVideo = gpu.newProgram(stdVert, rgbFrag)
        h = gpu.getUniform(self.rgbVideo, "image")
        glUniform1i(h, 0)   # Always GL_TEXTURE0
        # RGB anaglyph shader
        rgbAnaglyphFrag = gpu.loadShaderFile(
                        GL_FRAGMENT_SHADER, "video_frag.glsl",
                        ["#version 120", "#define ANAGLYPH"])
        self.rgbAnaglyphVideo = gpu.newProgram(stdVert, rgbAnaglyphFrag)
        h = gpu.getUniform(self.rgbAnaglyphVideo, "image")
        glUniform1i(h, 0)
        ## Bayer demosaic versions
        bayerVert = gpu.loadShaderFile(GL_VERTEX_SHADER, "std_vert.glsl",
                        ["#version 120", "#define DEBAYER"])
        bayerFrag = gpu.loadShaderFile(
                        GL_FRAGMENT_SHADER, "video_frag.glsl",
                        ["#version 120", "#define DEBAYER"])
        self.bayerVideo = gpu.newProgram(bayerVert, bayerFrag)
        h = gpu.getUniform(self.bayerVideo, "image")
        glUniform1i(h, 0)
        bayerAnaglyphFrag = gpu.loadShaderFile(
                        GL_FRAGMENT_SHADER, "video_frag.glsl",
                        ["#version 120", "#define DEBAYER", "#define ANAGLYPH"])
        self.bayerAnaglyphVideo = gpu.newProgram(bayerVert, bayerAnaglyphFrag)
        h = gpu.getUniform(self.bayerAnaglyphVideo, "image")
        glUniform1i(h, 0)

    def positionStreams(self, force=False):
        """Position streams within window according to display option"""
        if self.overlay == 0:
            # Side by side mode: each stream gets half the window width.
            maxFrac = 0.5
            if self.left:
                self.left.place(-1.0, -0.5, self, force, maxFrac)
            if self.right:
                self.right.place(0.0, -0.5, self, force, maxFrac)
        else:
            # Overlay blended/anaglyph: both streams centered, full size.
            maxFrac = 1.0
            if self.left:
                self.left.place(-0.5, -0.5, self, force, maxFrac)
            if self.right:
                self.right.place(-0.5, -0.5, self, force, maxFrac)

    def setProjection(self):
        """Version 1.4 is back to ortho. Using pixel coordinates
        does something odd with texture coordinate generation
        which throws out GPU de-Bayering. No idea why."""
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        aspect = float(self.width) / float(self.height)
        glOrtho(-aspect * 0.5, aspect * 0.5, -0.5, 0.5, -1, 1)
        glMatrixMode(GL_MODELVIEW)

    def setViewpoint(self):
        glLoadIdentity()

    def drawSingleStream(self):
        """Draw a single non-stereo stream"""
        if self.left.bayer:
            gpu.useProgram(self.bayerVideo)
        else:
            gpu.useProgram(self.rgbVideo)
        glDisable(GL_BLEND)
        self.left.draw()

    def drawSideBySide(self):
        """Side by side view of stereo stream pair"""
        if self.left.bayer or self.right.bayer:
            gpu.useProgram(self.bayerVideo)
        else:
            gpu.useProgram(self.rgbVideo)
        glDisable(GL_BLEND)
        self.left.draw()
        self.right.draw()
        # Separator: a vertical line in the background color between halves.
        gpu.useProgram(self.flatShader)
        glEnableClientState(GL_COLOR_ARRAY)
        glVertexPointer(2, GL_FLOAT, 0,
                ((0, -0.5),(0, 0.5)))
        glColorPointer(3, GL_FLOAT, 0,
                (self.bkColor, self.bkColor))
        glDrawArrays(GL_LINES, 0, 2)
        glDisableClientState(GL_COLOR_ARRAY)

    def drawBlendedStreams(self):
        """Stream pair each at 50% opacity"""
        if self.left.bayer or self.right.bayer:
            gpu.useProgram(self.bayerVideo)
        else:
            gpu.useProgram(self.rgbVideo)
        # Full opacity when only one eye is visible.
        if self.left.visible and self.right.visible:
            opacity = 0.5
        else:
            opacity = 1.0
        glEnable(GL_BLEND)
        glBlendColor(1.0, 1.0, 1.0, opacity)
        glBlendFunc(GL_CONSTANT_ALPHA, GL_ZERO)
        self.left.draw()
        glBlendColor(1.0, 1.0, 1.0, opacity)
        glBlendFunc(GL_CONSTANT_ALPHA, GL_ONE)
        self.right.draw()

    def drawRedBlueStreams(self):
        """Red-blue stereo view of grayscale"""
        if self.left.bayer or self.right.bayer:
            gpu.useProgram(self.bayerAnaglyphVideo)
        else:
            gpu.useProgram(self.rgbAnaglyphVideo)
        glDisable(GL_BLEND)
        # Left eye into the red channel, right eye into green+blue.
        glColorMask(GL_TRUE, GL_FALSE, GL_FALSE, GL_FALSE)
        self.left.draw()
        glColorMask(GL_FALSE, GL_TRUE, GL_TRUE, GL_FALSE)
        self.right.draw()
        glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE)

    def drawWorld(self):
        if self.left is None and self.right is None:
            return
        # There's a bunch of different ways to draw the stream(s)
        if self.mono:
            self.drawSingleStream()
        elif not self.overlay:
            self.drawSideBySide()
        elif self.overlay == MYID_BLENDED:
            self.drawBlendedStreams()
        elif self.overlay == MYID_ANAGLYPH:
            self.drawRedBlueStreams()
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, live_only
from azure.cli.command_modules.network.zone_file import parse_zone_file
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
class DnsZoneImportTest(ScenarioTest):
    """Round-trip tests: import a zone file, export it, re-import, and verify
    the record sets are unchanged."""

    def _match_record(self, record_set, name, type):
        """Return the single record in `record_set` with the given name/type."""
        # `type` shadows the builtin but is kept for caller compatibility.
        matches = [x for x in record_set if x['name'] == name and x['type'] == type]
        self.assertEqual(len(matches), 1)
        return matches[0]

    def _list_record_fqdns(self, val):
        """Project a list of record sets onto a tuple of their FQDNs."""
        return tuple(x['fqdn'] for x in val)

    def _check_records(self, records1, records2):
        """Assert both record lists are equal, ignoring server-generated etags."""
        self.assertEqual(self._list_record_fqdns(records1),
                         self._list_record_fqdns(records2))
        for record in records1:
            record_match = self._match_record(records2, record['name'], record['type'])
            # etags differ between imports and are not part of the comparison.
            del record['etag']
            del record_match['etag']
            self.assertDictEqual(record, record_match)

    def _test_zone(self, zone_name, filename):
        """ This tests that a zone file can be imported, exported, and re-imported without any changes to the
            record sets. It does not test that the imported files meet any specific requirements. For that, run
            additional checks in the individual zone file tests.
        """
        self.kwargs.update({
            'zone': zone_name,
            'path': os.path.join(TEST_DIR, 'zone_files', filename),
            'export': os.path.join(TEST_DIR, 'zone_files', filename + '_export.txt')
        })
        # Import from zone file
        self.cmd('network dns zone import -n {zone} -g {rg} --file-name "{path}"')
        records1 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()
        # Export zone file and delete the zone
        self.cmd('network dns zone export -g {rg} -n {zone} --file-name "{export}"')
        self.cmd('network dns zone delete -g {rg} -n {zone} -y')
        # Reimport zone file and verify both record sets are equivalent
        self.cmd('network dns zone import -n {zone} -g {rg} --file-name "{export}"')
        records2 = self.cmd('network dns record-set list -g {rg} -z {zone}').get_output_in_json()
        # verify that each record in the original import is unchanged after export/re-import
        self._check_records(records1, records2)

    @live_only()
    @ResourceGroupPreparer(name_prefix='test_dns_import_file_not_found')
    def test_dns_import_file_operation_error(self, resource_group):
        """Import failures (missing file, directory, permissions) on Linux."""
        import sys
        if sys.platform != 'linux':
            self.skipTest('This test should run on Linux platform')
        from azure.cli.core.azclierror import FileOperationError
        # NOTE(review): `e` is the assertRaises context manager, which exposes
        # `e.exception`, not `e.errno` — confirm these live-only assertions
        # actually run (they look like they should be e.exception.errno).
        with self.assertRaisesRegex(FileOperationError, 'No such file: ') as e:
            self._test_zone('404zone.com', 'non_existing_zone_description_file.txt')
            self.assertEqual(e.errno, 1)
        with self.assertRaisesRegex(FileOperationError, 'Is a directory: ') as e:
            self._test_zone('404zone.com', '')
            self.assertEqual(e.errno, 1)
        with self.assertRaisesRegex(FileOperationError, 'Permission denied: ') as e:
            self._test_zone('404zone.com', '/root/')
            self.assertEqual(e.errno, 1)

    @live_only()
    @ResourceGroupPreparer(name_prefix='test_dns_import_file_operation_error_windows')
    def test_dns_import_file_operation_error_windows(self, resource_group):
        """Import failures (missing file, directory) on Windows."""
        import sys
        if sys.platform != 'win32':
            self.skipTest('This test should run on Windows platform')
        from azure.cli.core.azclierror import FileOperationError
        with self.assertRaisesRegex(FileOperationError, 'No such file: ') as e:
            self._test_zone('404zone.com', 'non_existing_zone_description_file.txt')
            self.assertEqual(e.errno, 1)
        # Difference with Linux platform while reading a directory
        with self.assertRaisesRegex(FileOperationError, 'Permission denied:') as e:
            self._test_zone('404zone.com', '.')
            self.assertEqual(e.errno, 1)

    @ResourceGroupPreparer(name_prefix='cli_dns_zone1_import')
    def test_dns_zone1_import(self, resource_group):
        self._test_zone('zone1.com', 'zone1.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone2_import')
    def test_dns_zone2_import(self, resource_group):
        self._test_zone('zone2.com', 'zone2.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone3_import')
    def test_dns_zone3_import(self, resource_group):
        self._test_zone('zone3.com', 'zone3.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone4_import')
    def test_dns_zone4_import(self, resource_group):
        self._test_zone('zone4.com', 'zone4.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone5_import')
    def test_dns_zone5_import(self, resource_group):
        self._test_zone('zone5.com', 'zone5.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone6_import')
    def test_dns_zone6_import(self, resource_group):
        self._test_zone('zone6.com', 'zone6.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone7_import')
    def test_dns_zone7_import(self, resource_group):
        self._test_zone('zone7.com', 'zone7.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone8_import')
    def test_dns_zone8_import(self, resource_group):
        self._test_zone('zone8.com', 'zone8.txt')

    @ResourceGroupPreparer(name_prefix='cli_dns_zone9_import')
    def test_dns_zone9_import(self, resource_group):
        self._test_zone('zone9.com', 'zone9.txt')
class DnsScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_dns')
def test_dns(self, resource_group):
self.kwargs['zone'] = 'myzonex.com'
self.cmd('network dns zone list') # just verify is works (no Exception raised)
self.cmd('network dns zone create -n {zone} -g {rg}')
self.cmd('network dns zone list -g {rg}',
checks=self.check('length(@)', 1))
base_record_sets = 2
self.cmd('network dns zone show -n {zone} -g {rg}',
checks=self.check('numberOfRecordSets', base_record_sets))
args = {
'a': '--ipv4-address 10.0.0.10',
'aaaa': '--ipv6-address 2001:db8:0:1:1:1:1:1',
'caa': '--flags 0 --tag foo --value "my value"',
'cname': '--cname mycname',
'mx': '--exchange 12 --preference 13',
'ns': '--nsdname foobar.com',
'ptr': '--ptrdname foobar.com',
'soa': '--email foo.com --expire-time 30 --minimum-ttl 20 --refresh-time 60 --retry-time 90 --serial-number 123',
'srv': '--port 1234 --priority 1 --target target.com --weight 50',
'txt': '--value some_text'
}
record_types = ['a', 'aaaa', 'caa', 'cname', 'mx', 'ns', 'ptr', 'srv', 'txt']
for t in record_types:
# test creating the record set and then adding records
self.cmd('network dns record-set {0} create -n myrs{0} -g {{rg}} --zone-name {{zone}}'.format(t))
add_command = 'set-record' if t == 'cname' else 'add-record'
self.cmd('network dns record-set {0} {2} -g {{rg}} --zone-name {{zone}} --record-set-name myrs{0} {1}'.format(t, args[t], add_command))
# Issue 10467: FIX add-record is not idempotent
self.cmd('network dns record-set {0} {2} -g {{rg}} --zone-name {{zone}} --record-set-name myrs{0} {1}'.format(t, args[t], add_command))
# test creating the record set at the same time you add records
self.cmd('network dns record-set {0} {2} -g {{rg}} --zone-name {{zone}} --record-set-name myrs{0}alt {1}'.format(t, args[t], add_command))
self.cmd('network dns record-set a add-record -g {rg} --zone-name {zone} --record-set-name myrsa --ipv4-address 10.0.0.11')
self.cmd('network dns record-set soa update -g {{rg}} --zone-name {{zone}} {0}'.format(args['soa']))
long_value = '0123456789' * 50
self.cmd('network dns record-set txt add-record -g {{rg}} -z {{zone}} -n longtxt -v {0}'.format(long_value))
typed_record_sets = 2 * len(record_types) + 1
self.cmd('network dns zone show -n {zone} -g {rg}',
checks=self.check('numberOfRecordSets', base_record_sets + typed_record_sets))
self.cmd('network dns record-set a show -n myrsa -g {rg} --zone-name {zone}',
checks=self.check('length(aRecords)', 2))
# test list vs. list type
self.cmd('network dns record-set list -g {rg} -z {zone}',
checks=self.check('length(@)', base_record_sets + typed_record_sets))
self.cmd('network dns record-set txt list -g {rg} -z {zone}',
checks=self.check('length(@)', 3))
for t in record_types:
self.cmd('network dns record-set {0} remove-record -g {{rg}} --zone-name {{zone}} --record-set-name myrs{0} {1}'.format(t, args[t]))
self.cmd('network dns record-set a show -n myrsa -g {rg} --zone-name {zone}',
checks=self.check('length(aRecords)', 1))
self.cmd('network dns record-set a remove-record -g {rg} --zone-name {zone} --record-set-name myrsa --ipv4-address 10.0.0.11')
self.cmd('network dns record-set a show -n myrsa -g {rg} --zone-name {zone}', expect_failure=True)
self.cmd('network dns record-set a delete -n myrsa -g {rg} --zone-name {zone} -y')
self.cmd('network dns record-set cname delete -n myrscname -g {rg} --zone-name {zone} -y')
self.cmd('network dns zone delete -g {rg} -n {zone} -y',
checks=self.is_empty())
@ResourceGroupPreparer(name_prefix='cli_test_dns_if_none_match')
def test_dns_if_none_match(self, resource_group):
    """End-to-end CRUD of a DNS zone when record sets are created with --if-none-match."""
    self.kwargs['zone'] = 'myzonex.com'
    self.cmd('network dns zone list')  # just verify is works (no Exception raised)
    self.cmd('network dns zone create -n {zone} -g {rg}')
    self.cmd('network dns zone list -g {rg}',
             checks=self.check('length(@)', 1))
    # a freshly created zone contains exactly two record sets (per the show check below)
    base_record_sets = 2
    self.cmd('network dns zone show -n {zone} -g {rg}',
             checks=self.check('numberOfRecordSets', base_record_sets))
    # per-type arguments used both to add and (later) remove one record of each type
    args = {
        'a': '--ipv4-address 10.0.0.10',
        'aaaa': '--ipv6-address 2001:db8:0:1:1:1:1:1',
        'caa': '--flags 0 --tag foo --value "my value"',
        'cname': '--cname mycname',
        'mx': '--exchange 12 --preference 13',
        'ns': '--nsdname foobar.com',
        'ptr': '--ptrdname foobar.com',
        'soa': '--email foo.com --expire-time 30 --minimum-ttl 20 --refresh-time 60 --retry-time 90 --serial-number 123',
        'srv': '--port 1234 --priority 1 --target target.com --weight 50',
        'txt': '--value some_text'
    }
    record_types = ['a', 'aaaa', 'caa', 'cname', 'mx', 'ns', 'ptr', 'srv', 'txt']
    for t in record_types:
        # cname uses 'set-record'; every other type uses 'add-record'
        add_command = 'set-record' if t == 'cname' else 'add-record'
        # test creating the record set at the same time you add records
        self.cmd('network dns record-set {0} {2} -g {{rg}} --zone-name {{zone}} --record-set-name myrs{0} {1} --if-none-match'.format(t, args[t], add_command))
    self.cmd('network dns record-set a add-record -g {rg} --zone-name {zone} --record-set-name myrsa --ipv4-address 10.0.0.11')
    self.cmd('network dns record-set soa update -g {{rg}} --zone-name {{zone}} {0}'.format(args['soa']))
    # a 500-character TXT value (presumably split into <=255-char chunks server-side — verify)
    long_value = '0123456789' * 50
    self.cmd('network dns record-set txt add-record -g {{rg}} -z {{zone}} -n longtxt -v {0}'.format(long_value))
    # one record set per type created above, plus the extra 'longtxt' set
    typed_record_sets = len(record_types) + 1
    self.cmd('network dns zone show -n {zone} -g {rg}',
             checks=self.check('numberOfRecordSets', base_record_sets + typed_record_sets))
    self.cmd('network dns record-set a show -n myrsa -g {rg} --zone-name {zone}',
             checks=self.check('length(aRecords)', 2))
    # test list vs. list type
    self.cmd('network dns record-set list -g {rg} -z {zone}',
             checks=self.check('length(@)', base_record_sets + typed_record_sets))
    self.cmd('network dns record-set txt list -g {rg} -z {zone}',
             checks=self.check('length(@)', 2))
    for t in record_types:
        self.cmd('network dns record-set {0} remove-record -g {{rg}} --zone-name {{zone}} --record-set-name myrs{0} {1}'.format(t, args[t]))
    self.cmd('network dns record-set a show -n myrsa -g {rg} --zone-name {zone}',
             checks=self.check('length(aRecords)', 1))
    self.cmd('network dns record-set a remove-record -g {rg} --zone-name {zone} --record-set-name myrsa --ipv4-address 10.0.0.11')
    # once its last record is removed, showing the record set is expected to fail
    self.cmd('network dns record-set a show -n myrsa -g {rg} --zone-name {zone}', expect_failure=True)
    self.cmd('network dns record-set a delete -n myrsa -g {rg} --zone-name {zone} -y')
    self.cmd('network dns record-set cname delete -n myrscname -g {rg} --zone-name {zone} -y')
    self.cmd('network dns zone delete -g {rg} -n {zone} -y',
             checks=self.is_empty())
@ResourceGroupPreparer(name_prefix='cli_test_dns')
def test_dns_delegation(self, resource_group):
    """Creating a child zone with -p should add an NS delegation in the parent zone."""
    self.kwargs['parent_zone_name'] = 'books.com'
    self.cmd('network dns zone create -n {parent_zone_name} -g {rg}').get_output_in_json()
    # a freshly created zone contains two record sets
    base_record_sets = 2
    self.cmd('network dns zone show -n {parent_zone_name} -g {rg}',
             checks=self.check('numberOfRecordSets', base_record_sets))
    self.kwargs['child_zone_name'] = 'nursery.books.com'
    child_zone = self.cmd('network dns zone create -n {child_zone_name} -g {rg} -p {parent_zone_name}').get_output_in_json()
    child_name_server_count = len(child_zone['nameServers'])
    # the parent zone gains exactly one additional (NS delegation) record set
    record_sets_with_ns_delegation = 3
    self.cmd('network dns zone show -n {parent_zone_name} -g {rg}',
             checks=self.check('numberOfRecordSets', record_sets_with_ns_delegation)).get_output_in_json()
    # the delegation set is named after the child's label relative to the parent
    record_set_name = self.kwargs['child_zone_name'].replace('.' + self.kwargs['parent_zone_name'], '')
    self.kwargs['record_set_name'] = record_set_name
    # the NS set must contain one record per child name server
    self.cmd('network dns record-set ns show -n {record_set_name} -g {rg} --zone-name {parent_zone_name}',
             checks=self.check('length(nsRecords)', child_name_server_count))
    # clean up by deleting the created resources
    self.cmd('network dns zone delete -g {rg} -n {parent_zone_name} -y',
             checks=self.is_empty())
    self.cmd('network dns zone delete -g {rg} -n {child_zone_name} -y',
             checks=self.is_empty())
@ResourceGroupPreparer(name_prefix='cli_test_dns_alias')
def test_dns_alias(self, resource_group):
    """An A record set can alias an Azure resource (traffic manager) via --target-resource."""
    self.kwargs.update({
        'zone': 'mytestzone1.com',
        'tm_dns': 'mytesttrafficmanager12',
        'tm': 'tm1'
    })
    self.cmd('network dns zone create -g {rg} -n {zone}')
    tm = self.cmd('network traffic-manager profile create -g {rg} -n {tm} --unique-dns-name {tm_dns} --routing-method geographic').get_output_in_json()
    self.kwargs['tm_id'] = tm['TrafficManagerProfile']['id']
    # create an alias record set pointing at the traffic manager's resource ID
    self.cmd('network dns record-set a create -g {rg} -z {zone} -n a1 --target-resource {tm_id}',
             checks=self.check("targetResource.id.contains(@, '{tm}')", True))
    # passing an empty --target-resource clears the alias
    self.cmd('network dns record-set a update -g {rg} -z {zone} -n a1 --target-resource ""',
             checks=self.check('targetResource.id', None))
class DnsParseZoneFiles(unittest.TestCase):
    """Unit tests for parsing DNS zone files into record-set dictionaries."""

    def _check_soa(self, zone, zone_name, ttl, serial_number, refresh, retry, expire, min_ttl):
        """Assert the zone's SOA record carries the expected field values."""
        soa = zone[zone_name]['soa']
        # serial is stored as a string in the parsed record, hence the int()
        self.assertEqual(soa['ttl'], ttl)
        self.assertEqual(int(soa['serial']), serial_number)
        for field, expected in (('refresh', refresh), ('retry', retry),
                                ('expire', expire), ('minimum', min_ttl)):
            self.assertEqual(soa[field], expected)
def _check_ns(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['ns']))
for i, record in enumerate(zone[name]['ns']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(record['host'], records_to_check[i][1])
def _check_mx(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['mx']))
for i, record in enumerate(zone[name]['mx']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(int(record['preference']), records_to_check[i][1])
self.assertEqual(record['host'], records_to_check[i][2])
def _check_a(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['a']))
for i, record in enumerate(zone[name]['a']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(record['ip'], records_to_check[i][1])
def _check_aaaa(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['aaaa']))
for i, record in enumerate(zone[name]['aaaa']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(record['ip'], records_to_check[i][1])
def _check_caa(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['caa']))
for i, record in enumerate(zone[name]['caa']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(int(record['flags']), records_to_check[i][1])
self.assertEqual(record['tag'], records_to_check[i][2])
self.assertEqual(record['val'], records_to_check[i][3])
def _check_cname(self, zone, name, ttl, alias):
record = zone[name]['cname']
self.assertEqual(record['ttl'], ttl)
self.assertEqual(record['alias'], alias)
def _check_ptr(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['ptr']))
for i, record in enumerate(zone[name]['ptr']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(record['host'], records_to_check[i][1])
def _check_txt(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['txt']))
for i, record in enumerate(zone[name]['txt']):
self.assertEqual(record['ttl'], records_to_check[i][0])
for txt_entry in record['txt']:
self.assertLessEqual(len(txt_entry), 255)
long_txt = ''.join(record['txt'])
if records_to_check[i][1]:
self.assertEqual(len(long_txt), records_to_check[i][1])
if records_to_check[i][2]:
self.assertEqual(long_txt, records_to_check[i][2])
def _check_srv(self, zone, name, records_to_check):
self.assertEqual(len(records_to_check), len(zone[name]['srv']))
for i, record in enumerate(zone[name]['srv']):
self.assertEqual(record['ttl'], records_to_check[i][0])
self.assertEqual(int(record['priority']), records_to_check[i][1])
self.assertEqual(int(record['weight']), records_to_check[i][2])
self.assertEqual(int(record['port']), records_to_check[i][3])
self.assertEqual(record['target'], records_to_check[i][4])
def _check_ttl(self, zone, name, rec_type, ttl):
for record in zone[name][rec_type]:
self.assertEqual(record['ttl'], ttl)
def _get_zone_object(self, file_name, zone_name):  # pylint: disable=no-self-use
    """Load a zone file from the test 'zone_files' folder and parse it.

    :param file_name: file name under the zone_files test-data directory.
    :param zone_name: zone origin the file is parsed against.
    :return: parsed zone object as produced by parse_zone_file.
    """
    from azure.cli.core.util import read_file_content
    file_path = os.path.join(TEST_DIR, 'zone_files', file_name)
    # (removed a dead `file_text = None` that was immediately overwritten)
    file_text = read_file_content(file_path)
    return parse_zone_file(file_text, zone_name)
def test_zone_file_1(self):
    """Parse zone1.txt and verify one record set of every supported type."""
    zn = 'zone1.com.'
    zone = self._get_zone_object('zone1.txt', zn)
    self._check_soa(zone, zn, 3600, 1, 3600, 300, 2419200, 300)
    self._check_ns(zone, zn, [
        (172800, 'ns0-00.azure-dns.com.'),
        (172800, 'ns0-00.azure-dns.net.'),
        (172800, 'ns0-00.azure-dns.org.'),
        (172800, 'ns0-00.azure-dns.info.')
    ])
    self._check_ns(zone, 'myns.' + zn, [(3600, 'ns.contoso.com.')])
    self._check_mx(zone, 'mymx.' + zn, [(3600, 1, 'mail.contoso.com.')])
    self._check_a(zone, 'manuala.' + zn, [(3600, '10.0.0.10')])
    self._check_a(zone, 'mya.' + zn, [
        (0, '10.0.1.0'),
        (0, '10.0.1.1')
    ])
    self._check_aaaa(zone, 'myaaaa.' + zn, [(3600, '2001:4898:e0:99:6dc4:6329:1c99:4e69')])
    self._check_cname(zone, 'mycname.' + zn, 3600, 'contoso.com.')
    self._check_ptr(zone, 'myname.' + zn, [(3600, 'myptrdname.')])
    self._check_ptr(zone, 'myptr.' + zn, [(3600, 'contoso.com.')])
    self._check_txt(zone, 'myname2.' + zn, [(3600, 9, 'manualtxt')])
    self._check_txt(zone, 'mytxt2.' + zn, [
        (7200, 7, 'abc def'),
        (7200, 7, 'foo bar')
    ])
    self._check_txt(zone, 'mytxtrs.' + zn, [(3600, 2, 'hi')])
    self._check_srv(zone, 'mysrv.' + zn, [(3600, 1, 2, 1234, 'target.contoso.com.')])
    self._check_caa(zone, 'caa1.' + zn, [
        (60, 0, 'issue', 'ca1.contoso.com'),
        (60, 128, 'iodef', 'mailto:test@contoso.com')
    ])
    self._check_caa(zone, 'caa2.' + zn, [
        (60, 0, 'issue', 'ca1.contoso.com'),
        (60, 45, 'tag56', 'test test test')
    ])

def test_zone_file_2(self):
    """Parse zone2.txt: multi-record sets, SPF-to-TXT conversion, quoting/escaping."""
    zn = 'zone2.com.'
    zone = self._get_zone_object('zone2.txt', zn)
    self._check_txt(zone, 'spaces.' + zn, [(3600, 5, None)])
    self._check_soa(zone, zn, 3600, 10, 900, 600, 86400, 3600)
    self._check_ns(zone, zn, [(3600, 'zone2.com.')])
    self._check_a(zone, 'a2.' + zn, [
        (3600, '1.2.3.4'),
        (3600, '2.3.4.5')
    ])
    self._check_aaaa(zone, 'aaaa2.' + zn, [
        (3600, '2001:cafe:130::100'),
        (3600, '2001:cafe:130::101')
    ])
    self._check_txt(zone, 'doozie.' + zn, [(3600, 108, None)])
    self._check_cname(zone, 'fee2.' + zn, 3600, 'bar.com.')
    self._check_mx(zone, 'mail.' + zn, [
        (3600, 10, 'mail1.mymail.com.'),
        (3600, 11, 'flooble.')
    ])
    self._check_srv(zone, 'sip.tcp.' + zn, [
        (3600, 10, 20, 30, 'foobar.'),
        (3600, 55, 66, 77, 'zoo.')
    ])
    self._check_ns(zone, 'test-ns2.' + zn, [
        (3600, 'ns1.com.'),
        (3600, 'ns2.com.')
    ])
    self._check_txt(zone, 'test-txt2.' + zn, [
        (3600, 8, 'string 1'),
        (3600, 8, 'string 2')
    ])
    self._check_a(zone, 'aa.' + zn, [
        (100, '4.5.6.7'),
        (100, '6.7.8.9')
    ])
    self._check_a(zone, '200.' + zn, [(3600, '7.8.9.0')])
    self._check_mx(zone, 'aa.' + zn, [(300, 1, 'foo.com.' + zn)])
    self._check_txt(zone, 'longtxt2.' + zn, [(100, 500, None)])
    self._check_txt(zone, 'longtxt.' + zn, [(999, 944, None)])
    # SPF records are imported as TXT (per the literal text below)
    self._check_txt(zone, 'myspf.' + zn, [(100, None, 'this is an SPF record! Convert to TXT on import')])  # pylint: disable=line-too-long
    self._check_txt(zone, zn, [
        (200, None, 'this is another SPF, this time as TXT'),
        (200, None, 'v=spf1 mx ip4:14.14.22.0/23 a:mail.trum.ch mx:mese.ch include:spf.mapp.com ?all')  # pylint: disable=line-too-long
    ])
    self._check_ptr(zone, '160.1.' + zn, [(3600, 'foo.com.')])
    self._check_ptr(zone, '160.2.' + zn, [
        (3600, 'foobar.com.'),
        (3600, 'bar.com.')
    ])
    self._check_ptr(zone, '160.3.' + zn, [
        (3600, 'foo.com.'),
        (3600, 'bar.com.')
    ])
    # t1-t11 cover quoting, concatenation and escape handling in TXT data
    self._check_txt(zone, 't1.' + zn, [(3600, None, 'foobar')])
    self._check_txt(zone, 't2.' + zn, [(3600, None, 'foobar')])
    self._check_txt(zone, 't3.' + zn, [(3600, None, 'foobar')])
    self._check_txt(zone, 't4.' + zn, [(3600, None, 'foo;bar')])
    self._check_txt(zone, 't5.' + zn, [(3600, None, 'foo\\;bar')])
    self._check_txt(zone, 't6.' + zn, [(3600, None, 'foo\\;bar')])
    self._check_txt(zone, 't7.' + zn, [(3600, None, '\\"quoted string\\"')])
    self._check_txt(zone, 't8.' + zn, [(3600, None, 'foobar')])
    self._check_txt(zone, 't9.' + zn, [(3600, None, 'foobarr')])
    self._check_txt(zone, 't10.' + zn, [(3600, None, 'foo bar')])
    self._check_txt(zone, 't11.' + zn, [(3600, None, 'foobar')])
    self._check_a(zone, 'base.' + zn, [(3600, '194.124.202.114')])
    self._check_mx(zone, 'base.' + zn, [(3600, 10, 'be.xpiler.de.')])
    self._check_txt(zone, 'base.' + zn, [
        (3600, None, 'v=spf1 mx include:_spf4.xcaign.de include:_spf6.xcaign.de -all'),
        (3600, None, 'spf2.0/mfrom,pra mx ip4:15.19.14.0/24 ip4:8.8.11.4/27 ip4:9.16.20.19/26 -all')  # pylint: disable=line-too-long
    ])
    self._check_a(zone, 'even.' + zn, [(3600, '194.124.202.114')])
    self._check_mx(zone, 'even.' + zn, [(3600, 10, 'be.xpiler.de.')])
    self._check_txt(zone, 'even.' + zn, [(3600, None, 'v=spf1 mx include:_spf4.xgn.de include:_spf6.xgn.de -all')])  # pylint: disable=line-too-long

def test_zone_file_3(self):
    """Parse zone3.txt and verify its record sets (incl. names sharing multiple types)."""
    zn = 'zone3.com.'
    zone = self._get_zone_object('zone3.txt', zn)
    self._check_soa(zone, zn, 86400, 2003080800, 43200, 900, 1814400, 10800)
    self._check_ns(zone, zn, [(86400, 'ns1.com.')])
    self._check_a(zone, 'test-a.' + zn, [(3600, '1.2.3.4')])
    self._check_aaaa(zone, 'test-aaaa.' + zn, [(3600, '2001:cafe:130::100')])
    self._check_cname(zone, 'test-cname.' + zn, 3600, 'target.com.')
    self._check_mx(zone, 'test-mx.' + zn, [(3600, 10, 'mail.com.')])
    self._check_ns(zone, 'test-ns.' + zn, [(3600, 'ns1.com.')])
    self._check_srv(zone, '_sip._tcp.test-srv.' + zn, [(3600, 1, 2, 3, 'target.com.')])
    self._check_txt(zone, 'test-txt.' + zn, [(3600, None, 'string 1')])
    # 'd1' carries A, NS and TXT sets under the same name
    self._check_a(zone, 'd1.' + zn, [
        (3600, '12.1.2.3'),
        (3600, '12.2.3.4'),
        (3600, '12.3.4.5'),
        (3600, '12.4.5.6')
    ])
    self._check_ns(zone, 'd1.' + zn, [(3600, 'hood.com.')])
    self._check_txt(zone, 'd1.' + zn, [(3600, None, 'fishfishfish')])
    self._check_a(zone, 'f1.' + zn, [
        (3600, '11.1.2.3'),
        (3600, '11.2.3.3')
    ])
    self._check_a(zone, 'f2.' + zn, [
        (3600, '11.2.3.4'),
        (3600, '11.5.6.7')
    ])
    self._check_srv(zone, '_sip._tcp.' + zn, [(3600, 10, 20, 30, 'foo.com.')])
    self._check_mx(zone, 'mail.' + zn, [(3600, 100, 'mail.test.com.')])
    self._check_a(zone, 'noclass.' + zn, [
        (3600, '1.2.3.4'),
        (3600, '2.3.4.5')
    ])
    self._check_txt(zone, 'txt1.' + zn, [(3600, None, 'string 1 only')])
    self._check_txt(zone, 'txt2.' + zn, [(3600, None, 'string1string2')])
    self._check_txt(zone, 'txt3.' + zn, [
        (3600, 296, None),
        (3600, None, 'string;string;string')
    ])

def test_zone_file_4(self):
    """Parse zone4.txt: TTLs with unit suffixes (w/d/h/m/s) normalize to seconds."""
    zn = 'zone4.com.'
    zone = self._get_zone_object('zone4.txt', zn)
    self._check_soa(zone, zn, 3600, 2003080800, 43200, 900, 1814400, 10800)
    self._check_ns(zone, zn, [(100, 'ns1.' + zn)])
    self._check_ttl(zone, 'ttl-300.' + zn, 'a', 300)
    self._check_ttl(zone, 'ttl-0.' + zn, 'a', 0)
    self._check_ttl(zone, 'ttl-60.' + zn, 'a', 60)
    self._check_ttl(zone, 'ttl-1w.' + zn, 'a', 604800)
    self._check_ttl(zone, 'ttl-1d.' + zn, 'a', 86400)
    self._check_ttl(zone, 'ttl-1h.' + zn, 'a', 3600)
    self._check_ttl(zone, 'ttl-99s.' + zn, 'a', 99)
    self._check_ttl(zone, 'ttl-100.' + zn, 'a', 100)
    self._check_ttl(zone, 'ttl-6m.' + zn, 'a', 360)
    self._check_ttl(zone, 'ttl-mix.' + zn, 'a', 788645)
    self._check_ttl(zone, 'xttl-1w.' + zn, 'a', 604800)
    self._check_ttl(zone, 'xttl-1d.' + zn, 'a', 86400)
    self._check_ttl(zone, 'xttl-1h.' + zn, 'a', 3600)
    self._check_ttl(zone, 'xttl-99s.' + zn, 'a', 99)
    self._check_ttl(zone, 'xttl-100.' + zn, 'a', 100)
    self._check_ttl(zone, 'xttl-6m.' + zn, 'a', 360)
    self._check_ttl(zone, 'xttl-mix.' + zn, 'a', 788645)
    self._check_a(zone, 'c1.' + zn, [
        (10, '11.1.2.3'),
        (10, '11.2.3.3')
    ])
    self._check_a(zone, 'c2.' + zn, [
        (5, '11.2.3.4'),
        (5, '11.5.6.7')
    ])

def test_zone_file_5(self):
    """Parse zone5.txt: relative names and subzone names resolve against the origin."""
    zn = 'zone5.com.'
    zone = self._get_zone_object('zone5.txt', zn)
    self._check_soa(zone, zn, 3600, 2003080800, 43200, 900, 1814400, 10800)
    self._check_a(zone, 'default.' + zn, [(3600, '0.1.2.3')])
    self._check_cname(zone, 'tc.' + zn, 3600, 'test.' + zn)
    self._check_a(zone, zn, [(3600, '1.2.3.4')])
    self._check_a(zone, 'www.' + zn, [(3600, '2.3.4.5')])
    # relative targets gain the zone suffix...
    self._check_cname(zone, 'test-cname.' + zn, 3600, 'r1.' + zn)
    self._check_mx(zone, 'test-mx.' + zn, [(3600, 10, 'm1.' + zn)])
    self._check_ns(zone, 'test-ns.' + zn, [(3600, 'ns1.' + zn)])
    self._check_srv(zone, 'test-srv.' + zn, [(3600, 1, 2, 3, 'srv1.' + zn)])
    # ...while absolute (dot-terminated) targets stay as written
    self._check_cname(zone, 'test-cname2.' + zn, 3600, 'r1.')
    self._check_mx(zone, 'test-mx2.' + zn, [(3600, 10, 'm1.')])
    self._check_ns(zone, 'test-ns2.' + zn, [(3600, 'ns1.')])
    self._check_srv(zone, 'test-srv2.' + zn, [(3600, 1, 2, 3, 'srv1.')])
    self._check_a(zone, 'subzone.' + zn, [(3600, '3.4.5.6')])
    self._check_a(zone, 'www.subzone.' + zn, [(3600, '4.5.6.7')])
    self._check_cname(zone, 'test-cname.subzone.' + zn, 3600, 'r1.subzone.' + zn)
    self._check_cname(zone, 'record.' + zn, 3600, 'bar.foo.com.')
    self._check_a(zone, 'test.' + zn, [(3600, '7.8.9.0')])

def test_zone_file_6(self):
    """Parse zone6.txt and verify its A and NS record sets."""
    zn = 'zone6.com.'
    zone = self._get_zone_object('zone6.txt', zn)
    self._check_soa(zone, zn, 3600, 1, 3600, 300, 2419200, 300)
    self._check_a(zone, 'www.' + zn, [(3600, '1.1.1.1')])
    self._check_a(zone, zn, [(3600, '1.1.1.1')])
    self._check_ns(zone, zn, [
        (172800, 'ns1-03.azure-dns.com.'),
        (172800, 'ns2-03.azure-dns.net.'),
        (172800, 'ns3-03.azure-dns.org.'),
        (172800, 'ns4-03.azure-dns.info.'),
    ])

def test_zone_file_7(self):
    """Parse zone7.txt: TXT records containing backslash escape sequences."""
    zn = 'zone7.com.'
    zone = self._get_zone_object('zone7.txt', zn)
    self._check_soa(zone, zn, 3600, 1, 3600, 300, 2419200, 300)
    self._check_txt(zone, zn, [(60, None, 'a\\\\b\\255\\000\\;\\"\\"\\"testtesttest\\"\\"\\"')])
    self._check_txt(zone, 'txt1.' + zn, [(3600, None, 'ab\\ cd')])
    self._check_cname(zone, 'cn1.' + zn, 3600, 'contoso.com.')
    self._check_ns(zone, zn, [
        (172800, 'ns1-03.azure-dns.com.'),
        (172800, 'ns2-03.azure-dns.net.'),
        (172800, 'ns3-03.azure-dns.org.'),
        (172800, 'ns4-03.azure-dns.info.'),
    ])

def test_zone_file_8(self):
    """Parse zone8.txt: wildcard ('*') record names are preserved."""
    zn = 'zone8.com.'
    zone = self._get_zone_object('zone8.txt', zn)
    self._check_soa(zone, zn, 3600, 1, 3600, 300, 2419200, 300)
    self._check_a(zone, 'ns.' + zn, [(3600, '1.2.3.4')])
    self._check_ns(zone, zn, [
        (172800, 'ns1-03.azure-dns.com.'),
        (172800, 'ns2-03.azure-dns.net.'),
        (172800, 'ns3-03.azure-dns.org.'),
        (172800, 'ns4-03.azure-dns.info.'),
    ])
    self._check_a(zone, '*.' + zn, [(3600, '2.3.4.5')])

def test_zone_import_errors(self):
    """Malformed zone files must surface as CLIError on import."""
    from knack.util import CLIError
    for f in ['fail1', 'fail2', 'fail3', 'fail4', 'fail5']:
        with self.assertRaises(CLIError):
            self._get_zone_object('{}.txt'.format(f), 'example.com')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for utility functions that manipulate or compare data.
We use a few special data formats internally, these utility functions make it a
little easier to work with them.
"""
import collections
import difflib
import itertools
import logging
import math
import numbers
import pprint
import struct
import sys
from mutablerecords import records
from past.builtins import long
from past.builtins import unicode
from enum import Enum
import six
# Used by convert_to_base_types().
# Values of these exact types are returned unchanged; everything else is
# recursively converted (long/unicode come from past.builtins for py2/3 compat).
PASSTHROUGH_TYPES = {bool, bytes, int, long, type(None), unicode}
def pprint_diff(first, second, first_name='first', second_name='second'):
    """Compare the pprint representation of two objects and yield diff lines."""
    first_lines = pprint.pformat(first).splitlines()
    second_lines = pprint.pformat(second).splitlines()
    return difflib.unified_diff(first_lines, second_lines,
                                fromfile=first_name, tofile=second_name,
                                lineterm='')
def equals_log_diff(expected, actual, level=logging.ERROR):
    """Compare two string blobs, logging a diff at *level* if they don't match.

    Args:
      expected: expected text blob.
      actual: actual text blob.
      level: logging level used for the diff output.

    Returns:
      True if the blobs are equal, False otherwise.  (Previously the mismatch
      path fell off the end and returned None implicitly; returning an
      explicit False is backward compatible since both are falsy.)
    """
    if expected == actual:
        return True
    # Output the diff first.
    logging.log(level, '***** Data mismatch: *****')
    for line in difflib.unified_diff(
        expected.splitlines(), actual.splitlines(),
        fromfile='expected', tofile='actual', lineterm=''):
        logging.log(level, line)
    logging.log(level, '^^^^^ Data diff ^^^^^')
    return False
def assert_records_equal_nonvolatile(first, second, volatile_fields, indent=0):
    """Compare two test_record tuples, ignoring any volatile fields.

    'Volatile' fields include any fields that are expected to differ between
    successive runs of the same test, mainly timestamps.  All other fields
    are recursively compared.

    Args:
      first: dict, namedtuple, record, iterable, or leaf value to compare.
      second: object compared structurally against first.
      volatile_fields: container of dict keys that are skipped entirely.
      indent: indentation level used when logging the location of a mismatch.

    Raises:
      AssertionError: if any non-volatile field differs.
    """
    if isinstance(first, dict) and isinstance(second, dict):
        if set(first) != set(second):
            logging.error('%sMismatching keys:', ' ' * indent)
            logging.error('%s %s', ' ' * indent, list(first.keys()))
            logging.error('%s %s', ' ' * indent, list(second.keys()))
            assert set(first) == set(second)
        for key in first:
            if key in volatile_fields:
                continue
            try:
                assert_records_equal_nonvolatile(first[key], second[key],
                                                 volatile_fields, indent + 2)
            except AssertionError:
                # Log the key so the failure path through nested data is visible.
                logging.error('%sKey: %s ^', ' ' * indent, key)
                raise
    elif hasattr(first, '_asdict') and hasattr(second, '_asdict'):
        # Compare namedtuples as dicts so we get more useful output.
        assert_records_equal_nonvolatile(first._asdict(), second._asdict(),
                                         volatile_fields, indent)
    elif (hasattr(first, '__iter__') and hasattr(second, '__iter__') and
          not isinstance(first, six.string_types) and
          not isinstance(second, six.string_types)):
        # Builtin zip instead of itertools.izip: izip does not exist on
        # Python 3.  Strings are excluded (they define __iter__ on Python 3,
        # which would recurse char-by-char forever) and fall through to the
        # leaf comparison below, matching the string handling in total_size().
        for idx, (fir, sec) in enumerate(zip(first, second)):
            try:
                assert_records_equal_nonvolatile(fir, sec, volatile_fields,
                                                 indent + 2)
            except AssertionError:
                logging.error('%sIndex: %s ^', ' ' * indent, idx)
                raise
    elif (isinstance(first, records.RecordClass) and
          isinstance(second, records.RecordClass)):
        # Compare mutablerecords instances attribute-by-attribute as dicts.
        assert_records_equal_nonvolatile(
            {slot: getattr(first, slot) for slot in first.__slots__},
            {slot: getattr(second, slot) for slot in second.__slots__},
            volatile_fields, indent)
    elif first != second:
        logging.error('%sRaw: "%s" != "%s"', ' ' * indent, first, second)
        assert first == second
def convert_to_base_types(obj, ignore_keys=tuple(), tuple_type=tuple,
                          json_safe=True):
    """Recursively convert objects into base types.

    This is used to convert some special types of objects used internally into
    base types for more friendly output via mechanisms such as JSON.  It is
    used for sending internal objects via the network and outputting test
    records.  Specifically, the conversions that are performed:

      - If an object has an as_base_types() method, immediately return the
        result without any recursion; this can be used with caching in the
        object to prevent unnecessary conversions.
      - If an object has an _asdict() method, use that to convert it to a dict
        and recursively converting its contents.
      - mutablerecords Record instances are converted to dicts that map
        attribute name to value.  Optional attributes with a value of None are
        skipped.
      - Enum instances are converted to strings via their .name attribute.
      - Real and integral numbers are converted to built-in types.
      - Byte and unicode strings are left alone (instances of
        six.string_types).
      - Other non-None values are converted to strings via str().

    The return value contains only the Python built-in types: dict, list,
    tuple, str, unicode, int, float, long, bool, and NoneType (unless
    tuple_type is set to something else).  If tuples should be converted to
    lists (e.g. for an encoding that does not differentiate between the two),
    pass 'tuple_type=list' as an argument.

    If `json_safe` is True, then the float 'inf', '-inf', and 'nan' values
    will be converted to strings.  This ensures that the returned dictionary
    can be passed to json.dumps to create valid JSON.  Otherwise, json.dumps
    may return values such as NaN which are not valid JSON.
    """
    # Because it's *really* annoying to pass a single string accidentally.
    assert not isinstance(ignore_keys, six.string_types), 'Pass a real iterable!'

    if hasattr(obj, 'as_base_types'):
        return obj.as_base_types()
    if hasattr(obj, '_asdict'):
        obj = obj._asdict()
    elif isinstance(obj, records.RecordClass):
        obj = {attr: getattr(obj, attr)
               for attr in type(obj).all_attribute_names
               if (getattr(obj, attr, None) is not None or
                   attr in type(obj).required_attributes)}
    elif isinstance(obj, Enum):
        obj = obj.name

    if type(obj) in PASSTHROUGH_TYPES:
        return obj

    # Recursively convert values in dicts, lists, and tuples.
    if isinstance(obj, dict):
        # Bug fix: json_safe is now forwarded here as well -- previously dict
        # keys/values were converted with the default (json_safe=True)
        # regardless of the caller's setting, unlike the list/tuple branches.
        return {convert_to_base_types(k, ignore_keys, tuple_type, json_safe):
                convert_to_base_types(v, ignore_keys, tuple_type, json_safe)
                for k, v in six.iteritems(obj) if k not in ignore_keys}
    elif isinstance(obj, list):
        return [convert_to_base_types(val, ignore_keys, tuple_type, json_safe)
                for val in obj]
    elif isinstance(obj, tuple):
        return tuple_type(
            convert_to_base_types(value, ignore_keys, tuple_type, json_safe)
            for value in obj)
    # Convert numeric types (e.g. numpy ints and floats) into built-in types.
    elif isinstance(obj, numbers.Integral):
        return long(obj)
    elif isinstance(obj, numbers.Real):
        as_float = float(obj)
        if json_safe and (math.isinf(as_float) or math.isnan(as_float)):
            # Not representable in strict JSON; stringify instead.
            return str(as_float)
        return as_float

    # Convert all other types to strings.
    try:
        return str(obj)
    except:  # pylint: disable=bare-except
        logging.warning('Problem casting object of type %s to str.', type(obj))
        raise
def total_size(obj):
    """Returns the approximate total memory footprint an object.

    Recursively sums sys.getsizeof() over all reachable objects.  Each object
    is counted once; repeated references are charged the size of a pointer.
    """
    seen = set()
    # collections.abc holds the container ABCs on Python 3 (the aliases on
    # the collections module itself were removed in Python 3.10); fall back
    # to collections for Python 2 compatibility.
    abc = getattr(collections, 'abc', collections)

    def sizeof(current_obj):
        try:
            return _sizeof(current_obj)
        except Exception:  # pylint: disable=broad-except
            # Not sure what just happened, but let's assume it's a reference.
            return struct.calcsize('P')

    def _sizeof(current_obj):
        """Do a depth-first acyclic traversal of all reachable objects."""
        if id(current_obj) in seen:
            # A rough approximation of the size cost of an additional reference.
            return struct.calcsize('P')
        seen.add(id(current_obj))
        size = sys.getsizeof(current_obj)
        if isinstance(current_obj, dict):
            # Count both keys and values.
            size += sum(map(sizeof, itertools.chain.from_iterable(
                six.iteritems(current_obj))))
        elif (isinstance(current_obj, abc.Iterable) and
              not isinstance(current_obj, six.string_types)):
            size += sum(sizeof(item) for item in current_obj)
        elif isinstance(current_obj, records.RecordClass):
            size += sum(sizeof(getattr(current_obj, attr))
                        for attr in current_obj.__slots__)
        return size
    return sizeof(obj)
|
|
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
NaT,
Period,
PeriodIndex,
RangeIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
isna,
timedelta_range,
to_timedelta,
)
import pandas._testing as tm
from pandas.core import nanops
def get_objs():
    """Build assorted Index flavors plus a Series backed by each of them."""
    indexes = [
        tm.makeBoolIndex(10, name="a"),
        tm.makeIntIndex(10, name="a"),
        tm.makeFloatIndex(10, name="a"),
        tm.makeDateIndex(10, name="a"),
        tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern"),
        tm.makePeriodIndex(10, name="a"),
        tm.makeStringIndex(10, name="a"),
        tm.makeUnicodeIndex(10, name="a"),
    ]
    values = np.random.randn(10)
    series = [Series(values, index=idx, name="a") for idx in indexes]
    return indexes + series
# Module-level fixture list consumed by the parametrized tests below.
objs = get_objs()
class TestReductions:
    """min/max reduction behavior shared by Index and Series objects."""

    @pytest.mark.parametrize("opname", ["max", "min"])
    @pytest.mark.parametrize("obj", objs)
    def test_ops(self, opname, obj):
        # Reductions on the pandas object must agree with the underlying values.
        result = getattr(obj, opname)()
        if not isinstance(obj, PeriodIndex):
            expected = getattr(obj.values, opname)()
        else:
            # PeriodIndex reduces through its ordinal (i8) representation.
            expected = Period(ordinal=getattr(obj.asi8, opname)(), freq=obj.freq)
        if getattr(obj, "tz", None) is not None:
            # We need to de-localize before comparing to the numpy-produced result
            expected = expected.astype("M8[ns]").astype("int64")
            assert result.value == expected
        else:
            assert result == expected

    @pytest.mark.parametrize("opname", ["max", "min"])
    @pytest.mark.parametrize(
        "dtype, val",
        [
            ("object", 2.0),
            ("float64", 2.0),
            ("datetime64[ns]", datetime(2011, 11, 1)),
            ("Int64", 2),
            ("boolean", True),
        ],
    )
    def test_nanminmax(self, opname, dtype, val, index_or_series):
        # GH#7261
        klass = index_or_series

        if dtype in ["Int64", "boolean"] and klass == Index:
            pytest.skip("EAs can't yet be stored in an index")

        def check_missing(res):
            # The 'missing' sentinel differs per dtype (NaT / pd.NA / NaN).
            if dtype == "datetime64[ns]":
                return res is NaT
            elif dtype == "Int64":
                return res is pd.NA
            else:
                return isna(res)

        # All-missing and empty objects reduce to the missing sentinel,
        # with and without skipna.
        obj = klass([None], dtype=dtype)
        assert check_missing(getattr(obj, opname)())
        assert check_missing(getattr(obj, opname)(skipna=False))

        obj = klass([], dtype=dtype)
        assert check_missing(getattr(obj, opname)())
        assert check_missing(getattr(obj, opname)(skipna=False))

        if dtype == "object":
            # generic test with object only works for empty / all NaN
            return

        # Missing values are skipped by default but poison skipna=False.
        obj = klass([None, val], dtype=dtype)
        assert getattr(obj, opname)() == val
        assert check_missing(getattr(obj, opname)(skipna=False))

        obj = klass([None, val, None], dtype=dtype)
        assert getattr(obj, opname)() == val
        assert check_missing(getattr(obj, opname)(skipna=False))

    @pytest.mark.parametrize("opname", ["max", "min"])
    def test_nanargminmax(self, opname, index_or_series):
        # GH#7261
        klass = index_or_series
        # Index spells these argmin/argmax; Series spells them idxmin/idxmax.
        arg_op = "arg" + opname if klass is Index else "idx" + opname

        obj = klass([NaT, datetime(2011, 11, 1)])
        assert getattr(obj, arg_op)() == 1
        result = getattr(obj, arg_op)(skipna=False)
        if klass is Series:
            # Series returns NaN for the skipna=False case; Index returns -1.
            assert np.isnan(result)
        else:
            assert result == -1

        obj = klass([NaT, datetime(2011, 11, 1), NaT])
        # check DatetimeIndex non-monotonic path
        assert getattr(obj, arg_op)() == 1
        result = getattr(obj, arg_op)(skipna=False)
        if klass is Series:
            assert np.isnan(result)
        else:
            assert result == -1

    @pytest.mark.parametrize("opname", ["max", "min"])
    @pytest.mark.parametrize("dtype", ["M8[ns]", "datetime64[ns, UTC]"])
    def test_nanops_empty_object(self, opname, index_or_series, dtype):
        klass = index_or_series
        arg_op = "arg" + opname if klass is Index else "idx" + opname

        obj = klass([], dtype=dtype)

        # min/max of an empty datetime-like object is NaT...
        assert getattr(obj, opname)() is NaT
        assert getattr(obj, opname)(skipna=False) is NaT

        # ...but argmin/argmax must raise on an empty sequence.
        with pytest.raises(ValueError, match="empty sequence"):
            getattr(obj, arg_op)()
        with pytest.raises(ValueError, match="empty sequence"):
            getattr(obj, arg_op)(skipna=False)

    def test_argminmax(self):
        # argmin/argmax with and without NA values; -1 signals NA/all-NA.
        obj = Index(np.arange(5, dtype="int64"))
        assert obj.argmin() == 0
        assert obj.argmax() == 4

        obj = Index([np.nan, 1, np.nan, 2])
        assert obj.argmin() == 1
        assert obj.argmax() == 3
        assert obj.argmin(skipna=False) == -1
        assert obj.argmax(skipna=False) == -1

        obj = Index([np.nan])
        assert obj.argmin() == -1
        assert obj.argmax() == -1
        assert obj.argmin(skipna=False) == -1
        assert obj.argmax(skipna=False) == -1

        # Same behavior for datetime-like NA (NaT).
        obj = Index([NaT, datetime(2011, 11, 1), datetime(2011, 11, 2), NaT])
        assert obj.argmin() == 1
        assert obj.argmax() == 2
        assert obj.argmin(skipna=False) == -1
        assert obj.argmax(skipna=False) == -1

        obj = Index([NaT])
        assert obj.argmin() == -1
        assert obj.argmax() == -1
        assert obj.argmin(skipna=False) == -1
        assert obj.argmax(skipna=False) == -1

    @pytest.mark.parametrize("op, expected_col", [["max", "a"], ["min", "b"]])
    def test_same_tz_min_max_axis_1(self, op, expected_col):
        # GH 10390
        df = DataFrame(
            date_range("2016-01-01 00:00:00", periods=3, tz="UTC"), columns=["a"]
        )
        # column 'b' is one hour earlier, so max selects 'a' and min selects 'b'
        df["b"] = df.a.subtract(Timedelta(seconds=3600))
        result = getattr(df, op)(axis=1)
        expected = df[expected_col].rename(None)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("func", ["maximum", "minimum"])
    def test_numpy_reduction_with_tz_aware_dtype(self, tz_aware_fixture, func):
        # GH 15552
        tz = tz_aware_fixture
        arg = pd.to_datetime(["2019"]).tz_localize(tz)
        expected = Series(arg)
        # element-wise np.maximum/np.minimum must preserve the tz-aware dtype
        result = getattr(np, func)(expected, expected)
        tm.assert_series_equal(result, expected)
class TestIndexReductions:
    # Note: the name TestIndexReductions indicates these tests
    # were moved from a Index-specific test file, _not_ that these tests are
    # intended long-term to be Index-specific

    @pytest.mark.parametrize(
        "start,stop,step",
        [
            (0, 400, 3),
            (500, 0, -6),
            (-(10 ** 6), 10 ** 6, 4),
            (10 ** 6, -(10 ** 6), -4),
            (0, 10, 20),
        ],
    )
    def test_max_min_range(self, start, stop, step):
        # GH#17607
        # RangeIndex.max/min must agree with the materialized Int64Index.
        idx = RangeIndex(start, stop, step)
        expected = idx._int64index.max()
        result = idx.max()
        assert result == expected

        # skipna should be irrelevant since RangeIndex should never have NAs
        result2 = idx.max(skipna=False)
        assert result2 == expected

        expected = idx._int64index.min()
        result = idx.min()
        assert result == expected

        # skipna should be irrelevant since RangeIndex should never have NAs
        result2 = idx.min(skipna=False)
        assert result2 == expected

        # empty: negating the step yields a zero-length range -> NA result
        idx = RangeIndex(start, stop, -step)
        assert isna(idx.max())
        assert isna(idx.min())

    def test_minmax_timedelta64(self):
        """min/max/argmin/argmax on TimedeltaIndex skip NaT and do not
        depend on monotonicity."""
        # monotonic
        idx1 = TimedeltaIndex(["1 days", "2 days", "3 days"])
        assert idx1.is_monotonic

        # non-monotonic
        idx2 = TimedeltaIndex(["1 days", np.nan, "3 days", "NaT"])
        assert not idx2.is_monotonic

        for idx in [idx1, idx2]:
            assert idx.min() == Timedelta("1 days")
            assert idx.max() == Timedelta("3 days")
            assert idx.argmin() == 0
            assert idx.argmax() == 2

    @pytest.mark.parametrize("op", ["min", "max"])
    def test_minmax_timedelta_empty_or_na(self, op):
        # Return NaT for an empty or all-NaT TimedeltaIndex
        obj = TimedeltaIndex([])
        assert getattr(obj, op)() is NaT

        obj = TimedeltaIndex([NaT])
        assert getattr(obj, op)() is NaT

        obj = TimedeltaIndex([NaT, NaT, NaT])
        assert getattr(obj, op)() is NaT

    def test_numpy_minmax_timedelta64(self):
        """np.min/max/argmin/argmax dispatch to TimedeltaIndex; the
        unsupported ``out`` argument must raise."""
        td = timedelta_range("16815 days", "16820 days", freq="D")

        assert np.min(td) == Timedelta("16815 days")
        assert np.max(td) == Timedelta("16820 days")

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.min(td, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.max(td, out=0)

        assert np.argmin(td) == 0
        assert np.argmax(td) == 5

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.argmin(td, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.argmax(td, out=0)

    def test_timedelta_ops(self):
        # GH#4984
        # make sure ops return Timedelta
        s = Series(
            [Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
        )
        td = s.diff()

        result = td.mean()
        expected = to_timedelta(timedelta(seconds=9))
        assert result == expected

        result = td.to_frame().mean()
        assert result[0] == expected

        result = td.quantile(0.1)
        expected = Timedelta(np.timedelta64(2600, "ms"))
        assert result == expected

        result = td.median()
        expected = to_timedelta("00:00:09")
        assert result == expected

        result = td.to_frame().median()
        assert result[0] == expected

        # GH#6462
        # consistency in returned values for sum
        result = td.sum()
        expected = to_timedelta("00:01:21")
        assert result == expected

        result = td.to_frame().sum()
        assert result[0] == expected

        # std
        result = td.std()
        expected = to_timedelta(Series(td.dropna().values).std())
        assert result == expected

        result = td.to_frame().std()
        assert result[0] == expected

        # GH#10040
        # make sure NaT is properly handled by median()
        s = Series([Timestamp("2015-02-03"), Timestamp("2015-02-07")])
        assert s.diff().median() == timedelta(days=4)

        s = Series(
            [Timestamp("2015-02-03"), Timestamp("2015-02-07"), Timestamp("2015-02-15")]
        )
        assert s.diff().median() == timedelta(days=6)

    @pytest.mark.parametrize("opname", ["skew", "kurt", "sem", "prod", "var"])
    def test_invalid_td64_reductions(self, opname):
        # Reductions without timedelta semantics must raise TypeError.
        s = Series(
            [Timestamp("20130101") + timedelta(seconds=i * i) for i in range(10)]
        )
        td = s.diff()

        # The exact message differs across internal code paths; accept any.
        msg = "|".join(
            [
                f"reduction operation '{opname}' not allowed for this dtype",
                rf"cannot perform {opname} with type timedelta64\[ns\]",
                f"'TimedeltaArray' does not implement reduction '{opname}'",
            ]
        )

        with pytest.raises(TypeError, match=msg):
            getattr(td, opname)()

        with pytest.raises(TypeError, match=msg):
            getattr(td.to_frame(), opname)(numeric_only=False)

    def test_minmax_tz(self, tz_naive_fixture):
        """min/max/argmin/argmax on a tz-aware DatetimeIndex skip NaT and
        preserve the timezone."""
        tz = tz_naive_fixture
        # monotonic
        idx1 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz=tz)
        assert idx1.is_monotonic

        # non-monotonic
        idx2 = DatetimeIndex(
            ["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], tz=tz
        )
        assert not idx2.is_monotonic

        for idx in [idx1, idx2]:
            assert idx.min() == Timestamp("2011-01-01", tz=tz)
            assert idx.max() == Timestamp("2011-01-03", tz=tz)
            assert idx.argmin() == 0
            assert idx.argmax() == 2

    @pytest.mark.parametrize("op", ["min", "max"])
    def test_minmax_nat_datetime64(self, op):
        # Return NaT for an empty or all-NaT DatetimeIndex
        obj = DatetimeIndex([])
        assert isna(getattr(obj, op)())

        obj = DatetimeIndex([NaT])
        assert isna(getattr(obj, op)())

        obj = DatetimeIndex([NaT, NaT, NaT])
        assert isna(getattr(obj, op)())

    def test_numpy_minmax_integer(self):
        # GH#26125
        # np.min/max/argmin/argmax on an integer Index match the underlying
        # ndarray and reject the unsupported ``out`` argument.
        idx = Index([1, 2, 3])

        expected = idx.values.max()
        result = np.max(idx)
        assert result == expected

        expected = idx.values.min()
        result = np.min(idx)
        assert result == expected

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.min(idx, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.max(idx, out=0)

        expected = idx.values.argmax()
        result = np.argmax(idx)
        assert result == expected

        expected = idx.values.argmin()
        result = np.argmin(idx)
        assert result == expected

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.argmin(idx, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.argmax(idx, out=0)

    def test_numpy_minmax_range(self):
        # GH#26125
        # Same as the integer case, but for RangeIndex.
        idx = RangeIndex(0, 10, 3)

        expected = idx._int64index.max()
        result = np.max(idx)
        assert result == expected

        expected = idx._int64index.min()
        result = np.min(idx)
        assert result == expected

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.min(idx, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.max(idx, out=0)

        # No need to test again argmax/argmin compat since the implementation
        # is the same as basic integer index

    def test_numpy_minmax_datetime64(self):
        """np.min/max/argmin/argmax on a DatetimeIndex."""
        dr = date_range(start="2016-01-15", end="2016-01-20")

        assert np.min(dr) == Timestamp("2016-01-15 00:00:00")
        assert np.max(dr) == Timestamp("2016-01-20 00:00:00")

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.min(dr, out=0)

        with pytest.raises(ValueError, match=errmsg):
            np.max(dr, out=0)

        assert np.argmin(dr) == 0
        assert np.argmax(dr) == 5

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.argmin(dr, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.argmax(dr, out=0)

    def test_minmax_period(self):
        """min/max/argmin/argmax on PeriodIndex skip NaT; empty or all-NaT
        indexes reduce to NaT."""
        # monotonic
        idx1 = PeriodIndex([NaT, "2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        assert not idx1.is_monotonic
        assert idx1[1:].is_monotonic

        # non-monotonic
        idx2 = PeriodIndex(
            ["2011-01-01", NaT, "2011-01-03", "2011-01-02", NaT], freq="D"
        )
        assert not idx2.is_monotonic

        for idx in [idx1, idx2]:
            assert idx.min() == Period("2011-01-01", freq="D")
            assert idx.max() == Period("2011-01-03", freq="D")
        # arg positions differ because idx1 has its NaT in front
        assert idx1.argmin() == 1
        assert idx2.argmin() == 0
        assert idx1.argmax() == 3
        assert idx2.argmax() == 2

        for op in ["min", "max"]:
            # Return NaT
            obj = PeriodIndex([], freq="M")
            result = getattr(obj, op)()
            assert result is NaT

            obj = PeriodIndex([NaT], freq="M")
            result = getattr(obj, op)()
            assert result is NaT

            obj = PeriodIndex([NaT, NaT, NaT], freq="M")
            result = getattr(obj, op)()
            assert result is NaT

    def test_numpy_minmax_period(self):
        """np.min/max/argmin/argmax on a PeriodIndex."""
        pr = pd.period_range(start="2016-01-15", end="2016-01-20")

        assert np.min(pr) == Period("2016-01-15", freq="D")
        assert np.max(pr) == Period("2016-01-20", freq="D")

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.min(pr, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.max(pr, out=0)

        assert np.argmin(pr) == 0
        assert np.argmax(pr) == 5

        errmsg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=errmsg):
            np.argmin(pr, out=0)
        with pytest.raises(ValueError, match=errmsg):
            np.argmax(pr, out=0)

    def test_min_max_categorical(self):
        """min/max require an ordered Categorical; the ordering follows the
        declared categories, not lexicographic order."""
        ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
        msg = (
            r"Categorical is not ordered for operation min\n"
            r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n"
        )
        with pytest.raises(TypeError, match=msg):
            ci.min()
        msg = (
            r"Categorical is not ordered for operation max\n"
            r"you can use .as_ordered\(\) to change the Categorical to an ordered one\n"
        )
        with pytest.raises(TypeError, match=msg):
            ci.max()

        ci = pd.CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=True)
        # per the declared ordering c < a < b, so "c" is min and "b" is max
        assert ci.min() == "c"
        assert ci.max() == "b"
class TestSeriesReductions:
    # Note: the name TestSeriesReductions indicates these tests
    # were moved from a series-specific test file, _not_ that these tests are
    # intended long-term to be series-specific

    def test_sum_inf(self):
        # sum propagates inf; under mode.use_inf_as_na, inf counts as NA.
        s = Series(np.random.randn(10))
        s2 = s.copy()

        s[5:8] = np.inf
        s2[5:8] = np.nan

        assert np.isinf(s.sum())

        arr = np.random.randn(100, 100).astype("f4")
        arr[:, 2] = np.inf

        with pd.option_context("mode.use_inf_as_na", True):
            tm.assert_almost_equal(s.sum(), s2.sum())

        res = nanops.nansum(arr, axis=1)
        assert np.isinf(res).all()

    @pytest.mark.parametrize("dtype", ["float64", "Int64", "boolean", "object"])
    @pytest.mark.parametrize("use_bottleneck", [True, False])
    @pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
    def test_empty(self, method, unit, use_bottleneck, dtype):
        """sum/prod identity (0/1) on empty and all-NA input, and the
        interaction with min_count."""
        with pd.option_context("use_bottleneck", use_bottleneck):
            # GH#9422 / GH#18921
            # Entirely empty
            s = Series([], dtype=dtype)
            # NA by default
            result = getattr(s, method)()
            assert result == unit

            # Explicit
            result = getattr(s, method)(min_count=0)
            assert result == unit

            result = getattr(s, method)(min_count=1)
            assert isna(result)

            # Skipna, default
            result = getattr(s, method)(skipna=True)
            # BUG FIX: this was a bare `result == unit` comparison (a no-op);
            # it is now asserted.
            assert result == unit

            # Skipna, explicit
            result = getattr(s, method)(skipna=True, min_count=0)
            assert result == unit

            result = getattr(s, method)(skipna=True, min_count=1)
            assert isna(result)

            result = getattr(s, method)(skipna=False, min_count=0)
            assert result == unit

            result = getattr(s, method)(skipna=False, min_count=1)
            assert isna(result)

            # All-NA
            s = Series([np.nan], dtype=dtype)
            # NA by default
            result = getattr(s, method)()
            assert result == unit

            # Explicit
            result = getattr(s, method)(min_count=0)
            assert result == unit

            result = getattr(s, method)(min_count=1)
            assert isna(result)

            # Skipna, default
            result = getattr(s, method)(skipna=True)
            # BUG FIX: this was a bare `result == unit` comparison (a no-op);
            # it is now asserted.
            assert result == unit

            # skipna, explicit
            result = getattr(s, method)(skipna=True, min_count=0)
            assert result == unit

            result = getattr(s, method)(skipna=True, min_count=1)
            assert isna(result)

            # Mix of valid, empty
            s = Series([np.nan, 1], dtype=dtype)
            # Default
            result = getattr(s, method)()
            assert result == 1.0

            # Explicit
            result = getattr(s, method)(min_count=0)
            assert result == 1.0

            result = getattr(s, method)(min_count=1)
            assert result == 1.0

            # Skipna
            result = getattr(s, method)(skipna=True)
            assert result == 1.0

            result = getattr(s, method)(skipna=True, min_count=0)
            assert result == 1.0

            # GH#844 (changed in GH#9422)
            df = DataFrame(np.empty((10, 0)), dtype=dtype)
            assert (getattr(df, method)(1) == unit).all()

            s = Series([1], dtype=dtype)
            result = getattr(s, method)(min_count=2)
            assert isna(result)

            result = getattr(s, method)(skipna=False, min_count=2)
            assert isna(result)

            s = Series([np.nan], dtype=dtype)
            result = getattr(s, method)(min_count=2)
            assert isna(result)

            s = Series([np.nan, 1], dtype=dtype)
            result = getattr(s, method)(min_count=2)
            assert isna(result)

    @pytest.mark.parametrize("method, unit", [("sum", 0.0), ("prod", 1.0)])
    def test_empty_multi(self, method, unit):
        """Per-level sum/prod on a MultiIndex: an all-NA level yields the
        identity with min_count=0 and NA with min_count=1."""
        s = Series(
            [1, np.nan, np.nan, np.nan],
            index=pd.MultiIndex.from_product([("a", "b"), (0, 1)]),
        )
        # 1 / 0 by default
        with tm.assert_produces_warning(FutureWarning):
            result = getattr(s, method)(level=0)
        expected = Series([1, unit], index=["a", "b"])
        tm.assert_series_equal(result, expected)

        # min_count=0
        with tm.assert_produces_warning(FutureWarning):
            result = getattr(s, method)(level=0, min_count=0)
        expected = Series([1, unit], index=["a", "b"])
        tm.assert_series_equal(result, expected)

        # min_count=1
        with tm.assert_produces_warning(FutureWarning):
            result = getattr(s, method)(level=0, min_count=1)
        expected = Series([1, np.nan], index=["a", "b"])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("method", ["mean"])
    @pytest.mark.parametrize("dtype", ["Float64", "Int64", "boolean"])
    def test_ops_consistency_on_empty_nullable(self, method, dtype):
        # GH#34814
        # consistency for nullable dtypes on empty or ALL-NA mean

        # empty series
        eser = Series([], dtype=dtype)
        result = getattr(eser, method)()
        assert result is pd.NA

        # ALL-NA series
        nser = Series([np.nan], dtype=dtype)
        result = getattr(nser, method)()
        assert result is pd.NA

    @pytest.mark.parametrize("method", ["mean", "median", "std", "var"])
    def test_ops_consistency_on_empty(self, method):
        # GH#7869
        # consistency on empty

        # float
        result = getattr(Series(dtype=float), method)()
        assert isna(result)

        # timedelta64[ns]
        tdser = Series([], dtype="m8[ns]")
        if method == "var":
            # var has no timedelta meaning (units would be squared);
            # the exact message depends on the internal code path taken.
            msg = "|".join(
                [
                    "operation 'var' not allowed",
                    r"cannot perform var with type timedelta64\[ns\]",
                    "'TimedeltaArray' does not implement reduction 'var'",
                ]
            )
            with pytest.raises(TypeError, match=msg):
                getattr(tdser, method)()
        else:
            result = getattr(tdser, method)()
            assert result is NaT

    def test_nansum_buglet(self):
        # np.nansum on a Series should ignore the NaN entry.
        ser = Series([1.0, np.nan], index=[0, 1])
        result = np.nansum(ser)
        tm.assert_almost_equal(result, 1)

    @pytest.mark.parametrize("use_bottleneck", [True, False])
    def test_sum_overflow(self, use_bottleneck):
        with pd.option_context("use_bottleneck", use_bottleneck):
            # GH#6915
            # overflowing on the smaller int dtypes: sum must upcast to
            # int64 internally instead of wrapping around.
            for dtype in ["int32", "int64"]:
                v = np.arange(5000000, dtype=dtype)
                s = Series(v)

                result = s.sum(skipna=False)
                assert int(result) == v.sum(dtype="int64")
                result = s.min(skipna=False)
                assert int(result) == 0
                result = s.max(skipna=False)
                assert int(result) == v[-1]

            for dtype in ["float32", "float64"]:
                v = np.arange(5000000, dtype=dtype)
                s = Series(v)

                result = s.sum(skipna=False)
                assert result == v.sum(dtype=dtype)
                result = s.min(skipna=False)
                assert np.allclose(float(result), 0.0)
                result = s.max(skipna=False)
                assert np.allclose(float(result), v[-1])

    def test_empty_timeseries_reductions_return_nat(self):
        # covers GH#11245
        # (deduplicated: the original tuple listed "m8[ns]" twice)
        for dtype in ("m8[ns]", "M8[ns]", "M8[ns, UTC]"):
            assert Series([], dtype=dtype).min() is NaT
            assert Series([], dtype=dtype).max() is NaT
            assert Series([], dtype=dtype).min(skipna=False) is NaT
            assert Series([], dtype=dtype).max(skipna=False) is NaT

    def test_numpy_argmin(self):
        # See GH#16830
        # np.argmin and Series.argmin agree with the ndarray result, and
        # the unsupported ``out`` argument raises.
        data = np.arange(1, 11)

        s = Series(data, index=data)
        result = np.argmin(s)

        expected = np.argmin(data)
        assert result == expected

        result = s.argmin()

        assert result == expected

        msg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.argmin(s, out=data)

    def test_numpy_argmax(self):
        # See GH#16830
        # Mirror of test_numpy_argmin for argmax.
        data = np.arange(1, 11)

        s = Series(data, index=data)
        result = np.argmax(s)
        expected = np.argmax(data)
        assert result == expected

        result = s.argmax()

        assert result == expected

        msg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.argmax(s, out=data)

    def test_idxmin(self):
        # test idxmin
        # _check_stat_op approach can not be used here because of isna check.
        string_series = tm.makeStringSeries().rename("series")

        # add some NaNs (np.NaN -> np.nan: the alias was removed in numpy 2.0)
        string_series[5:15] = np.nan

        # skipna or no
        assert string_series[string_series.idxmin()] == string_series.min()
        assert isna(string_series.idxmin(skipna=False))

        # no NaNs
        nona = string_series.dropna()
        assert nona[nona.idxmin()] == nona.min()
        assert nona.index.values.tolist().index(nona.idxmin()) == nona.values.argmin()

        # all NaNs
        allna = string_series * np.nan
        assert isna(allna.idxmin())

        # datetime64[ns]
        s = Series(date_range("20130102", periods=6))
        result = s.idxmin()
        assert result == 0

        s[0] = np.nan
        result = s.idxmin()
        assert result == 1

    def test_idxmax(self):
        # test idxmax
        # _check_stat_op approach can not be used here because of isna check.
        string_series = tm.makeStringSeries().rename("series")

        # add some NaNs (np.NaN -> np.nan: the alias was removed in numpy 2.0)
        string_series[5:15] = np.nan

        # skipna or no
        assert string_series[string_series.idxmax()] == string_series.max()
        assert isna(string_series.idxmax(skipna=False))

        # no NaNs
        nona = string_series.dropna()
        assert nona[nona.idxmax()] == nona.max()
        assert nona.index.values.tolist().index(nona.idxmax()) == nona.values.argmax()

        # all NaNs
        allna = string_series * np.nan
        assert isna(allna.idxmax())

        # datetime64[ns]
        # (removed a redundant function-local `from pandas import date_range`;
        # date_range is already in scope at module level, as used above)
        s = Series(date_range("20130102", periods=6))
        result = s.idxmax()
        assert result == 5

        s[5] = np.nan
        result = s.idxmax()
        assert result == 4

        # Float64Index
        # GH#5914
        s = Series([1, 2, 3], [1.1, 2.1, 3.1])
        result = s.idxmax()
        assert result == 3.1
        result = s.idxmin()
        assert result == 1.1

        s = Series(s.index, s.index)
        result = s.idxmax()
        assert result == 3.1
        result = s.idxmin()
        assert result == 1.1

    def test_all_any(self):
        ts = tm.makeTimeSeries()
        bool_series = ts > 0
        assert not bool_series.all()
        assert bool_series.any()

        # Alternative types, with implicit 'object' dtype.
        s = Series(["abc", True])
        assert s.any()

    @pytest.mark.parametrize("klass", [Index, Series])
    def test_numpy_all_any(self, klass):
        # GH#40180
        idx = klass([0, 1, 2])
        assert not np.all(idx)
        assert np.any(idx)
        idx = Index([1, 2, 3])
        assert np.all(idx)

    def test_all_any_params(self):
        # Check skipna, with implicit 'object' dtype.
        s1 = Series([np.nan, True])
        s2 = Series([np.nan, False])
        assert s1.all(skipna=False)  # nan && True => True
        assert s1.all(skipna=True)
        assert s2.any(skipna=False)
        assert not s2.any(skipna=True)

        # Check level.
        s = Series([False, False, True, True, False, True], index=[0, 0, 1, 1, 2, 2])
        with tm.assert_produces_warning(FutureWarning):
            tm.assert_series_equal(s.all(level=0), Series([False, True, False]))
        with tm.assert_produces_warning(FutureWarning):
            tm.assert_series_equal(s.any(level=0), Series([False, True, True]))

        msg = "Option bool_only is not implemented with option level"
        with pytest.raises(NotImplementedError, match=msg):
            with tm.assert_produces_warning(FutureWarning):
                s.any(bool_only=True, level=0)
        with pytest.raises(NotImplementedError, match=msg):
            with tm.assert_produces_warning(FutureWarning):
                s.all(bool_only=True, level=0)

        # bool_only is not implemented alone.
        # TODO GH38810 change this error message to:
        # "Series.any does not implement bool_only"
        msg = "Series.any does not implement numeric_only"
        with pytest.raises(NotImplementedError, match=msg):
            s.any(bool_only=True)
        msg = "Series.all does not implement numeric_only."
        with pytest.raises(NotImplementedError, match=msg):
            s.all(bool_only=True)

    @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
    @pytest.mark.parametrize("skipna", [True, False])
    def test_any_all_object_dtype(self, bool_agg_func, skipna):
        # GH#12863
        # all-truthy object series: any/all are True for either skipna.
        ser = Series(["a", "b", "c", "d", "e"], dtype=object)
        result = getattr(ser, bool_agg_func)(skipna=skipna)
        expected = True

        assert result == expected

    @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
    @pytest.mark.parametrize(
        "data", [[False, None], [None, False], [False, np.nan], [np.nan, False]]
    )
    def test_any_all_object_dtype_missing(self, data, bool_agg_func):
        # GH#27709
        ser = Series(data)
        result = getattr(ser, bool_agg_func)(skipna=False)

        # None is treated is False, but np.nan is treated as True
        expected = bool_agg_func == "any" and None not in data
        assert result == expected

    @pytest.mark.parametrize("bool_agg_func", ["any", "all"])
    @pytest.mark.parametrize("skipna", [True, False])
    @pytest.mark.parametrize(
        # expected_data indexed as [[skipna=False/any, skipna=False/all],
        #                           [skipna=True/any, skipna=True/all]]
        "data,expected_data",
        [
            ([False, False, False], [[False, False], [False, False]]),
            ([True, True, True], [[True, True], [True, True]]),
            ([pd.NA, pd.NA, pd.NA], [[pd.NA, pd.NA], [False, True]]),
            ([False, pd.NA, False], [[pd.NA, False], [False, False]]),
            ([True, pd.NA, True], [[True, pd.NA], [True, True]]),
            ([True, pd.NA, False], [[True, False], [True, False]]),
        ],
    )
    def test_any_all_boolean_kleene_logic(
        self, bool_agg_func, skipna, data, expected_data
    ):
        # Nullable-boolean any/all follow Kleene three-valued logic.
        ser = Series(data, dtype="boolean")
        expected = expected_data[skipna][bool_agg_func == "all"]

        result = getattr(ser, bool_agg_func)(skipna=skipna)
        # `expected` may be pd.NA, which cannot be compared with ==
        assert (result is pd.NA and expected is pd.NA) or result == expected

    @pytest.mark.parametrize(
        "bool_agg_func,expected",
        [("all", [False, True, False]), ("any", [False, True, True])],
    )
    def test_any_all_boolean_level(self, bool_agg_func, expected):
        # GH#33449
        ser = Series(
            [False, False, True, True, False, True],
            index=[0, 0, 1, 1, 2, 2],
            dtype="boolean",
        )
        # level= reductions are deprecated, hence the FutureWarning
        with tm.assert_produces_warning(FutureWarning):
            result = getattr(ser, bool_agg_func)(level=0)
        expected = Series(expected, dtype="boolean")
        tm.assert_series_equal(result, expected)

    def test_any_axis1_bool_only(self):
        # GH#32432
        # bool_only=True must drop the integer column "B" before reducing.
        df = DataFrame({"A": [True, False], "B": [1, 2]})
        result = df.any(axis=1, bool_only=True)
        expected = Series([True, False])
        tm.assert_series_equal(result, expected)

    def test_any_all_datetimelike(self):
        # GH#38723 these may not be the desired long-term behavior (GH#34479)
        # but in the interim should be internally consistent
        dta = date_range("1995-01-02", periods=3)._data
        ser = Series(dta)
        df = DataFrame(ser)

        assert dta.all()
        assert dta.any()

        assert ser.all()
        assert ser.any()

        assert df.any().all()
        assert df.all().all()

        dta = dta.tz_localize("UTC")
        ser = Series(dta)
        df = DataFrame(ser)

        assert dta.all()
        assert dta.any()

        assert ser.all()
        assert ser.any()

        assert df.any().all()
        assert df.all().all()

        # timedeltas: the first element is zero, so all() must be False
        tda = dta - dta[0]
        ser = Series(tda)
        df = DataFrame(ser)

        assert tda.any()
        assert not tda.all()

        assert ser.any()
        assert not ser.all()

        assert df.any().all()
        assert not df.all().any()

    def test_timedelta64_analytics(self):
        # index min/max
        dti = date_range("2012-1-1", periods=3, freq="D")
        td = Series(dti) - Timestamp("20120101")

        result = td.idxmin()
        assert result == 0

        result = td.idxmax()
        assert result == 2

        # GH#2982
        # with NaT
        td[0] = np.nan

        result = td.idxmin()
        assert result == 1

        result = td.idxmax()
        assert result == 2

        # abs
        s1 = Series(date_range("20120101", periods=3))
        s2 = Series(date_range("20120102", periods=3))
        expected = Series(s2 - s1)

        result = np.abs(s1 - s2)
        tm.assert_series_equal(result, expected)

        result = (s1 - s2).abs()
        tm.assert_series_equal(result, expected)

        # max/min
        result = td.max()
        expected = Timedelta("2 days")
        assert result == expected

        result = td.min()
        expected = Timedelta("1 days")
        assert result == expected

    @pytest.mark.parametrize(
        "test_input,error_type",
        [
            (Series([], dtype="float64"), ValueError),
            # For strings, or any Series with dtype 'O'
            (Series(["foo", "bar", "baz"]), TypeError),
            (Series([(1,), (2,)]), TypeError),
            # For mixed data types
            (Series(["foo", "foo", "bar", "bar", None, np.nan, "baz"]), TypeError),
        ],
    )
    def test_assert_idxminmax_raises(self, test_input, error_type):
        """
        Cases where ``Series.argmax`` and related should raise an exception
        """
        msg = (
            "reduction operation 'argmin' not allowed for this dtype|"
            "attempt to get argmin of an empty sequence"
        )
        with pytest.raises(error_type, match=msg):
            test_input.idxmin()
        with pytest.raises(error_type, match=msg):
            test_input.idxmin(skipna=False)
        msg = (
            "reduction operation 'argmax' not allowed for this dtype|"
            "attempt to get argmax of an empty sequence"
        )
        with pytest.raises(error_type, match=msg):
            test_input.idxmax()
        with pytest.raises(error_type, match=msg):
            test_input.idxmax(skipna=False)

    def test_idxminmax_with_inf(self):
        # For numeric data with NA and Inf (GH #13595)
        s = Series([0, -np.inf, np.inf, np.nan])

        assert s.idxmin() == 1
        assert np.isnan(s.idxmin(skipna=False))

        assert s.idxmax() == 2
        assert np.isnan(s.idxmax(skipna=False))

        # Using old-style behavior that treats floating point nan, -inf, and
        # +inf as missing
        with pd.option_context("mode.use_inf_as_na", True):
            assert s.idxmin() == 0
            assert np.isnan(s.idxmin(skipna=False))
            assert s.idxmax() == 0
            # BUG FIX: the result of this call was silently discarded;
            # it is now asserted like its idxmin counterpart above.
            assert np.isnan(s.idxmax(skipna=False))
class TestDatetime64SeriesReductions:
    # Note: the name TestDatetime64SeriesReductions indicates these tests
    # were moved from a series-specific test file, _not_ that these tests are
    # intended long-term to be series-specific

    @pytest.mark.parametrize(
        "nat_ser",
        [
            Series([NaT, NaT]),
            Series([NaT, Timedelta("nat")]),
            Series([Timedelta("nat"), Timedelta("nat")]),
        ],
    )
    def test_minmax_nat_series(self, nat_ser):
        # GH#23282
        # An all-NaT series reduces to NaT regardless of skipna.
        assert nat_ser.min() is NaT
        assert nat_ser.max() is NaT
        assert nat_ser.min(skipna=False) is NaT
        assert nat_ser.max(skipna=False) is NaT

    @pytest.mark.parametrize(
        "nat_df",
        [
            DataFrame([NaT, NaT]),
            DataFrame([NaT, Timedelta("nat")]),
            DataFrame([Timedelta("nat"), Timedelta("nat")]),
        ],
    )
    def test_minmax_nat_dataframe(self, nat_df):
        # GH#23282
        # Same as above, but reducing column-wise on a DataFrame.
        assert nat_df.min()[0] is NaT
        assert nat_df.max()[0] is NaT
        assert nat_df.min(skipna=False)[0] is NaT
        assert nat_df.max(skipna=False)[0] is NaT

    def test_min_max(self):
        # min/max of a shuffled DatetimeIndex return Timestamp scalars equal
        # to the endpoints of the sorted range.
        rng = date_range("1/1/2000", "12/31/2000")
        rng2 = rng.take(np.random.permutation(len(rng)))

        the_min = rng2.min()
        the_max = rng2.max()
        assert isinstance(the_min, Timestamp)
        assert isinstance(the_max, Timestamp)
        assert the_min == rng[0]
        assert the_max == rng[-1]

        assert rng.min() == rng[0]
        assert rng.max() == rng[-1]

    def test_min_max_series(self):
        # Series.min/max on datetime64 data return Timestamp scalars; the
        # range is increasing, so min/max are its first/last entries.
        rng = date_range("1/1/2000", periods=10, freq="4h")
        lvls = ["A", "A", "A", "B", "B", "B", "C", "C", "C", "C"]
        df = DataFrame({"TS": rng, "V": np.random.randn(len(rng)), "L": lvls})

        result = df.TS.max()
        exp = Timestamp(df.TS.iat[-1])
        assert isinstance(result, Timestamp)
        assert result == exp

        result = df.TS.min()
        exp = Timestamp(df.TS.iat[0])
        assert isinstance(result, Timestamp)
        assert result == exp
class TestCategoricalSeriesReductions:
    # Note: these tests were moved here from a series-specific test file;
    # the grouping is historical, not a statement of intended scope.

    @pytest.mark.parametrize("function", ["min", "max"])
    def test_min_max_unordered_raises(self, function):
        """Unordered categoricals have no min/max and must raise."""
        ser = Series(Categorical(["a", "b", "c", "d"], ordered=False))
        msg = f"Categorical is not ordered for operation {function}"
        with pytest.raises(TypeError, match=msg):
            getattr(ser, function)()

    @pytest.mark.parametrize(
        "values, categories",
        [
            (list("abc"), list("abc")),
            (list("abc"), list("cba")),
            (list("abc") + [np.nan], list("cba")),
            ([1, 2, 3], [3, 2, 1]),
            ([1, 2, 3, np.nan], [3, 2, 1]),
        ],
    )
    @pytest.mark.parametrize("function", ["min", "max"])
    def test_min_max_ordered(self, values, categories, function):
        # GH 25303
        """Ordered categoricals take min/max from the category order, so the
        extremes are the first/last of the three declared categories."""
        ser = Series(Categorical(values, categories=categories, ordered=True))
        position = 0 if function == "min" else 2
        assert getattr(ser, function)(skipna=True) == categories[position]

    @pytest.mark.parametrize("function", ["min", "max"])
    @pytest.mark.parametrize("skipna", [True, False])
    def test_min_max_ordered_with_nan_only(self, function, skipna):
        # https://github.com/pandas-dev/pandas/issues/33450
        """An all-NaN ordered categorical reduces to NaN for any skipna."""
        ser = Series(Categorical([np.nan], categories=[1, 2], ordered=True))
        assert getattr(ser, function)(skipna=skipna) is np.nan

    @pytest.mark.parametrize("function", ["min", "max"])
    @pytest.mark.parametrize("skipna", [True, False])
    def test_min_max_skipna(self, function, skipna):
        """skipna=True ignores the NaN entry; skipna=False propagates it."""
        ser = Series(
            Categorical(["a", "b", np.nan, "a"], categories=["b", "a"], ordered=True)
        )
        outcome = getattr(ser, function)(skipna=skipna)
        if not skipna:
            assert outcome is np.nan
        else:
            # category order is b < a
            assert outcome == {"min": "b", "max": "a"}[function]
class TestSeriesMode:
# Note: the name TestSeriesMode indicates these tests
# were moved from a series-specific test file, _not_ that these tests are
# intended long-term to be series-specific
@pytest.mark.parametrize(
"dropna, expected",
[(True, Series([], dtype=np.float64)), (False, Series([], dtype=np.float64))],
)
def test_mode_empty(self, dropna, expected):
s = Series([], dtype=np.float64)
result = s.mode(dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, data, expected",
[
(True, [1, 1, 1, 2], [1]),
(True, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
(False, [1, 1, 1, 2], [1]),
(False, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
],
)
@pytest.mark.parametrize(
"dt", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
)
def test_mode_numerical(self, dropna, data, expected, dt):
s = Series(data, dtype=dt)
result = s.mode(dropna)
expected = Series(expected, dtype=dt)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna, expected", [(True, [1.0]), (False, [1, np.nan])])
def test_mode_numerical_nan(self, dropna, expected):
s = Series([1, 1, 2, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected1, expected2, expected3",
[(True, ["b"], ["bar"], ["nan"]), (False, ["b"], [np.nan], ["nan"])],
)
def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
# Test string and object types.
data = ["a"] * 2 + ["b"] * 3
s = Series(data, dtype="c")
result = s.mode(dropna)
expected1 = Series(expected1, dtype="c")
tm.assert_series_equal(result, expected1)
data = ["foo", "bar", "bar", np.nan, np.nan, np.nan]
s = Series(data, dtype=object)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected2)
data = ["foo", "bar", "bar", np.nan, np.nan, np.nan]
s = Series(data, dtype=object).astype(str)
result = s.mode(dropna)
expected3 = Series(expected3, dtype=str)
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[(True, ["foo"], ["foo"]), (False, ["foo"], [np.nan])],
)
def test_mode_mixeddtype(self, dropna, expected1, expected2):
s = Series([1, "foo", "foo"])
result = s.mode(dropna)
expected = Series(expected1)
tm.assert_series_equal(result, expected)
s = Series([1, "foo", "foo", np.nan, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[
(
True,
["1900-05-03", "2011-01-03", "2013-01-02"],
["2011-01-03", "2013-01-02"],
),
(False, [np.nan], [np.nan, "2011-01-03", "2013-01-02"]),
],
)
def test_mode_datetime(self, dropna, expected1, expected2):
s = Series(
["2011-01-03", "2013-01-02", "1900-05-03", "nan", "nan"], dtype="M8[ns]"
)
result = s.mode(dropna)
expected1 = Series(expected1, dtype="M8[ns]")
tm.assert_series_equal(result, expected1)
s = Series(
[
"2011-01-03",
"2013-01-02",
"1900-05-03",
"2011-01-03",
"2013-01-02",
"nan",
"nan",
],
dtype="M8[ns]",
)
result = s.mode(dropna)
expected2 = Series(expected2, dtype="M8[ns]")
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize(
"dropna, expected1, expected2",
[
(True, ["-1 days", "0 days", "1 days"], ["2 min", "1 day"]),
(False, [np.nan], [np.nan, "2 min", "1 day"]),
],
)
def test_mode_timedelta(self, dropna, expected1, expected2):
# gh-5986: Test timedelta types.
s = Series(
["1 days", "-1 days", "0 days", "nan", "nan"], dtype="timedelta64[ns]"
)
result = s.mode(dropna)
expected1 = Series(expected1, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected1)
s = Series(
[
"1 day",
"1 day",
"-1 day",
"-1 day 2 min",
"2 min",
"2 min",
"nan",
"nan",
],
dtype="timedelta64[ns]",
)
result = s.mode(dropna)
expected2 = Series(expected2, dtype="timedelta64[ns]")
tm.assert_series_equal(result, expected2)
    @pytest.mark.parametrize(
        "dropna, expected1, expected2, expected3",
        [
            (
                True,
                Categorical([1, 2], categories=[1, 2]),
                Categorical(["a"], categories=[1, "a"]),
                Categorical([3, 1], categories=[3, 2, 1], ordered=True),
            ),
            (
                False,
                Categorical([np.nan], categories=[1, 2]),
                Categorical([np.nan, "a"], categories=[1, "a"]),
                Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True),
            ),
        ],
    )
    def test_mode_category(self, dropna, expected1, expected2, expected3):
        # Mode of categorical Series; the result keeps the categories
        # (and ordering) of the input dtype.
        s = Series(Categorical([1, 2, np.nan, np.nan]))
        result = s.mode(dropna)
        expected1 = Series(expected1, dtype="category")
        tm.assert_series_equal(result, expected1)
        # Mixed-type categories with a clear single mode ("a").
        s = Series(Categorical([1, "a", "a", np.nan, np.nan]))
        result = s.mode(dropna)
        expected2 = Series(expected2, dtype="category")
        tm.assert_series_equal(result, expected2)
        # Ordered categorical: modes are reported, dtype ordering preserved.
        s = Series(
            Categorical(
                [1, 1, 2, 3, 3, np.nan, np.nan], categories=[3, 2, 1], ordered=True
            )
        )
        result = s.mode(dropna)
        expected3 = Series(expected3, dtype="category")
        tm.assert_series_equal(result, expected3)
    @pytest.mark.parametrize(
        "dropna, expected1, expected2",
        [(True, [2 ** 63], [1, 2 ** 63]), (False, [2 ** 63], [1, 2 ** 63])],
    )
    def test_mode_intoverflow(self, dropna, expected1, expected2):
        # Test for uint64 overflow.
        # 2**63 does not fit in int64, so mode must stay in uint64.
        s = Series([1, 2 ** 63, 2 ** 63], dtype=np.uint64)
        result = s.mode(dropna)
        expected1 = Series(expected1, dtype=np.uint64)
        tm.assert_series_equal(result, expected1)
        # All values unique: every value is a mode.
        s = Series([1, 2 ** 63], dtype=np.uint64)
        result = s.mode(dropna)
        expected2 = Series(expected2, dtype=np.uint64)
        tm.assert_series_equal(result, expected2)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
expected = Series(["foo", np.nan])
s = Series([1, "foo", "foo", np.nan, np.nan])
with tm.assert_produces_warning(UserWarning):
result = s.mode(dropna=False)
result = result.sort_values().reset_index(drop=True)
tm.assert_series_equal(result, expected)
    def test_mode_boolean_with_na(self):
        # GH#42107
        # Nullable boolean dtype: pd.NA is dropped by default, leaving the
        # single most frequent value (True appears twice).
        ser = Series([True, False, True, pd.NA], dtype="boolean")
        result = ser.mode()
        expected = Series({0: True}, dtype="boolean")
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "array,expected,dtype",
        [
            (
                [0, 1j, 1, 1, 1 + 1j, 1 + 2j],
                Series([1], dtype=np.complex128),
                np.complex128,
            ),
            (
                [0, 1j, 1, 1, 1 + 1j, 1 + 2j],
                Series([1], dtype=np.complex64),
                np.complex64,
            ),
            (
                [1 + 1j, 2j, 1 + 1j],
                Series([1 + 1j], dtype=np.complex128),
                np.complex128,
            ),
        ],
    )
    def test_single_mode_value_complex(self, array, expected, dtype):
        # Complex Series with a single most frequent value; result keeps
        # the requested complex precision.
        result = Series(array, dtype=dtype).mode()
        tm.assert_series_equal(result, expected)
    @pytest.mark.parametrize(
        "array,expected,dtype",
        [
            (
                # no modes
                [0, 1j, 1, 1 + 1j, 1 + 2j],
                Series([0j, 1j, 1 + 0j, 1 + 1j, 1 + 2j], dtype=np.complex128),
                np.complex128,
            ),
            (
                [1 + 1j, 2j, 1 + 1j, 2j, 3],
                Series([2j, 1 + 1j], dtype=np.complex64),
                np.complex64,
            ),
        ],
    )
    def test_multimode_complex(self, array, expected, dtype):
        # GH 17927
        # mode tries to sort multimodal series.
        # Complex numbers are sorted by their magnitude
        result = Series(array, dtype=dtype).mode()
        tm.assert_series_equal(result, expected)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import FeaturestoreOnlineServingServiceGrpcTransport
class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
    FeaturestoreOnlineServingServiceTransport
):
    """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService.
    A service for serving online feature values.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    # NOTE: generated GAPIC code — keep edits in sync with the generator.
    _grpc_channel: aio.Channel
    # Class-level default; each instance gets its own dict in __init__,
    # lazily filled with per-RPC stub callables.
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Per-instance stub cache; shadows the class-level default.
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def read_feature_values(
        self,
    ) -> Callable[
        [featurestore_online_service.ReadFeatureValuesRequest],
        Awaitable[featurestore_online_service.ReadFeatureValuesResponse],
    ]:
        r"""Return a callable for the read feature values method over gRPC.
        Reads Feature values of a specific entity of an
        EntityType. For reading feature values of multiple
        entities of an EntityType, please use
        StreamingReadFeatureValues.
        Returns:
            Callable[[~.ReadFeatureValuesRequest],
                    Awaitable[~.ReadFeatureValuesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "read_feature_values" not in self._stubs:
            self._stubs["read_feature_values"] = self.grpc_channel.unary_unary(
                "/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues",
                request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize,
                response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize,
            )
        return self._stubs["read_feature_values"]
    @property
    def streaming_read_feature_values(
        self,
    ) -> Callable[
        [featurestore_online_service.StreamingReadFeatureValuesRequest],
        Awaitable[featurestore_online_service.ReadFeatureValuesResponse],
    ]:
        r"""Return a callable for the streaming read feature values method over gRPC.
        Reads Feature values for multiple entities. Depending
        on their size, data for different entities may be broken
        up across multiple responses.
        Returns:
            Callable[[~.StreamingReadFeatureValuesRequest],
                    Awaitable[~.ReadFeatureValuesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "streaming_read_feature_values" not in self._stubs:
            self._stubs[
                "streaming_read_feature_values"
            ] = self.grpc_channel.unary_stream(
                "/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues",
                request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize,
                response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize,
            )
        return self._stubs["streaming_read_feature_values"]
    def close(self):
        """Close the underlying gRPC AsyncIO channel."""
        return self.grpc_channel.close()
__all__ = ("FeaturestoreOnlineServingServiceGrpcAsyncIOTransport",)
|
|
#
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import ddt
from oslo_config import fixture as config_fixture
from oslo_policy import policy as base_policy
from heat.common import exception
from heat.common import policy
from heat.tests import common
from heat.tests import utils
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
@ddt.ddt
class TestPolicyEnforcer(common.HeatTestCase):
    """Exercise heat.common.policy enforcers against the test policy files."""

    def setUp(self):
        # Use the real policy files under tests/policy/ instead of mocks.
        super(TestPolicyEnforcer, self).setUp(
            mock_resource_policy=False, mock_find_file=False)
        self.fixture = self.useFixture(config_fixture.Config())
        self.fixture.conf(args=['--config-dir', policy_path])
    def get_policy_file(self, filename):
        # Resolve a policy fixture file relative to the tests/policy/ dir.
        return policy_path + filename
    def _get_context(self, persona):
        # Build a request context matching one of the RBAC personas used
        # in the ddt YAML data files.
        if persona == "system_admin":
            ctx = utils.dummy_system_admin_context()
        elif persona == "system_reader":
            ctx = utils.dummy_system_reader_context()
        elif persona == "project_admin":
            ctx = utils.dummy_context(roles=['admin', 'member', 'reader'])
        elif persona == "project_member":
            ctx = utils.dummy_context(roles=['member', 'reader'])
        elif persona == "project_reader":
            ctx = utils.dummy_context(roles=['reader'])
        elif persona == "stack_user":
            ctx = utils.dummy_context(roles=['heat_stack_user'])
        elif persona == "anyone":
            ctx = utils.dummy_context(roles=['foobar'])
        else:
            self.fail("Persona [{}] not found".format(persona))
        return ctx
    def _test_legacy_rbac_policies(self, **kwargs):
        # Shared driver for the ddt-supplied scenarios: each scenario lists
        # the personas that must be allowed and those that must be denied.
        scope = kwargs.get("scope")
        actions = kwargs.get("actions")
        allowed_personas = kwargs.get("allowed", [])
        denied_personas = kwargs.get("denied", [])
        self._test_policy_allowed(scope, actions, allowed_personas)
        self._test_policy_notallowed(scope, actions, denied_personas)
    @ddt.file_data('policy/test_acl_personas.yaml')
    @ddt.unpack
    def test_legacy_rbac_policies(self, **kwargs):
        self._test_legacy_rbac_policies(**kwargs)
    @ddt.file_data('policy/test_deprecated_access.yaml')
    @ddt.unpack
    def test_deprecated_policies(self, **kwargs):
        self._test_legacy_rbac_policies(**kwargs)
    @ddt.file_data('policy/test_acl_personas.yaml')
    @ddt.unpack
    def test_secure_rbac_policies(self, **kwargs):
        # Same scenarios as the legacy test, but with oslo.policy scope
        # enforcement and the new secure-RBAC defaults enabled.
        self.fixture.config(group='oslo_policy', enforce_scope=True)
        self.fixture.config(group='oslo_policy', enforce_new_defaults=True)
        scope = kwargs.get("scope")
        actions = kwargs.get("actions")
        allowed_personas = kwargs.get("allowed", [])
        denied_personas = kwargs.get("denied", [])
        self._test_policy_allowed(scope, actions, allowed_personas)
        self._test_policy_notallowed(scope, actions, denied_personas)
    def _test_policy_allowed(self, scope, actions, personas):
        enforcer = policy.Enforcer(scope=scope)
        for persona in personas:
            ctx = self._get_context(persona)
            for action in actions:
                # Everything should be allowed
                enforcer.enforce(
                    ctx,
                    action,
                    target={"project_id": "test_tenant_id"},
                    is_registered_policy=True
                )
    def _test_policy_notallowed(self, scope, actions, personas):
        enforcer = policy.Enforcer(scope=scope)
        for persona in personas:
            ctx = self._get_context(persona)
            for action in actions:
                # Everything should raise the default exception.Forbidden
                self.assertRaises(
                    exception.Forbidden,
                    enforcer.enforce, ctx,
                    action,
                    target={"project_id": "test_tenant_id"},
                    is_registered_policy=True)
    def test_set_rules_overwrite_true(self):
        enforcer = policy.Enforcer()
        enforcer.load_rules(True)
        enforcer.set_rules({'test_heat_rule': 1}, True)
        self.assertEqual({'test_heat_rule': 1}, enforcer.enforcer.rules)
    def test_set_rules_overwrite_false(self):
        enforcer = policy.Enforcer()
        enforcer.load_rules(True)
        enforcer.load_rules(True)
        enforcer.set_rules({'test_heat_rule': 1}, False)
        self.assertIn('test_heat_rule', enforcer.enforcer.rules)
    def test_load_rules_force_reload_true(self):
        # A forced reload should discard rules set via set_rules.
        enforcer = policy.Enforcer()
        enforcer.load_rules(True)
        enforcer.set_rules({'test_heat_rule': 'test'})
        enforcer.load_rules(True)
        self.assertNotIn({'test_heat_rule': 'test'}, enforcer.enforcer.rules)
    def test_load_rules_force_reload_false(self):
        # Without force_reload, manually set rules must survive.
        enforcer = policy.Enforcer()
        enforcer.load_rules(True)
        enforcer.load_rules(True)
        enforcer.set_rules({'test_heat_rule': 'test'})
        enforcer.load_rules(False)
        self.assertIn('test_heat_rule', enforcer.enforcer.rules)
    def test_no_such_action(self):
        ctx = utils.dummy_context(roles=['not_a_stack_user'])
        enforcer = policy.Enforcer(scope='cloudformation')
        action = 'no_such_action'
        msg = 'cloudformation:no_such_action has not been registered'
        self.assertRaisesRegex(base_policy.PolicyNotRegistered,
                               msg,
                               enforcer.enforce,
                               ctx, action,
                               None, None,
                               True)
    def test_check_admin(self):
        enforcer = policy.Enforcer()
        ctx = utils.dummy_context(roles=[])
        self.assertFalse(enforcer.check_is_admin(ctx))
        ctx = utils.dummy_context(roles=['not_admin'])
        self.assertFalse(enforcer.check_is_admin(ctx))
        ctx = utils.dummy_context(roles=['admin'])
        self.assertTrue(enforcer.check_is_admin(ctx))
    def test_enforce_creds(self):
        enforcer = policy.Enforcer()
        ctx = utils.dummy_context(roles=['admin'])
        self.assertTrue(enforcer.check_is_admin(ctx))
    def test_resource_default_rule(self):
        # Resource types absent from the policy fall back to the default rule.
        context = utils.dummy_context(roles=['non-admin'])
        enforcer = policy.ResourceEnforcer()
        res_type = "OS::Test::NotInPolicy"
        self.assertTrue(enforcer.enforce(context, res_type,
                                         is_registered_policy=True))
    def test_resource_enforce_success(self):
        context = utils.dummy_context(roles=['admin'])
        enforcer = policy.ResourceEnforcer()
        res_type = "OS::Keystone::User"
        self.assertTrue(enforcer.enforce(context, res_type,
                                         is_registered_policy=True))
    def test_resource_enforce_fail(self):
        context = utils.dummy_context(roles=['non-admin'])
        enforcer = policy.ResourceEnforcer()
        res_type = "OS::Nova::Quota"
        ex = self.assertRaises(exception.Forbidden,
                               enforcer.enforce,
                               context, res_type,
                               None, None,
                               True)
        self.assertIn(res_type, ex.message)
    def test_resource_wildcard_enforce_fail(self):
        # Denial by a wildcard rule reports the resource type prefix.
        context = utils.dummy_context(roles=['non-admin'])
        enforcer = policy.ResourceEnforcer()
        res_type = "OS::Keystone::User"
        ex = self.assertRaises(exception.Forbidden,
                               enforcer.enforce,
                               context, res_type,
                               None, None,
                               True)
        self.assertIn(res_type.split("::", 1)[0], ex.message)
    def test_resource_enforce_returns_false(self):
        # With exc=None the enforcer returns False instead of raising.
        context = utils.dummy_context(roles=['non-admin'])
        enforcer = policy.ResourceEnforcer(exc=None)
        res_type = "OS::Keystone::User"
        self.assertFalse(enforcer.enforce(context, res_type,
                                          is_registered_policy=True))
        self.assertIsNotNone(enforcer.enforce(context, res_type,
                                              is_registered_policy=True))
    def test_resource_enforce_exc_on_false(self):
        context = utils.dummy_context(roles=['non-admin'])
        enforcer = policy.ResourceEnforcer()
        res_type = "OS::Keystone::User"
        ex = self.assertRaises(exception.Forbidden,
                               enforcer.enforce,
                               context, res_type,
                               None, None,
                               True)
        self.assertIn(res_type, ex.message)
    def test_resource_enforce_override_deny_admin(self):
        # A policy file may deny a resource even to admins.
        context = utils.dummy_context(roles=['admin'])
        enforcer = policy.ResourceEnforcer(
            policy_file=self.get_policy_file('resources.json'))
        res_type = "OS::Cinder::Quota"
        ex = self.assertRaises(exception.Forbidden,
                               enforcer.enforce,
                               context, res_type,
                               None, None,
                               True)
        self.assertIn(res_type, ex.message)
|
|
"""
Upload API for Flickr.
It is separated since it requires different treatments than
the usual API.
Two functions are provided:
- upload (supporting both sync and async modes)
- replace (presently not working)
Author: Dmitriy Bryndin
email: bryndin@gmail.com
Date: 08/24/2014
Author: Alexis Mignon (c)
email: alexis.mignon@gmail.com
Date: 06/08/2011
"""
import os
import logging
from xml.etree import ElementTree
from tornado.gen import coroutine, Return
from tornado.ioloop import PeriodicCallback
from tornado.concurrent import Future
from flickrerrors import FlickrError, FlickrAPIError
from objects import Photo
import auth
import multipart
# Flickr endpoints for uploading new photos and replacing existing ones.
UPLOAD_URL = "https://api.flickr.com/services/upload/"
REPLACE_URL = "https://api.flickr.com/services/replace/"
# Maps pending upload ticket ids -> Futures resolved by _check_tickets.
_futures = {}
log = logging.getLogger("tornado.application")
def format_dict(d):
    """Return a copy of *d* with every key and value as a byte string.

    Booleans become ints first (Flickr expects 0/1); unicode keys and
    values are UTF-8 encoded; every value is finally passed through str().
    """
    out = {}
    for key, value in d.iteritems():
        if isinstance(value, bool):
            value = int(value)
        elif isinstance(value, unicode):
            value = value.encode("utf8")
        if isinstance(key, unicode):
            key = key.encode("utf8")
        out[key] = str(value)
    return out
@coroutine
def post(url, auth_handler, photo_file, **kwargs):
    """POST *photo_file* with signed parameters to a Flickr endpoint.

    Arguments:
        url: UPLOAD_URL or REPLACE_URL.
        auth_handler: authenticated handler used to sign the request.
        photo_file: path of the image file to send.
        **kwargs: extra POST parameters (normalized via format_dict).

    Returns (via tornado Return): the parsed XML <rsp> element.

    Raises:
        FlickrError: on a non-200 HTTP response.
        FlickrAPIError: when Flickr answers with stat != 'ok'.
    """
    kwargs = format_dict(kwargs)
    kwargs["api_key"] = auth_handler.key
    params = auth_handler.complete_parameters(url, kwargs).parameters
    fields = params.items()
    # Read in binary mode and close the handle promptly: the original
    # text-mode, never-closed open() leaked a file descriptor and could
    # corrupt binary image data on platforms with newline translation.
    with open(photo_file, "rb") as fh:
        files = [("photo", os.path.basename(photo_file), fh.read())]
    response = yield multipart.posturl(url, fields, files)
    if response.code != 200:
        raise FlickrError("HTTP Error %i: %s" % (response.code, response.body))
    r = ElementTree.fromstring(response.body)
    if r.get("stat") != 'ok':
        err = r[0]
        raise FlickrAPIError(int(err.get("code")), err.get("msg"))
    raise Return(r)
@coroutine
def upload(**kwargs):
    """
    Authentication:
    This method requires authentication with 'write' permission.
    Arguments:
    photo_file
    The file to upload.
    title (optional)
    The title of the photo.
    description (optional)
    A description of the photo. May contain some limited HTML.
    tags (optional)
    A space-separated list of tags to apply to the photo.
    is_public, is_friend, is_family (optional)
    Set to 0 for no, 1 for yes. Specifies who can view the photo.
    safety_level (optional)
    Set to 1 for Safe, 2 for Moderate, or 3 for Restricted.
    content_type (optional)
    Set to 1 for Photo, 2 for Screenshot, or 3 for Other.
    hidden (optional)
    Set to 1 to keep the photo in global search results, 2 to hide
    from public searches.
    async
    set to 1 for async mode, 0 for sync mode
    """
    # Default to Flickr's synchronous upload mode.
    if "async" not in kwargs:
        kwargs["async"] = False
    if auth.AUTH_HANDLER is None:
        raise FlickrError("Not authenticated")
    photo_file = kwargs.pop("photo_file")
    try:
        resp_body = yield post(UPLOAD_URL, auth.AUTH_HANDLER, photo_file, **kwargs)
    except Exception as e:
        log.error("Failed to upload %s" % photo_file)
        raise e
    # The response root's first child is either <photoid> (sync) or
    # <ticketid> (async).
    t = resp_body[0]
    if t.tag == 'photoid':
        # sync mode, got a photo
        raise Return(Photo(id=t.text,
                editurl='https://www.flickr.com/photos/upload/edit/?ids=' + t.text))
    elif t.tag == 'ticketid':
        # async mode, got a ticket
        # Start the periodic ticket poller on the first outstanding ticket;
        # _check_tickets resolves this Future when Flickr finishes processing.
        if not _futures:
            _periodic_checks.start()
        _futures[t.text] = Future()
        try:
            yield _futures[t.text]
        except Exception as e:
            raise e
        raise Return(Photo(id=t.text,
                editurl='https://www.flickr.com/photos/upload/edit/?ids=' + t.text))
    else:
        raise FlickrError("Unexpected tag: %s" % t.tag)
# BUG FIX: the original function used ``yield``/``raise Return`` but was
# missing the @coroutine decorator, so calling it returned an inert
# generator and nothing ever ran (the module header even notes that
# replace was "presently not working").
@coroutine
def replace(**kwargs):
    """Replace the photo behind an existing photo id.

    Authentication:
        This method requires authentication with 'write' permission.
        Note that the 'photo' parameter should not be included in the
        signature; all other POST parameters should be included when
        generating the signature.

    Arguments:
        photo_file: the file to upload.
        photo_id: the ID of the photo to replace (alternatively, pass a
            Photo object as 'photo').
        async (optional): photos may be replaced in async mode, for
            applications that don't want to keep a socket open for the
            whole upload; asynchronous processing is recommended.

    Returns (via tornado Return): the replaced Photo.
    """
    if "async" not in kwargs:
        kwargs["async"] = False
    # Fail fast with a clear error, matching upload().
    if auth.AUTH_HANDLER is None:
        raise FlickrError("Not authenticated")
    if "photo" in kwargs:
        kwargs["photo_id"] = kwargs.pop("photo").id
    photo_file = kwargs.pop("photo_file")
    try:
        resp_body = yield post(REPLACE_URL, auth.AUTH_HANDLER, photo_file, **kwargs)
    except Exception as e:
        # Log the failure for parity with upload(), then propagate.
        log.error("Failed to replace %s" % photo_file)
        raise e
    t = resp_body[0]
    if t.tag == 'photoid':
        # sync mode, got a photo
        raise Return(Photo(id=t.text,
                editurl='https://www.flickr.com/photos/upload/edit/?ids=' + t.text))
    elif t.tag == 'ticketid':
        # async mode, got a ticket; start the poller on the first one
        if not _futures:
            _periodic_checks.start()
        _futures[t.text] = Future()
        yield _futures[t.text]
        raise Return(Photo(id=t.text,
                editurl='https://www.flickr.com/photos/upload/edit/?ids=' + t.text))
    else:
        raise FlickrError("Unexpected tag: %s" % t.tag)
@coroutine
def _check_tickets():
    """Poll Flickr for the state of all pending upload tickets.

    Runs via the _periodic_checks PeriodicCallback while _futures is
    non-empty, resolving or failing the Future registered per ticket.
    """
    try:
        tickets = yield Photo.checkUploadTickets(_futures.keys())
    except Exception:
        # Use the module logger instead of a bare print, and re-raise
        # with a bare ``raise`` so the original traceback is preserved.
        log.exception("Failed to check upload tickets")
        raise
    for t in tickets:
        # Remove the ticket first so a callback error can't re-process it.
        f = _futures.pop(t.id)
        if not _futures:
            # Nothing left to track: stop polling.
            _periodic_checks.stop()
        if t.get("complete", 0) == 1:
            # Completed successfully. Future.set_result() requires a result
            # argument; the original zero-argument call raised TypeError on
            # every successful ticket.
            f.set_result(None)
        elif t.get("complete", 0) == 2:
            # ticket failed, problem converting photo?
            f.set_exception(FlickrError("Ticket %s failed" % t.id))
        elif t.get("invalid", 0) == 1:
            # ticket not found
            f.set_exception(FlickrError("Ticket %s not found" % t.id))
CHECK_PERIOD = 2*1000  # how often (in milliseconds) to check whether tickets are ready
# Started on the first pending ticket and stopped once none remain.
_periodic_checks = PeriodicCallback(_check_tickets, CHECK_PERIOD)
|
|
import time
import shutil
import os
from pkg_resources import resource_filename
from fanstatic.checksum import list_directory, md5, mtime
from fanstatic.checksum import VCS_NAMES, IGNORED_EXTENSIONS
def _copy_testdata(tmpdir):
    """Copy the SomePackage fixture tree into *tmpdir*; return the copy's path."""
    source = resource_filename('tests', 'testdata/SomePackage')
    destination = tmpdir / 'SomePackage'
    shutil.copytree(source, str(destination))
    return destination
def test_list_directory(tmpdir):
    # list_directory yields every file; with include_directories (the
    # default) the directories themselves are yielded as well.
    testdata_path = str(_copy_testdata(tmpdir))
    expected = [
        tmpdir.join('SomePackage/setup.py').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path, include_directories=False))
    assert sorted(found) == sorted(expected)
    # Same listing with directories included.
    expected.extend([
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
    ])
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
def test_list_directory_no_vcs_name(tmpdir):
    # A dot-directory that is not a known VCS name (".novcs") must NOT be
    # filtered out of the listing.
    testdata_path = str(_copy_testdata(tmpdir))
    tmpdir.join('/SomePackage/.novcs').ensure(dir=True)
    tmpdir.join('/SomePackage/.novcs/foo').write('Contents of foo')
    expected = [
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/.novcs').strpath,
        tmpdir.join('SomePackage/.novcs/foo').strpath,
        tmpdir.join('SomePackage/setup.py').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
def test_list_directory_vcs_name(tmpdir):
    # Directories named after known VCS metadata dirs (e.g. .git, .svn)
    # and their contents must be excluded from the listing.
    testdata_path = str(_copy_testdata(tmpdir))
    for name in VCS_NAMES:
        tmpdir.join('/SomePackage/%s' % name).ensure(dir=True)
        tmpdir.join('/SomePackage/%s/foo' % name).write('Contents of foo')
    expected = [
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/setup.py').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
    # NOTE(review): this only removes the *last* VCS dir created by the
    # loop above; appears vestigial since tmpdir is disposable anyway.
    tmpdir.join('/SomePackage/%s' % name).remove(rec=True)
def test_list_directory_dot_file(tmpdir):
    # Plain hidden files (dotfiles that are not VCS dirs) are listed.
    testdata_path = str(_copy_testdata(tmpdir))
    tmpdir.join('/SomePackage/.woekie').ensure()
    expected = [
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/.woekie').strpath,
        tmpdir.join('SomePackage/setup.py').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
def test_list_directory_ignored_extensions(tmpdir):
    # Files with ignored extensions (e.g. editor/backup artifacts from
    # IGNORED_EXTENSIONS) are excluded from the listing.
    testdata_path = str(_copy_testdata(tmpdir))
    for ext in IGNORED_EXTENSIONS:
        tmpdir.join('/SomePackage/bar%s' % ext).ensure()
    expected = [
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/setup.py').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
def test_mtime(tmpdir):
    """The aggregate mtime of the package tree changes whenever a file is
    added, removed, rewritten, or renamed.

    Sleeps are interleaved between mutations because filesystems with
    coarse timestamp resolution could otherwise report identical mtimes
    for consecutive operations.
    """
    testdata_path = str(_copy_testdata(tmpdir))
    sleep = 0.5
    # Sleep extra long on filesystems that report in seconds
    # instead of milliseconds.
    if os.path.getmtime(os.curdir).is_integer():
        sleep += 1
    # Compute a first mtime for the test package:
    mtime_start = mtime(testdata_path)
    # Add a file (+ contents!) and see the mtime changed:
    tmpdir.join('/SomePackage/A').write('Contents for A')
    mtime_after_add = mtime(testdata_path)
    assert mtime_after_add != mtime_start
    # Remove the file again; the mtime changes once more (removal touches
    # the containing directory):
    time.sleep(sleep)
    tmpdir.join('/SomePackage/A').remove()
    mtime_after_remove = mtime(testdata_path)
    assert mtime_after_remove != mtime_after_add
    assert mtime_after_remove != mtime_start
    # Obviously, changing the contents will change the mtime too:
    tmpdir.join('/SomePackage/B').write('Contents for B')
    mtime_start = mtime(testdata_path)
    # Wait a split second in order to let the disk catch up.
    time.sleep(sleep)
    tmpdir.join('/SomePackage/B').write('Contents for B have changed')
    assert mtime(testdata_path) != mtime_start
    tmpdir.join('/SomePackage/B').remove()
    # Moving, or renaming a file should change the mtime:
    mtime_start = mtime(testdata_path)
    time.sleep(sleep)
    tmpdir.join('/SomePackage/setup.py').rename(
        tmpdir.join('/SomePackage/setup.py.renamed'))
    expected = [
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/setup.py.renamed').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
    assert mtime(testdata_path) != mtime_start
def test_md5(tmpdir):
    """The aggregate md5 reflects the tree's *content*: it changes on
    add/modify/rename and reverts when the previous content is restored —
    unlike the mtime-based check above, which never reverts.
    """
    testdata_path = str(_copy_testdata(tmpdir))
    # Compute a first md5 for the test package:
    md5_start = md5(testdata_path)
    # Add a file (+ contents!) and see the md5 changed:
    tmpdir.join('/SomePackage/A').write('Contents for A')
    md5_after_add = md5(testdata_path)
    assert md5_after_add != md5_start
    # Remove the file again, the md5 is back to the previous one:
    # This is a difference from the mtime approach!
    tmpdir.join('/SomePackage/A').remove()
    md5_after_remove = md5(testdata_path)
    assert md5_after_remove != md5_after_add
    assert md5_after_remove == md5_start
    # Obviously, changing the contents will change the md5 too:
    tmpdir.join('/SomePackage/B').write('Contents for B')
    md5_start = md5(testdata_path)
    # No sleep is needed here (contrast with test_mtime): the hash depends
    # only on file contents, not on filesystem timestamps.
    tmpdir.join('/SomePackage/B').write('Contents for B have changed')
    assert md5(testdata_path) != md5_start
    tmpdir.join('/SomePackage/B').remove()
    # Moving, or renaming a file should change the md5:
    md5_start = md5(testdata_path)
    tmpdir.join('/SomePackage/setup.py').rename(
        tmpdir.join('/SomePackage/setup.py.renamed'))
    expected = [
        tmpdir.join('SomePackage').strpath,
        tmpdir.join('SomePackage/MANIFEST.in').strpath,
        tmpdir.join('SomePackage/setup.py.renamed').strpath,
        tmpdir.join('SomePackage/src').strpath,
        tmpdir.join('SomePackage/src/somepackage').strpath,
        tmpdir.join('SomePackage/src/somepackage/__init__.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources.py').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources').strpath,
        tmpdir.join('SomePackage/src/somepackage/resources/style.css').strpath,
    ]
    found = list(list_directory(testdata_path))
    assert sorted(found) == sorted(expected)
    assert md5(testdata_path) != md5_start
|
|
# python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import stat
import tempfile
import unittest
from unittest import mock
from spanner_orm import error
from spanner_orm.admin import migration
from spanner_orm.admin import migration_executor
from spanner_orm.admin import migration_manager
from spanner_orm.admin import update
class MigrationsTest(unittest.TestCase):
  """Tests migration loading, generation, ordering, filtering and execution."""

  # Shared scratch directory, created once at class-definition time and
  # removed in tearDownClass. The 'migrations' subdirectory does NOT exist
  # until a test copies data into it.
  TEST_DIR = tempfile.mkdtemp()
  TEST_MIGRATIONS_DIR = os.path.join(TEST_DIR, 'migrations')

  def test_retrieve(self):
    """Migrations are loaded from disk and chained via prev_migration_id."""
    testdata_filename = os.path.join(os.path.dirname(__file__), 'migrations')
    manager = migration_manager.MigrationManager(testdata_filename)
    migrations = manager.migrations
    self.assertEqual(len(migrations), 3)
    self.assertEqual(migrations[2].prev_migration_id,
                     migrations[1].migration_id)
    self.assertEqual(migrations[1].prev_migration_id,
                     migrations[0].migration_id)

  def test_generate(self):
    """generate() writes a loadable no-op migration chained to the latest."""
    testdata_filename = os.path.join(os.path.dirname(__file__), 'migrations')
    # copytree() requires a non-existent destination. On a fresh run the
    # scratch migrations dir does not exist yet (TEST_DIR comes straight
    # from mkdtemp), so a bare rmtree() would raise FileNotFoundError;
    # ignore_errors makes this setup idempotent. (Bug fix.)
    shutil.rmtree(self.TEST_MIGRATIONS_DIR, ignore_errors=True)
    shutil.copytree(testdata_filename, self.TEST_MIGRATIONS_DIR)
    # Make the copied tree writable so generate() can add a new file.
    os.chmod(self.TEST_MIGRATIONS_DIR, stat.S_IRWXO | stat.S_IRWXU)
    for f in os.listdir(self.TEST_MIGRATIONS_DIR):
      file_path = os.path.join(self.TEST_MIGRATIONS_DIR, f)
      if not os.path.isdir(file_path):
        os.chmod(file_path,
                 stat.S_IROTH | stat.S_IWOTH | stat.S_IRUSR | stat.S_IWUSR)
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    path = manager.generate('test migration')
    try:
      migration_ = manager._migration_from_file(path)
      self.assertIsNotNone(migration_.migration_id)
      self.assertIsNotNone(migration_.prev_migration_id)
      self.assertIsInstance(migration_.upgrade(), update.NoUpdate)
      self.assertIsInstance(migration_.downgrade(), update.NoUpdate)
    finally:
      # Always clean up so reruns start from a pristine state.
      shutil.rmtree(self.TEST_MIGRATIONS_DIR)

  def test_order_migrations(self):
    """Migrations are sorted into prev_migration_id order."""
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    migrations = [third, first, second]
    expected_order = [first, second, third]
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    self.assertEqual(manager._order_migrations(migrations), expected_order)

  def test_order_migrations_with_no_none(self):
    """Ordering also works when the chain doesn't start at None."""
    first = migration.Migration('2', '1')
    second = migration.Migration('3', '2')
    third = migration.Migration('4', '3')
    migrations = [third, first, second]
    expected_order = [first, second, third]
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    self.assertEqual(manager._order_migrations(migrations), expected_order)

  def test_order_migrations_error_on_unclear_successor(self):
    """Two migrations claiming the same predecessor is an error."""
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '1')
    migrations = [third, first, second]
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    with self.assertRaisesRegex(error.SpannerError, 'unclear successor'):
      manager._order_migrations(migrations)

  def test_order_migrations_error_on_unclear_start_migration(self):
    """An ambiguous chain start is an error."""
    first = migration.Migration('1', None)
    second = migration.Migration('3', '2')
    migrations = [first, second]
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    with self.assertRaisesRegex(error.SpannerError, 'no valid previous'):
      manager._order_migrations(migrations)

  def test_order_migrations_error_on_circular_dependency(self):
    """A predecessor cycle is an error."""
    first = migration.Migration('1', '3')
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    migrations = [third, first, second]
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    with self.assertRaisesRegex(error.SpannerError, 'No valid migration'):
      manager._order_migrations(migrations)

  def test_order_migrations_error_on_no_successor(self):
    """A broken chain (unreachable migrations) is an error."""
    first = migration.Migration('1', None)
    second = migration.Migration('2', '3')
    third = migration.Migration('3', '2')
    migrations = [third, first, second]
    manager = migration_manager.MigrationManager(self.TEST_MIGRATIONS_DIR)
    with self.assertRaisesRegex(error.SpannerError, 'no successor'):
      manager._order_migrations(migrations)

  def test_filter_migrations(self):
    """_filter_migrations picks pending (or applied, for rollback) ones."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    migrations = [first, second, third]
    migrated = {'1': True, '2': False, '3': False}
    with mock.patch.object(executor, '_migration_status_map', migrated):
      # No target: everything not yet migrated.
      filtered = executor._filter_migrations(migrations, False, None)
      self.assertEqual(filtered, [second, third])
      # Target '2': stop after reaching it.
      filtered = executor._filter_migrations(migrations, False, '2')
      self.assertEqual(filtered, [second])
      # Rollback direction walks the reversed list over migrated entries.
      filtered = executor._filter_migrations(reversed(migrations), True, '1')
      self.assertEqual(filtered, [first])

  def test_filter_migrations_error_on_bad_last_migration(self):
    """An already-applied or unknown target migration id is an error."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    migrations = [first, second, third]
    migrated = {'1': True, '2': False, '3': False}
    with mock.patch.object(executor, '_migration_status_map', migrated):
      with self.assertRaises(error.SpannerError):
        executor._filter_migrations(migrations, False, '1')
      with self.assertRaises(error.SpannerError):
        executor._filter_migrations(migrations, False, '4')

  def test_validate_migrations(self):
    """A migrated prefix followed by unmigrated migrations is valid."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    with mock.patch.object(executor, 'migrations') as migrations:
      migrations.return_value = [first, second, third]
      migrated = {'1': True, '2': False, '3': False}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        executor._validate_migrations()
      migrated = {'1': False, '2': False, '3': False}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        executor._validate_migrations()

  def test_validate_migrations_error_on_unmigrated_after_migrated(self):
    """A gap (migrated entry after an unmigrated one) is invalid."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    with mock.patch.object(executor, 'migrations') as migrations:
      migrations.return_value = [first, second, third]
      migrated = {'1': False, '2': True, '3': False}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        with self.assertRaises(error.SpannerError):
          executor._validate_migrations()
      migrated = {'1': False, '2': False, '3': True}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        with self.assertRaises(error.SpannerError):
          executor._validate_migrations()

  def test_validate_migrations_error_on_unmigrated_first(self):
    """A chain whose first migration has no (or missing) status is invalid."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('2', '1')
    with mock.patch.object(executor, 'migrations') as migrations:
      migrations.return_value = [first]
      migrated = {'1': False}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        with self.assertRaises(error.SpannerError):
          executor._validate_migrations()
      migrated = {}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        with self.assertRaises(error.SpannerError):
          executor._validate_migrations()

  def test_migrate(self):
    """migrate() applies every pending migration in order."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    with mock.patch.object(executor, 'migrations') as migrations:
      migrations.return_value = [first, second, third]
      migrated = {'1': True, '2': False, '3': False}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        executor.migrate()
        self.assertEqual(migrated, {'1': True, '2': True, '3': True})

  def test_rollback(self):
    """rollback(target) reverts applied migrations back through the target."""
    connection = mock.Mock()
    executor = migration_executor.MigrationExecutor(connection,
                                                    self.TEST_MIGRATIONS_DIR)
    first = migration.Migration('1', None)
    second = migration.Migration('2', '1')
    third = migration.Migration('3', '2')
    with mock.patch.object(executor, 'migrations') as migrations:
      migrations.return_value = [first, second, third]
      migrated = {'1': True, '2': False, '3': False}
      with mock.patch.object(executor, '_migration_status_map', migrated):
        executor.rollback('1')
        self.assertEqual(migrated, {'1': False, '2': False, '3': False})

  @classmethod
  def tearDownClass(cls):
    """Remove the shared scratch directory."""
    super().tearDownClass()
    shutil.rmtree(MigrationsTest.TEST_DIR)
if __name__ == '__main__':
  # Configure root-logger output before running the suite directly.
  logging.basicConfig()
  unittest.main()
|
|
import functools
import logging
import simplejson
import urlparse
import werkzeug.utils
from werkzeug.exceptions import BadRequest
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import db_monodb, ensure_db, set_cookie_and_redirect, login_and_redirect
from openerp.addons.auth_signup.controllers.main import AuthSignupHome as Home
from openerp.modules.registry import RegistryManager
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)  # module-level logger for the OAuth flows
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
    """Decorator for OAuth callback handlers.

    Providers using the implicit flow return the access token in the URL
    *fragment*, which browsers never send to the server. When the handler
    is invoked without any parameters, answer with a tiny HTML page whose
    script moves the fragment into the query string and re-navigates, so
    the follow-up request delivers the token as ordinary parameters.
    """
    @functools.wraps(func)
    def wrapper(self, *a, **kw):
        if kw:
            # Parameters arrived server-side: run the real handler.
            return func(self, *a, **kw)
        # No parameters: bounce once through the fragment-rewriting page.
        return """<html><head><script>
            var l = window.location;
            var q = l.hash.substring(1);
            var r = l.pathname + l.search;
            if(q.length !== 0) {
                var s = l.search ? (l.search === '?' ? '' : '&') : '?';
                r = l.pathname + l.search + s + q;
            }
            if (r == l.pathname) {
                r = '/';
            }
            window.location = r;
        </script></head><body></body></html>"""
    return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthLogin(Home):
    """Adds the configured OAuth providers to the login/signup/reset pages."""
    def list_providers(self):
        """Return the enabled OAuth providers, each augmented with an
        'auth_link' URL that starts the provider's authorization flow.
        Returns [] when the model is unavailable or the lookup fails."""
        try:
            provider_obj = request.registry.get('auth.oauth.provider')
            providers = provider_obj.search_read(request.cr, SUPERUSER_ID, [('enabled', '=', True), ('auth_endpoint', '!=', False), ('validation_endpoint', '!=', False)])
            # TODO in forwardport: remove conditions on 'auth_endpoint' and 'validation_endpoint' when these fields will be 'required' in model
        except Exception:
            providers = []
        for provider in providers:
            return_url = request.httprequest.url_root + 'auth_oauth/signin'
            state = self.get_state(provider)
            params = dict(
                debug=request.debug,
                response_type='token',
                client_id=provider['client_id'],
                redirect_uri=return_url,
                scope=provider['scope'],
                state=simplejson.dumps(state),
            )
            # NOTE(review): uses `werkzeug.url_encode` although only
            # `werkzeug.utils` is imported; relies on old werkzeug exposing
            # url helpers at top level -- confirm with the pinned version.
            provider['auth_link'] = provider['auth_endpoint'] + '?' + werkzeug.url_encode(params)
        return providers
    def get_state(self, provider):
        """Build the OAuth `state` payload: database (d), provider id (p),
        quoted post-login redirect (r), and the signup token (t) if any."""
        redirect = request.params.get('redirect', 'web')
        if not redirect.startswith(('//', 'http://', 'https://')):
            # Relative redirect: anchor it at this server's root.
            redirect = '%s%s' % (request.httprequest.url_root, redirect)
        state = dict(
            d=request.session.db,
            p=provider['id'],
            r=werkzeug.url_quote_plus(redirect),
        )
        token = request.params.get('token')
        if token:
            state['t'] = token
        return state
    @http.route()
    def web_login(self, *args, **kw):
        """Render the login page with OAuth buttons; map the `oauth_error`
        query parameter to a user-visible message."""
        ensure_db()
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        providers = self.list_providers()
        response = super(OAuthLogin, self).web_login(*args, **kw)
        if response.is_qweb:
            error = request.params.get('oauth_error')
            if error == '1':
                error = _("Sign up is not allowed on this database.")
            elif error == '2':
                error = _("Access Denied")
            elif error == '3':
                error = _("You do not have access to this database or your invitation has expired. Please ask for an invitation and be sure to follow the link in your invitation email.")
            else:
                error = None
            response.qcontext['providers'] = providers
            if error:
                response.qcontext['error'] = error
        return response
    @http.route()
    def web_auth_signup(self, *args, **kw):
        """Signup page with OAuth providers; a single configured provider
        short-circuits straight to its authorization URL."""
        providers = self.list_providers()
        if len(providers) == 1:
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_signup(*args, **kw)
        response.qcontext.update(providers=providers)
        return response
    @http.route()
    def web_auth_reset_password(self, *args, **kw):
        """Password-reset page with OAuth providers; a single configured
        provider short-circuits straight to its authorization URL."""
        providers = self.list_providers()
        if len(providers) == 1:
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_reset_password(*args, **kw)
        response.qcontext.update(providers=providers)
        return response
class OAuthController(http.Controller):
    """Endpoints that terminate the OAuth2 sign-in flow."""
    @http.route('/auth_oauth/signin', type='http', auth='none')
    @fragment_to_query_string
    def signin(self, **kw):
        """Validate the provider token and log the user in.

        `kw['state']` is the JSON blob built by OAuthLogin.get_state():
        d = dbname, p = provider id, plus optional r/a/m/c keys
        (redirect, action, menu, context).
        """
        state = simplejson.loads(kw['state'])
        dbname = state['d']
        provider = state['p']
        context = state.get('c', {})
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            try:
                u = registry.get('res.users')
                credentials = u.auth_oauth(cr, SUPERUSER_ID, provider, kw, context=context)
                # Commit before redirecting so the authenticated session
                # survives even if the redirect itself fails.
                cr.commit()
                action = state.get('a')
                menu = state.get('m')
                redirect = werkzeug.url_unquote_plus(state['r']) if state.get('r') else False
                url = '/web'
                if redirect:
                    url = redirect
                elif action:
                    url = '/web#action=%s' % action
                elif menu:
                    url = '/web#menu_id=%s' % menu
                return login_and_redirect(*credentials, redirect_url=url)
            except AttributeError:
                # auth_signup is not installed
                _logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
                url = "/web/login?oauth_error=1"
            except openerp.exceptions.AccessDenied:
                # oauth credentials not valid, user could be on a temporary session
                _logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
                url = "/web/login?oauth_error=3"
                redirect = werkzeug.utils.redirect(url, 303)
                redirect.autocorrect_location_header = False
                return redirect
            except Exception, e:
                # signup error (NOTE: Python 2 except syntax; this module
                # targets OpenERP's Python 2 runtime)
                _logger.exception("OAuth2: %s" % str(e))
                url = "/web/login?oauth_error=2"
        # Error fall-through: clear the session cookie and show the login
        # page with the error code picked above.
        return set_cookie_and_redirect(url)
    @http.route('/auth_oauth/oea', type='http', auth='none')
    def oea(self, **kw):
        """login user via Odoo Account provider"""
        dbname = kw.pop('db', None)
        if not dbname:
            dbname = db_monodb()
        if not dbname:
            return BadRequest()
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            IMD = registry['ir.model.data']
            try:
                model, provider_id = IMD.get_object_reference(cr, SUPERUSER_ID, 'auth_oauth', 'provider_openerp')
            except ValueError:
                # Provider record missing: fall back to the plain login page.
                return set_cookie_and_redirect('/web?db=%s' % dbname)
            assert model == 'auth.oauth.provider'
        # Forge a state for the Odoo Account provider and delegate to the
        # generic signin handler; no new users may be created this way.
        state = {
            'd': dbname,
            'p': provider_id,
            'c': {'no_user_creation': True},
        }
        kw['state'] = simplejson.dumps(state)
        return self.signin(**kw)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of nova-cells RPC API (for talking to the nova-cells service
within a cell).
This is different than communication between child and parent nova-cells
services. That communication is handled by the cells driver via the
messging module.
"""
from oslo.config import cfg
from nova import exception
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import proxy as rpc_proxy
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Pull in the cells options ('enable', 'topic') defined in nova.cells.opts.
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
# Operator-settable cap on the message version sent to local cells services,
# used during rolling upgrades (see CellsAPI.VERSION_ALIASES).
rpcapi_cap_opt = cfg.StrOpt('cells',
        default=None,
        help='Set a version cap for messages sent to local cells services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class CellsAPI(rpc_proxy.RpcProxy):
'''Cells client-side RPC API
API version history:
1.0 - Initial version.
1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
1.2 - Adds service_get_all(), service_get_by_compute_host(),
and proxy_rpc_to_compute_manager()
1.3 - Adds task_log_get_all()
1.4 - Adds compute_node_get(), compute_node_get_all(), and
compute_node_stats()
1.5 - Adds actions_get(), action_get_by_request_id(), and
action_events_get()
1.6 - Adds consoleauth_delete_tokens() and validate_console_port()
... Grizzly supports message version 1.6. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.6.
1.7 - Adds service_update()
1.8 - Adds build_instances(), deprecates schedule_run_instance()
1.9 - Adds get_capacities()
1.10 - Adds bdm_update_or_create_at_top(), and bdm_destroy_at_top()
1.11 - Adds get_migrations()
1.12 - Adds instance_start() and instance_stop()
1.13 - Adds cell_create(), cell_update(), cell_delete(), and
cell_get()
1.14 - Adds reboot_instance()
1.15 - Adds suspend_instance() and resume_instance()
1.16 - Adds instance_update_from_api()
'''
BASE_RPC_API_VERSION = '1.0'
VERSION_ALIASES = {
'grizzly': '1.6'
}
def __init__(self):
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cells,
CONF.upgrade_levels.cells)
super(CellsAPI, self).__init__(topic=CONF.cells.topic,
default_version=self.BASE_RPC_API_VERSION,
serializer=objects_base.NovaObjectSerializer(),
version_cap=version_cap)
def cast_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a cast to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
self.cast(ctxt, self.make_msg('run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=False))
def call_compute_api_method(self, ctxt, cell_name, method,
*args, **kwargs):
"""Make a call to a compute API method in a certain cell."""
method_info = {'method': method,
'method_args': args,
'method_kwargs': kwargs}
return self.call(ctxt, self.make_msg('run_compute_api_method',
cell_name=cell_name,
method_info=method_info,
call=True))
# NOTE(alaski): Deprecated and should be removed later.
def schedule_run_instance(self, ctxt, **kwargs):
"""Schedule a new instance for creation."""
self.cast(ctxt, self.make_msg('schedule_run_instance',
host_sched_kwargs=kwargs))
def build_instances(self, ctxt, **kwargs):
"""Build instances."""
build_inst_kwargs = kwargs
instances = build_inst_kwargs['instances']
instances_p = [jsonutils.to_primitive(inst) for inst in instances]
build_inst_kwargs['instances'] = instances_p
build_inst_kwargs['image'] = jsonutils.to_primitive(
build_inst_kwargs['image'])
self.cast(ctxt, self.make_msg('build_instances',
build_inst_kwargs=build_inst_kwargs),
version='1.8')
def instance_update_at_top(self, ctxt, instance):
"""Update instance at API level."""
if not CONF.cells.enable:
return
# Make sure we have a dict, not a SQLAlchemy model
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance_p))
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy instance at API level."""
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_destroy_at_top',
instance=instance_p))
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""Delete instance everywhere. delete_type may be 'soft'
or 'hard'. This is generally only used to resolve races
when API cell doesn't know to what cell an instance belongs.
"""
if not CONF.cells.enable:
return
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('instance_delete_everywhere',
instance=instance_p,
delete_type=delete_type))
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top."""
if not CONF.cells.enable:
return
instance_fault_p = jsonutils.to_primitive(instance_fault)
self.cast(ctxt, self.make_msg('instance_fault_create_at_top',
instance_fault=instance_fault_p))
def bw_usage_update_at_top(self, ctxt, uuid, mac, start_period,
bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None):
"""Broadcast upwards that bw_usage was updated."""
if not CONF.cells.enable:
return
bw_update_info = {'uuid': uuid,
'mac': mac,
'start_period': start_period,
'bw_in': bw_in,
'bw_out': bw_out,
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'last_refreshed': last_refreshed}
self.cast(ctxt, self.make_msg('bw_usage_update_at_top',
bw_update_info=bw_update_info))
def instance_info_cache_update_at_top(self, ctxt, instance_info_cache):
"""Broadcast up that an instance's info_cache has changed."""
if not CONF.cells.enable:
return
iicache = jsonutils.to_primitive(instance_info_cache)
instance = {'uuid': iicache['instance_uuid'],
'info_cache': iicache}
self.cast(ctxt, self.make_msg('instance_update_at_top',
instance=instance))
def get_cell_info_for_neighbors(self, ctxt):
"""Get information about our neighbor cells from the manager."""
if not CONF.cells.enable:
return []
return self.call(ctxt, self.make_msg('get_cell_info_for_neighbors'),
version='1.1')
def sync_instances(self, ctxt, project_id=None, updated_since=None,
deleted=False):
"""Ask all cells to sync instance data."""
if not CONF.cells.enable:
return
return self.cast(ctxt, self.make_msg('sync_instances',
project_id=project_id,
updated_since=updated_since,
deleted=deleted),
version='1.1')
def service_get_all(self, ctxt, filters=None):
"""Ask all cells for their list of services."""
return self.call(ctxt,
self.make_msg('service_get_all',
filters=filters),
version='1.2')
def service_get_by_compute_host(self, ctxt, host_name):
"""Get the service entry for a host in a particular cell. The
cell name should be encoded within the host_name.
"""
return self.call(ctxt, self.make_msg('service_get_by_compute_host',
host_name=host_name),
version='1.2')
def service_update(self, ctxt, host_name, binary, params_to_update):
"""
Used to enable/disable a service. For compute services, setting to
disabled stops new builds arriving on that host.
:param host_name: the name of the host machine that the service is
running
:param binary: The name of the executable that the service runs as
:param params_to_update: eg. {'disabled': True}
"""
return self.call(ctxt, self.make_msg(
'service_update', host_name=host_name,
binary=binary, params_to_update=params_to_update),
version='1.7')
def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False,
timeout=None):
"""Proxy RPC to a compute manager. The host in the topic
should be encoded with the target cell name.
"""
return self.call(ctxt, self.make_msg('proxy_rpc_to_manager',
topic=topic,
rpc_message=rpc_message,
call=call,
timeout=timeout),
timeout=timeout,
version='1.2')
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get the task logs from the DB in child cells."""
return self.call(ctxt, self.make_msg('task_log_get_all',
task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state),
version='1.3')
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
return self.call(ctxt, self.make_msg('compute_node_get',
compute_id=compute_id),
version='1.4')
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells, optionally
filtering by hypervisor host.
"""
return self.call(ctxt,
self.make_msg('compute_node_get_all',
hypervisor_match=hypervisor_match),
version='1.4')
def compute_node_stats(self, ctxt):
"""Return compute node stats from all cells."""
return self.call(ctxt, self.make_msg('compute_node_stats'),
version='1.4')
def actions_get(self, ctxt, instance):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('actions_get',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid']),
version='1.5')
def action_get_by_request_id(self, ctxt, instance, request_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('action_get_by_request_id',
cell_name=instance['cell_name'],
instance_uuid=instance['uuid'],
request_id=request_id),
version='1.5')
def action_events_get(self, ctxt, instance, action_id):
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance['uuid'])
return self.call(ctxt, self.make_msg('action_events_get',
cell_name=instance['cell_name'],
action_id=action_id),
version='1.5')
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
self.cast(ctxt, self.make_msg('consoleauth_delete_tokens',
instance_uuid=instance_uuid),
version='1.6')
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
return self.call(ctxt,
self.make_msg('validate_console_port',
instance_uuid=instance_uuid,
console_port=console_port,
console_type=console_type),
version='1.6')
def get_capacities(self, ctxt, cell_name=None):
return self.call(ctxt,
self.make_msg('get_capacities', cell_name=cell_name),
version='1.9')
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
"""Create or update a block device mapping in API cells. If
create is True, only try to create. If create is None, try to
update but fall back to create. If create is False, only attempt
to update. This maps to nova-conductor's behavior.
"""
if not CONF.cells.enable:
return
try:
self.cast(ctxt, self.make_msg('bdm_update_or_create_at_top',
bdm=bdm, create=create),
version='1.10')
except Exception:
LOG.exception(_("Failed to notify cells of BDM update/create."))
def bdm_destroy_at_top(self, ctxt, instance_uuid, device_name=None,
volume_id=None):
"""Broadcast upwards that a block device mapping was destroyed.
One of device_name or volume_id should be specified.
"""
if not CONF.cells.enable:
return
try:
self.cast(ctxt, self.make_msg('bdm_destroy_at_top',
instance_uuid=instance_uuid,
device_name=device_name,
volume_id=volume_id),
version='1.10')
except Exception:
LOG.exception(_("Failed to notify cells of BDM destroy."))
def get_migrations(self, ctxt, filters):
"""Get all migrations applying the filters."""
return self.call(ctxt, self.make_msg('get_migrations',
filters=filters), version='1.11')
def instance_update_from_api(self, ctxt, instance, expected_vm_state,
expected_task_state, admin_state_reset):
"""Update an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
self.cast(ctxt,
self.make_msg('instance_update_from_api',
instance=instance,
expected_vm_state=expected_vm_state,
expected_task_state=expected_task_state,
admin_state_reset=admin_state_reset),
version='1.16')
def start_instance(self, ctxt, instance):
"""Start an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
self.cast(ctxt,
self.make_msg('start_instance', instance=instance),
version='1.12')
def stop_instance(self, ctxt, instance, do_cast=True):
"""Stop an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
method = do_cast and self.cast or self.call
return method(ctxt,
self.make_msg('stop_instance', instance=instance,
do_cast=do_cast),
version='1.12')
def cell_create(self, ctxt, values):
return self.call(ctxt,
self.make_msg('cell_create', values=values),
version='1.13')
def cell_update(self, ctxt, cell_name, values):
return self.call(ctxt,
self.make_msg('cell_update',
cell_name=cell_name,
values=values),
version='1.13')
def cell_delete(self, ctxt, cell_name):
return self.call(ctxt,
self.make_msg('cell_delete', cell_name=cell_name),
version='1.13')
def cell_get(self, ctxt, cell_name):
return self.call(ctxt,
self.make_msg('cell_get', cell_name=cell_name),
version='1.13')
    def reboot_instance(self, ctxt, instance, block_device_info,
                        reboot_type):
        """Reboot an instance in its cell.
        This method takes a new-world instance object.

        Fire-and-forget cast (version 1.14); no-op when cells are disabled.
        """
        # NOTE(review): block_device_info is accepted but never forwarded in
        # the message below -- presumably intentional for the cells RPC
        # contract, but confirm against the cells manager signature.
        if not CONF.cells.enable:
            return
        self.cast(ctxt,
                  self.make_msg('reboot_instance', instance=instance,
                                reboot_type=reboot_type),
                  version='1.14')
def suspend_instance(self, ctxt, instance):
"""Suspend an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
self.cast(ctxt,
self.make_msg('suspend_instance', instance=instance),
version='1.15')
def resume_instance(self, ctxt, instance):
"""Resume an instance in its cell.
This method takes a new-world instance object.
"""
if not CONF.cells.enable:
return
self.cast(ctxt,
self.make_msg('resume_instance', instance=instance),
version='1.15')
|
|
import sys
import ctypes
import unittest
from dynd import ndt
class TestType(unittest.TestCase):
    """Smoke tests: constructing these dynd types must not raise."""

    def test_tuple(self):
        ndt.tuple(ndt.int32, ndt.float64)

    def test_struct(self):
        ndt.struct(x=ndt.int32, y=ndt.float64)

    def test_callable(self):
        ndt.callable(ndt.void, ndt.int32, ndt.float64, x=ndt.complex128)
class TestTypeFor(unittest.TestCase):
    """Checks ndt.type_for's mapping from Python values to dynd types."""

    def test_bool(self):
        for value in (True, False):
            self.assertEqual(ndt.bool, ndt.type_for(value))

    def test_int(self):
        for value in (0, 1, 7):
            self.assertEqual(ndt.int32, ndt.type_for(value))

    def test_float(self):
        # No float cases are exercised yet.
        pass
class TestDType(unittest.TestCase):
    """Checks size, alignment, repr, and construction of dynd scalar and
    composite types."""
    def test_bool_type_properties(self):
        self.assertEqual(type(ndt.bool), ndt.type)
        self.assertEqual(str(ndt.bool), 'bool')
        self.assertEqual(ndt.bool.data_size, 1)
        self.assertEqual(ndt.bool.data_alignment, 1)
    def test_int_type_properties(self):
        self.assertEqual(type(ndt.int8), ndt.type)
        self.assertEqual(str(ndt.int8), 'int8')
        self.assertEqual(ndt.int8.data_size, 1)
        self.assertEqual(ndt.int8.data_alignment, 1)
        self.assertEqual(type(ndt.int16), ndt.type)
        self.assertEqual(str(ndt.int16), 'int16')
        self.assertEqual(ndt.int16.data_size, 2)
        self.assertEqual(ndt.int16.data_alignment, 2)
        self.assertEqual(type(ndt.int32), ndt.type)
        self.assertEqual(str(ndt.int32), 'int32')
        self.assertEqual(ndt.int32.data_size, 4)
        self.assertEqual(ndt.int32.data_alignment, 4)
        self.assertEqual(type(ndt.int64), ndt.type)
        self.assertEqual(str(ndt.int64), 'int64')
        self.assertEqual(ndt.int64.data_size, 8)
        # 8-byte alignment is platform-dependent (4 on some 32-bit ABIs).
        self.assertTrue(ndt.int64.data_alignment in [4,8])
        # intptr mirrors the native pointer width.
        self.assertEqual(type(ndt.intptr), ndt.type)
        if ctypes.sizeof(ctypes.c_void_p) == 4:
            self.assertEqual(str(ndt.intptr), 'int32')
            self.assertEqual(ndt.intptr.data_size, 4)
            self.assertEqual(ndt.intptr.data_alignment, 4)
        else:
            self.assertEqual(str(ndt.intptr), 'int64')
            self.assertEqual(ndt.intptr.data_size, 8)
            self.assertEqual(ndt.intptr.data_alignment, 8)
    def test_uint_type_properties(self):
        self.assertEqual(type(ndt.uint8), ndt.type)
        self.assertEqual(str(ndt.uint8), 'uint8')
        self.assertEqual(ndt.uint8.data_size, 1)
        self.assertEqual(ndt.uint8.data_alignment, 1)
        self.assertEqual(type(ndt.uint16), ndt.type)
        self.assertEqual(str(ndt.uint16), 'uint16')
        self.assertEqual(ndt.uint16.data_size, 2)
        self.assertEqual(ndt.uint16.data_alignment, 2)
        self.assertEqual(type(ndt.uint32), ndt.type)
        self.assertEqual(str(ndt.uint32), 'uint32')
        self.assertEqual(ndt.uint32.data_size, 4)
        self.assertEqual(ndt.uint32.data_alignment, 4)
        self.assertEqual(type(ndt.uint64), ndt.type)
        self.assertEqual(str(ndt.uint64), 'uint64')
        self.assertEqual(ndt.uint64.data_size, 8)
        # 8-byte alignment is platform-dependent (4 on some 32-bit ABIs).
        self.assertTrue(ndt.uint64.data_alignment in [4,8])
        # uintptr mirrors the native pointer width.
        self.assertEqual(type(ndt.uintptr), ndt.type)
        if ctypes.sizeof(ctypes.c_void_p) == 4:
            self.assertEqual(str(ndt.uintptr), 'uint32')
            self.assertEqual(ndt.uintptr.data_size, 4)
            self.assertEqual(ndt.uintptr.data_alignment, 4)
        else:
            self.assertEqual(str(ndt.uintptr), 'uint64')
            self.assertEqual(ndt.uintptr.data_size, 8)
            self.assertEqual(ndt.uintptr.data_alignment, 8)
    def test_float_type_properties(self):
        self.assertEqual(type(ndt.float32), ndt.type)
        self.assertEqual(str(ndt.float32), 'float32')
        self.assertEqual(ndt.float32.data_size, 4)
        self.assertEqual(ndt.float32.data_alignment, 4)
        self.assertEqual(type(ndt.float64), ndt.type)
        self.assertEqual(str(ndt.float64), 'float64')
        self.assertEqual(ndt.float64.data_size, 8)
        # 8-byte alignment is platform-dependent (4 on some 32-bit ABIs).
        self.assertTrue(ndt.float64.data_alignment in [4,8])
    def test_complex_type_properties(self):
        # A complex type is two components wide; alignment follows the
        # component float type.
        self.assertEqual(type(ndt.complex_float32), ndt.type)
        self.assertEqual(str(ndt.complex_float32), 'complex[float32]')
        self.assertEqual(ndt.complex_float32.data_size, 8)
        self.assertEqual(ndt.complex_float32.data_alignment, 4)
        self.assertEqual(type(ndt.complex_float64), ndt.type)
        self.assertEqual(str(ndt.complex_float64), 'complex[float64]')
        self.assertEqual(ndt.complex_float64.data_size, 16)
        self.assertTrue(ndt.complex_float64.data_alignment in [4,8])
    def test_fixed_string_type_properties(self):
        # data_size scales with the encoding's code-unit width
        # (1 byte for ascii/utf8, 2 for ucs2/utf16, 4 for utf32).
        d = ndt.make_fixed_string(10, 'ascii')
        self.assertEqual(str(d), "fixed_string[10, 'ascii']")
        self.assertEqual(d.data_size, 10)
        self.assertEqual(d.data_alignment, 1)
        # self.assertEqual(d.encoding, 'ascii')
        d = ndt.make_fixed_string(10, 'ucs2')
        self.assertEqual(str(d), "fixed_string[10, 'ucs2']")
        self.assertEqual(d.data_size, 20)
        self.assertEqual(d.data_alignment, 2)
        # self.assertEqual(d.encoding, 'ucs2')
        # utf8 is the default encoding, so it is omitted from the repr.
        d = ndt.make_fixed_string(10, 'utf8')
        self.assertEqual(str(d), 'fixed_string[10]')
        self.assertEqual(d.data_size, 10)
        self.assertEqual(d.data_alignment, 1)
        # self.assertEqual(d.encoding, 'utf8')
        d = ndt.make_fixed_string(10, 'utf16')
        self.assertEqual(str(d), "fixed_string[10, 'utf16']")
        self.assertEqual(d.data_size, 20)
        self.assertEqual(d.data_alignment, 2)
        # self.assertEqual(d.encoding, 'utf16')
        d = ndt.make_fixed_string(10, 'utf32')
        self.assertEqual(str(d), "fixed_string[10, 'utf32']")
        self.assertEqual(d.data_size, 40)
        self.assertEqual(d.data_alignment, 4)
        # self.assertEqual(d.encoding, 'utf32')
    def test_scalar_types(self):
        # Mapping from builtin Python types to default dynd types.
        self.assertEqual(ndt.bool, ndt.type(bool))
        self.assertEqual(ndt.int32, ndt.type(int))
        self.assertEqual(ndt.float64, ndt.type(float))
        self.assertEqual(ndt.complex_float64, ndt.type(complex))
        self.assertEqual(ndt.string, ndt.type(str))
        self.assertEqual(ndt.bytes, ndt.type(bytearray))
        if sys.version_info[0] == 2:
            self.assertEqual(ndt.string, ndt.type(unicode))
        if sys.version_info[0] >= 3:
            self.assertEqual(ndt.bytes, ndt.type(bytes))
    def test_fixed_bytes_type(self):
        d = ndt.make_fixed_bytes(4, 4)
        self.assertEqual(str(d), 'fixed_bytes[4, align=4]')
        self.assertEqual(d.data_size, 4)
        self.assertEqual(d.data_alignment, 4)
        # Default alignment of 1 is omitted from the repr.
        d = ndt.make_fixed_bytes(9, 1)
        self.assertEqual(str(d), 'fixed_bytes[9]')
        self.assertEqual(d.data_size, 9)
        self.assertEqual(d.data_alignment, 1)
        # Alignment must not be greater than data_size
        self.assertRaises(RuntimeError, ndt.make_fixed_bytes, 1, 2)
        # Alignment must be a power of 2
        self.assertRaises(RuntimeError, ndt.make_fixed_bytes, 6, 3)
        # Alignment must divide into the data_size
        self.assertRaises(RuntimeError, ndt.make_fixed_bytes, 6, 4)
    def test_cstruct_type(self):
        # Field names participate in struct type equality.
        self.assertFalse(ndt.type('{x: int32}') == ndt.type('{y: int32}'))
    def test_callable_type(self):
        tp = ndt.callable(ndt.int32, ndt.float64)
    def test_struct_type(self):
        tp = ndt.make_struct([ndt.int32, ndt.int64], ['x', 'y'])
        self.assertTrue(tp.field_types, [ndt.int32, ndt.int64])
        self.assertTrue(tp.field_names, ['x', 'y'])
        # One pointer-sized arrmeta entry per field.
        self.assertEqual(tp.arrmeta_size, 2 * ctypes.sizeof(ctypes.c_void_p))
        self.assertTrue(tp.data_size is None)
    def test_tuple_type(self):
        tp = ndt.type('(int32, int64)')
        self.assertTrue(tp.field_types, [ndt.int32, ndt.int64])
        self.assertEqual(tp.arrmeta_size, 2 * ctypes.sizeof(ctypes.c_void_p))
        self.assertTrue(tp.data_size is None)
    def test_type_shape(self):
        # The shape attribute of ndt.type
        tp = ndt.type('3 * 4 * int32')
        self.assertEqual(tp.shape, (3, 4))
        # Symbolic/variable dimensions are reported as -1.
        tp = ndt.type('Fixed * 3 * var * int32')
        self.assertEqual(tp.shape, (-1, 3, -1))
        tp = ndt.type('var * 3 * 2 * int32')
        self.assertEqual(tp.shape, (-1, 3, 2))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
|
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===========================
Upper Air Sounding Tutorial
===========================
Upper air analysis is a staple of many synoptic and mesoscale analysis
problems. In this tutorial we will gather weather balloon data, plot it,
perform a series of thermodynamic calculations, and summarize the results.
To learn more about the Skew-T diagram and its use in weather analysis and
forecasting, check out `this <https://homes.comet.ucar.edu/~alanbol/aws-tr-79-006.pdf>`_
air weather service guide.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Upper air data can be obtained using the siphon package, but for this tutorial we will use
# some of MetPy's sample data. This event is the Veterans Day tornado outbreak in 2002.
# Fixed-width sounding text file: skip the header rows and keep only the
# columns named here.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
                 skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
# NOTE(review): direction is converted with np.deg2rad here, while the
# second wind_components call below passes units.degrees instead -- confirm
# which convention this MetPy version's wind_components expects.
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
                                                    np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
                       'u_wind', 'v_wind'), how='all').reset_index(drop=True)
##########################################################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
##########################################################################
# Thermodynamic Calculations
# --------------------------
#
# Often times we will want to calculate some thermodynamic parameters of a
# sounding. The MetPy calc module has many such calculations already implemented!
#
# * **Lifting Condensation Level (LCL)** - The level at which an air parcel's
# relative humidity becomes 100% when lifted along a dry adiabatic path.
# * **Parcel Path** - Path followed by a hypothetical parcel of air, beginning
# at the surface temperature/pressure and rising dry adiabatically until
#   reaching the LCL, then rising moist adiabatically.
# Calculate the LCL from the surface (index 0) pressure, temperature,
# and dewpoint.
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile: lifted from the surface over all pressure
# levels, converted to degC to match the plotted temperatures.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
##########################################################################
# Basic Skew-T Plotting
# ---------------------
#
# The Skew-T (log-P) diagram is the standard way to view rawinsonde data. The
# y-axis is height in pressure coordinates and the x-axis is temperature. The
# y coordinates are plotted on a logarithmic scale and the x coordinate system
# is skewed. An explanation of skew-T interpretation is beyond the scope of this
# tutorial, but here we will plot one that can be used for analysis or
# publication.
#
# The most basic skew-T can be plotted with only five lines of Python.
# These lines perform the following tasks:
#
# 1. Create a ``Figure`` object and set the size of the figure.
#
# 2. Create a ``SkewT`` object
#
# 3. Plot the pressure and temperature (note that the pressure,
# the independent variable, is first even though it is plotted on the y-axis).
#
# 4. Plot the pressure and dewpoint temperature.
#
# 5. Plot the wind barbs at the appropriate pressure using the u and v wind
# components.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
# (red = temperature, green = dewpoint).
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()
##########################################################################
# Advanced Skew-T Plotting
# ------------------------
#
# Fiducial lines indicating dry adiabats, moist adiabats, and mixing ratio are
# useful when performing further analysis on the Skew-T diagram. Often the
# 0C isotherm is emphasized and areas of CAPE and CIN are shaded.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
# Limit the axes: pressure decreasing upward, temperature in degC.
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
##########################################################################
# Adding a Hodograph
# ------------------
#
# A hodograph is a polar representation of the wind profile measured by the rawinsonde.
# Winds at different levels are plotted as vectors with their tails at the origin, the angle
# from the vertical axes representing the direction, and the length representing the speed.
# The line plotted on the hodograph is a line connecting the tips of these vectors,
# which are not drawn.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner (loc=1).
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed)  # Plot a line colored by wind speed
# Show the plot
plt.show()
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Chain Tests."""
# Dependency imports
import mock
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
class ShapeChanging(tfb.Bijector):
  """Only used for op_ndims manipulation."""
  # Stub bijector: it only declares forward/inverse min_event_ndims so
  # Chain's event-ndims bookkeeping can be exercised; it defines no actual
  # transformation.
  def __init__(self,
               forward_min_event_ndims=0,
               inverse_min_event_ndims=3):
    super(ShapeChanging, self).__init__(
        forward_min_event_ndims=forward_min_event_ndims,
        inverse_min_event_ndims=inverse_min_event_ndims,
        validate_args=False, name="shape_changer")
class PermuteParts(tfb.Bijector):
  """Only used for op_ndims manipulation.

  A stub two-part bijector that swaps the event ndims of its parts in
  both directions; the parts do not interact.
  """

  def __init__(self):
    super(PermuteParts, self).__init__(
        forward_min_event_ndims=[0, 0],
        inverse_min_event_ndims=[0, 0],
        validate_args=False, name="permute_parts")

  def forward_event_ndims(self, event_ndims):
    # Exactly two parts (see the [0, 0] min_event_ndims above).
    first, second = event_ndims
    return [second, first]

  def inverse_event_ndims(self, event_ndims):
    first, second = event_ndims
    return [second, first]

  @property
  def _parts_interact(self):
    return False
@test_util.test_all_tf_execution_regimes
class ChainBijectorTest(test_util.TestCase):
  """Tests the correctness of the Y = Chain(bij1, bij2, bij3) transformation."""
  def testBijector(self):
    # Chain applies right-to-left: forward(x) = exp(softplus(x)) = 1 + exp(x).
    chain = tfb.Chain((tfb.Exp(), tfb.Softplus()))
    self.assertStartsWith(chain.name, "chain_of_exp_of_softplus")
    x = np.asarray([[[1., 2.],
                     [2., 3.]]])
    self.assertAllClose(1. + np.exp(x), self.evaluate(chain.forward(x)))
    self.assertAllClose(np.log(x - 1.), self.evaluate(chain.inverse(x)))
    self.assertAllClose(
        -np.sum(np.log(x - 1.), axis=2),
        self.evaluate(chain.inverse_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        np.sum(x, axis=2),
        self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
  def testBijectorIdentity(self):
    # An empty Chain degenerates to the identity bijector.
    chain = tfb.Chain()
    self.assertStartsWith(chain.name, "identity")
    x = np.asarray([[[1., 2.],
                     [2., 3.]]])
    self.assertAllClose(x, self.evaluate(chain.forward(x)))
    self.assertAllClose(x, self.evaluate(chain.inverse(x)))
    self.assertAllClose(
        0., self.evaluate(chain.inverse_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        0., self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
  def testNestedDtype(self):
    # The float64 dtype of the Scale bijector must propagate through the
    # surrounding Identity bijectors.
    chain = tfb.Chain([
        tfb.Identity(),
        tfb.Scale(tf.constant(2., tf.float64)),
        tfb.Identity()
    ])
    self.assertAllClose(tf.constant([2, 4, 6], tf.float64),
                        self.evaluate(chain.forward([1, 2, 3])))
  def testScalarCongruency(self):
    chain = tfb.Chain((tfb.Exp(), tfb.Softplus()))
    bijector_test_util.assert_scalar_congruency(
        chain, lower_x=1e-3, upper_x=1.5, rtol=0.05, eval_func=self.evaluate)
  def testShapeGetters(self):
    # Two SoftmaxCentered bijectors each grow the event size by one: 1 -> 3.
    chain = tfb.Chain([
        tfb.SoftmaxCentered(validate_args=True),
        tfb.SoftmaxCentered(validate_args=True),
    ])
    x = tf.TensorShape([1])
    y = tf.TensorShape([2 + 1])
    self.assertAllEqual(y, chain.forward_event_shape(x))
    self.assertAllEqual(
        tensorshape_util.as_list(y),
        self.evaluate(
            chain.forward_event_shape_tensor(tensorshape_util.as_list(x))))
    self.assertAllEqual(x, chain.inverse_event_shape(y))
    self.assertAllEqual(
        tensorshape_util.as_list(x),
        self.evaluate(
            chain.inverse_event_shape_tensor(tensorshape_util.as_list(y))))
  def _validateChainMinEventNdims(self,
                                  bijectors,
                                  forward_min_event_ndims,
                                  inverse_min_event_ndims):
    # Checks the chain's min_event_ndims, and that inverting every bijector
    # and reversing their order swaps the forward/inverse values.
    chain = tfb.Chain(bijectors)
    self.assertAllEqual(forward_min_event_ndims,
                        chain.forward_min_event_ndims)
    self.assertAllEqual(inverse_min_event_ndims,
                        chain.inverse_min_event_ndims)
    chain_inverse = tfb.Chain([tfb.Invert(b) for b in reversed(bijectors)])
    self.assertAllEqual(forward_min_event_ndims,
                        chain_inverse.inverse_min_event_ndims)
    self.assertAllEqual(inverse_min_event_ndims,
                        chain_inverse.forward_min_event_ndims)
  def testMinEventNdimsChain(self):
    self._validateChainMinEventNdims(
        bijectors=[
            tfb.Exp(),
            tfb.Exp(),
            tfb.Exp()
        ],
        forward_min_event_ndims=0,
        inverse_min_event_ndims=0)
    self._validateChainMinEventNdims(
        bijectors=[
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.]),
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.]),
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.])
        ],
        forward_min_event_ndims=1,
        inverse_min_event_ndims=1)
    self._validateChainMinEventNdims(
        bijectors=[
            tfb.Exp(),
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.])
        ],
        forward_min_event_ndims=1,
        inverse_min_event_ndims=1)
    self._validateChainMinEventNdims(
        bijectors=[
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.]),
            tfb.Exp(),
            tfb.Softplus(),
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.])
        ],
        forward_min_event_ndims=1,
        inverse_min_event_ndims=1)
  def testMinEventNdimsShapeChangingAddDims(self):
    self._validateChainMinEventNdims(
        bijectors=[
            ShapeChanging()
        ],
        forward_min_event_ndims=0,
        inverse_min_event_ndims=3)
    self._validateChainMinEventNdims(
        bijectors=[
            ShapeChanging(),
            tfb.ScaleMatvecDiag(scale_diag=[1., 1.])
        ],
        forward_min_event_ndims=1,
        inverse_min_event_ndims=4)
    self._validateChainMinEventNdims(
        bijectors=[
            ShapeChanging(),
            ShapeChanging()
        ],
        forward_min_event_ndims=0,
        inverse_min_event_ndims=6)
  def testMinEventNdimsShapeChangingAddRemoveDims(self):
    self._validateChainMinEventNdims(
        bijectors=[
            ShapeChanging(2, 1),
            ShapeChanging(3, 0),
            ShapeChanging(1, 2)
        ],
        forward_min_event_ndims=4,
        inverse_min_event_ndims=1)
  def testMinEventNdimsWithJointMap(self):
    jm_0 = tfb.JointMap([ShapeChanging(1, 1), ShapeChanging(3, 1)])
    split = ShapeChanging(1, [1, 1])
    concat = ShapeChanging([1, 1], 1)
    jm_1 = tfb.JointMap([ShapeChanging(1, 0), ShapeChanging(1, 1)])
    permute = PermuteParts()
    self._validateChainMinEventNdims(
        bijectors=[jm_0, split, concat, jm_1],
        forward_min_event_ndims=[4, 3],
        inverse_min_event_ndims=[3, 1])
    self._validateChainMinEventNdims(
        bijectors=[jm_0, jm_1],
        forward_min_event_ndims=[2, 3],
        inverse_min_event_ndims=[1, 1])
    self._validateChainMinEventNdims(
        bijectors=[jm_1, jm_0],
        forward_min_event_ndims=[1, 3],
        inverse_min_event_ndims=[0, 1])
    self._validateChainMinEventNdims(
        bijectors=[jm_1, permute, jm_0],
        forward_min_event_ndims=[1, 3],
        inverse_min_event_ndims=[0, 1])
    self._validateChainMinEventNdims(
        bijectors=[jm_0, split],
        forward_min_event_ndims=3,
        inverse_min_event_ndims=[3, 1])
    self._validateChainMinEventNdims(
        bijectors=[permute, jm_1, split],
        forward_min_event_ndims=1,
        inverse_min_event_ndims=[1, 0])
  def testMinEventNdimsWithPartiallyDependentJointMap(self):
    # Split-then-concat makes the two parts interact, even when wrapped
    # inside a JointMap via Restructure.
    dependent = tfb.Chain([tfb.Split(2), tfb.Invert(tfb.Split(2))])
    wrap_in_list = tfb.Restructure(input_structure=[0, 1],
                                   output_structure=[[0, 1]])
    dependent_as_chain = tfb.Chain([
        tfb.Invert(wrap_in_list),
        tfb.JointMap([dependent]),
        wrap_in_list])
    self.assertAllEqualNested(dependent.forward_min_event_ndims,
                              dependent_as_chain.forward_min_event_ndims)
    self.assertAllEqualNested(dependent.inverse_min_event_ndims,
                              dependent_as_chain.inverse_min_event_ndims)
    self.assertAllEqualNested(dependent._parts_interact,
                              dependent_as_chain._parts_interact)
  def testInvalidChainNdimsRaisesError(self):
    with self.assertRaisesRegexp(
        ValueError,
        "Differences between `event_ndims` and `min_event_ndims must be equal"):
      tfb.Chain([ShapeChanging([1, 1], [1, 1]),
                 ShapeChanging([1, 1], [2, 1])])
  def testChainExpAffine(self):
    # forward(x) = exp(scale_diag * x), applied right-to-left.
    scale_diag = np.array([1., 2., 3.], dtype=np.float32)
    chain = tfb.Chain([tfb.Exp(), tfb.ScaleMatvecDiag(scale_diag=scale_diag)])
    x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
    y = [1., 4., 27.]
    self.assertAllClose(y, self.evaluate(chain.forward(x)))
    self.assertAllClose(x, self.evaluate(chain.inverse(y)))
    self.assertAllClose(
        np.log(6, dtype=np.float32) + np.sum(scale_diag * x),
        self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        -np.log(6, dtype=np.float32) - np.sum(scale_diag * x),
        self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
  def testChainAffineExp(self):
    # forward(x) = scale_diag * exp(x), applied right-to-left.
    scale_diag = np.array([1., 2., 3.], dtype=np.float32)
    chain = tfb.Chain([tfb.ScaleMatvecDiag(scale_diag=scale_diag), tfb.Exp()])
    x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
    y = [1., 4., 9.]
    self.assertAllClose(y, self.evaluate(chain.forward(x)))
    self.assertAllClose(x, self.evaluate(chain.inverse(y)))
    self.assertAllClose(
        np.log(6, dtype=np.float32) + np.sum(x),
        self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
    self.assertAllClose(
        -np.log(6, dtype=np.float32) - np.sum(x),
        self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
  def testEventNdimsIsOptional(self):
    # Jacobian methods must also work without an explicit event_ndims.
    scale_diag = np.array([1., 2., 3.], dtype=np.float32)
    chain = tfb.Chain([tfb.ScaleMatvecDiag(scale_diag=scale_diag), tfb.Exp()])
    x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
    y = [1., 4., 9.]
    self.assertAllClose(
        np.log(6, dtype=np.float32) + np.sum(x),
        self.evaluate(chain.forward_log_det_jacobian(x)))
    self.assertAllClose(
        -np.log(6, dtype=np.float32) - np.sum(x),
        self.evaluate(chain.inverse_log_det_jacobian(y)))
  def testChainIldjWithPlaceholder(self):
    # Dynamic (unknown) input shape must still yield a computable ILDJ.
    chain = tfb.Chain((tfb.Exp(), tfb.Exp()))
    samples = tf1.placeholder_with_default(
        np.zeros([2, 10], np.float32), shape=None)
    ildj = chain.inverse_log_det_jacobian(samples, event_ndims=0)
    self.assertIsNotNone(ildj)
    self.evaluate(ildj)
  def testChainDynamicToStatic(self):
    if tf.executing_eagerly():
      return
    def xform_dynamic(x):
      return tf1.placeholder_with_default(x, shape=None)
    def xform_static(x):
      # Copy the Tensor, because otherwise the set_shape can pass information
      # into the past.
      x = tf.identity(x)
      tensorshape_util.set_shape(x, [1])
      return x
    def ldj(_):
      return tf.constant(1.)
    # The issue was that the sample's shape was going in-and-out of being fully
    # specified, causing internal consistency issues inside the bijector.
    chain = tfb.Chain([
        tfb.Inline(
            inverse_fn=xform_dynamic,
            forward_min_event_ndims=0,
            forward_log_det_jacobian_fn=ldj,
            forward_fn=xform_dynamic),
        tfb.Inline(
            inverse_fn=xform_static,
            forward_min_event_ndims=0,
            forward_log_det_jacobian_fn=ldj,
            forward_fn=xform_static),
        tfb.Inline(
            inverse_fn=xform_dynamic,
            forward_min_event_ndims=0,
            forward_log_det_jacobian_fn=ldj,
            forward_fn=xform_dynamic)
    ])
    ildj = chain.inverse_log_det_jacobian(
        tf.zeros((2, 3), dtype=tf.float32), event_ndims=1)
    # The shape of `ildj` is known statically to be scalar; its value is
    # not statically known.
    self.assertTrue(tensorshape_util.is_fully_defined(ildj.shape))
    # `ldj_reduce_shape` uses `prefer_static` to get input shapes. That means
    # that we respect statically-known shape information where present.
    # In this case, the manually-assigned static shape is incorrect.
    self.assertEqual(self.evaluate(ildj), -7.)
    # Ditto.
    fldj = chain.forward_log_det_jacobian([0.], event_ndims=0)
    self.assertTrue(tensorshape_util.is_fully_defined(fldj.shape))
    self.assertEqual(self.evaluate(fldj), 3.)
  def testDofChangeError(self):
    exp = tfb.Exp()
    smc = tfb.SoftmaxCentered()
    # Increase in event-size is the last step. No problems here.
    safe_bij = tfb.Chain([smc, exp],
                         validate_args=True,
                         validate_event_size=True)
    self.evaluate(safe_bij.forward_log_det_jacobian([1., 2., 3.], 1))
    # Increase in event-size before Exp.
    raise_bij = tfb.Chain([exp, smc],
                          validate_args=True,
                          validate_event_size=True)
    with self.assertRaisesRegex((ValueError, tf.errors.InvalidArgumentError),
                                r".+degrees of freedom.+"):
      self.evaluate(raise_bij.forward_log_det_jacobian([1., 2., 3.], 1))
    # When validate_args is False, warns instead of raising.
    warn_bij = tfb.Chain([exp, smc],
                         validate_args=False,
                         validate_event_size=True)
    with mock.patch.object(tf, "print", return_value=tf.no_op()) as mock_print:
      self.evaluate(warn_bij.forward_log_det_jacobian([1., 2., 3.], 1))
      print_args, _ = mock_print.call_args
      self.assertRegex(print_args[0], r"WARNING:.+degrees of freedom")
    # When validate_event_shape is False, neither warns nor raises.
    ignore_bij = tfb.Chain([exp, smc], validate_event_size=False)
    self.evaluate(ignore_bij.forward_log_det_jacobian([1., 2., 3.], 1))
  def testDofValidationDoesNoHarm(self):
    # Chain with no change in degrees-of-freedom.
    bij = tfb.Chain([tfb.Exp()], validate_args=True, validate_event_size=True)
    self.evaluate(bij.forward_log_det_jacobian([1., 2., 3.], 1))
  @test_util.disable_test_for_backend(
      disable_numpy=True, disable_jax=True,
      reason="Numpy and JAX have no notion of CompositeTensor/saved_model.")
  def testCompositeTensor(self):
    exp = tfb.Exp()
    sp = tfb.Softplus()
    aff = tfb.Scale(scale=2.)
    chain = tfb.Chain(bijectors=[exp, sp, aff])
    self.assertIsInstance(chain, tf.__internal__.CompositeTensor)
    # Bijector may be flattened into `Tensor` components and rebuilt.
    flat = tf.nest.flatten(chain, expand_composites=True)
    unflat = tf.nest.pack_sequence_as(chain, flat, expand_composites=True)
    self.assertIsInstance(unflat, tfb.Chain)
    # Bijector may be input to a `tf.function`-decorated callable.
    @tf.function
    def call_forward(bij, x):
      return bij.forward(x)
    x = tf.ones([2, 3], dtype=tf.float32)
    self.assertAllClose(call_forward(unflat, x), chain.forward(x))
    # TypeSpec can be encoded/decoded.
    enc = tf.__internal__.saved_model.encode_structure(chain._type_spec)
    dec = tf.__internal__.saved_model.decode_proto(enc)
    self.assertEqual(chain._type_spec, dec)
  def testNonCompositeTensor(self):
    # One non-CompositeTensor member makes the whole chain non-composite,
    # but forward composition must still work.
    exp = tfb.Exp()
    scale = test_util.NonCompositeTensorScale(scale=tf.constant(3.))
    chain = tfb.Chain(bijectors=[exp, scale])
    self.assertNotIsInstance(chain, tf.__internal__.CompositeTensor)
    self.assertAllClose(chain.forward([1.]), exp.forward(scale.forward([1.])))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
  test_util.main()
|
|
# Copyright 2014 Rackspace, Andrew Melton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import unittest
import mock
from nova.cmd import idmapshift
def join_side_effect(root, *args):
    """mock side_effect for os.path.join: join components with '/'.

    Avoids doubling the separator when root is exactly '/'.
    """
    prefix = root if root == '/' else root + '/'
    return prefix + '/'.join(args)
class FakeStat(object):
    """Minimal stand-in for an os.lstat result: only ownership fields."""

    def __init__(self, uid, gid):
        self.st_uid, self.st_gid = uid, gid
class BaseTestCase(unittest.TestCase):
    """Shared fixture: default uid/gid mapping triples (start, target, count)."""

    def __init__(self, *args, **kwargs):
        super(BaseTestCase, self).__init__(*args, **kwargs)
        maps = [(0, 10000, 10), (10, 20000, 1000)]
        # Separate list objects so per-test mutation cannot leak between them.
        self.uid_maps = list(maps)
        self.gid_maps = list(maps)
class FindTargetIDTestCase(BaseTestCase):
    """Exercises idmapshift.find_target_id against the default mappings."""

    def test_find_target_id_range_1_first(self):
        target = idmapshift.find_target_id(0, self.uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(10000, target)

    def test_find_target_id_inside_range_1(self):
        target = idmapshift.find_target_id(2, self.uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(10002, target)

    def test_find_target_id_range_2_first(self):
        target = idmapshift.find_target_id(10, self.uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(20000, target)

    def test_find_target_id_inside_range_2(self):
        target = idmapshift.find_target_id(100, self.uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(20090, target)

    def test_find_target_id_outside_range(self):
        # IDs covered by no mapping shift to the nobody ID.
        target = idmapshift.find_target_id(10000, self.uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(idmapshift.NOBODY_ID, target)

    def test_find_target_id_no_mappings(self):
        target = idmapshift.find_target_id(0, [],
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(idmapshift.NOBODY_ID, target)

    def test_find_target_id_updates_memo(self):
        # Resolved IDs are memoized in the caller-supplied dict.
        memo = {}
        idmapshift.find_target_id(0, self.uid_maps, idmapshift.NOBODY_ID, memo)
        self.assertTrue(0 in memo)
        self.assertEqual(10000, memo[0])

    def test_find_target_guest_id_greater_than_count(self):
        uid_maps = [(500, 10000, 10)]
        # Below range
        target = idmapshift.find_target_id(499, uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(idmapshift.NOBODY_ID, target)
        # Match
        target = idmapshift.find_target_id(501, uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(10001, target)
        # Beyond range
        target = idmapshift.find_target_id(510, uid_maps,
                                           idmapshift.NOBODY_ID, {})
        self.assertEqual(idmapshift.NOBODY_ID, target)
class ShiftPathTestCase(BaseTestCase):
    """Tests for idmapshift.shift_path(): re-owning a single filesystem path."""

    @mock.patch('os.lchown')
    @mock.patch('os.lstat')
    def test_shift_path(self, mock_lstat, mock_lchown):
        # 0:0 ownership is shifted to the mapped ids 10000:10000 via lchown.
        mock_lstat.return_value = FakeStat(0, 0)
        idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps,
                              idmapshift.NOBODY_ID, dict(), dict())
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)])

    @mock.patch('os.lchown')
    @mock.patch('os.lstat')
    def test_shift_path_dry_run(self, mock_lstat, mock_lchown):
        # dry_run still stats the path but must not chown anything.
        mock_lstat.return_value = FakeStat(0, 0)
        idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps,
                              idmapshift.NOBODY_ID, dict(), dict(),
                              dry_run=True)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertEqual(0, len(mock_lchown.mock_calls))

    @mock.patch('os.lchown')
    @mock.patch('nova.cmd.idmapshift.print_chown')
    @mock.patch('os.lstat')
    def test_shift_path_verbose(self, mock_lstat, mock_print, mock_lchown):
        # verbose reports old/new ownership through print_chown in addition
        # to performing the chown.
        mock_lstat.return_value = FakeStat(0, 0)
        idmapshift.shift_path('/test/path', self.uid_maps, self.gid_maps,
                              idmapshift.NOBODY_ID, dict(), dict(),
                              verbose=True)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        mock_print_call = mock.call('/test/path', 0, 0, 10000, 10000)
        mock_print.assert_has_calls([mock_print_call])
        mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)])
class ShiftDirTestCase(BaseTestCase):
    """Tests for idmapshift.shift_dir(): recursive shift over an os.walk tree."""

    @mock.patch('nova.cmd.idmapshift.shift_path')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_shift_dir(self, mock_walk, mock_join, mock_shift_path):
        # Single walk level: dirs a, b and files c, d directly under '/'.
        mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
        mock_join.side_effect = join_side_effect
        idmapshift.shift_dir('/', self.uid_maps, self.gid_maps,
                             idmapshift.NOBODY_ID)
        files = ['a', 'b', 'c', 'd']
        mock_walk.assert_has_calls([mock.call('/')])
        mock_join_calls = [mock.call('/', x) for x in files]
        mock_join.assert_has_calls(mock_join_calls)
        # The root itself is shifted first, then every entry found by walk.
        args = (self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID)
        kwargs = dict(dry_run=False, verbose=False,
                      uid_memo=dict(), gid_memo=dict())
        shift_path_calls = [mock.call('/', *args, **kwargs)]
        shift_path_calls += [mock.call('/' + x, *args, **kwargs)
                             for x in files]
        mock_shift_path.assert_has_calls(shift_path_calls)

    @mock.patch('nova.cmd.idmapshift.shift_path')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_shift_dir_dry_run(self, mock_walk, mock_join, mock_shift_path):
        # dry_run must be forwarded unchanged to every shift_path call.
        mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
        mock_join.side_effect = join_side_effect
        idmapshift.shift_dir('/', self.uid_maps, self.gid_maps,
                             idmapshift.NOBODY_ID, dry_run=True)
        mock_walk.assert_has_calls([mock.call('/')])
        files = ['a', 'b', 'c', 'd']
        mock_join_calls = [mock.call('/', x) for x in files]
        mock_join.assert_has_calls(mock_join_calls)
        args = (self.uid_maps, self.gid_maps, idmapshift.NOBODY_ID)
        kwargs = dict(dry_run=True, verbose=False,
                      uid_memo=dict(), gid_memo=dict())
        shift_path_calls = [mock.call('/', *args, **kwargs)]
        shift_path_calls += [mock.call('/' + x, *args, **kwargs)
                             for x in files]
        mock_shift_path.assert_has_calls(shift_path_calls)
class ConfirmPathTestCase(unittest.TestCase):
    """Tests for idmapshift.confirm_path().

    confirm_path(path, uid_ranges, gid_ranges, nobody_id) reports whether a
    path already looks shifted: its uid and gid must each fall inside one of
    the target (begin, end) ranges or equal the nobody id.

    Bug fix: Mock.assert_has_calls() expects an iterable of call objects.
    These tests previously passed a bare mock.call, which iterates as its
    internal (args, kwargs) pair rather than as a call, so the lstat
    assertions were not verifying what they appeared to.  Each expected call
    is now wrapped in a list, consistent with the rest of this module.
    """

    @mock.patch('os.lstat')
    def test_confirm_path(self, mock_lstat):
        # uid and gid both inside their target ranges -> confirmed.
        uid_ranges = [(1000, 1999)]
        gid_ranges = [(300, 399)]
        mock_lstat.return_value = FakeStat(1000, 301)
        result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
                                         50000)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertTrue(result)

    @mock.patch('os.lstat')
    def test_confirm_path_nobody(self, mock_lstat):
        # Both ids equal to the nobody id also count as shifted.
        uid_ranges = [(1000, 1999)]
        gid_ranges = [(300, 399)]
        mock_lstat.return_value = FakeStat(50000, 50000)
        result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
                                         50000)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertTrue(result)

    @mock.patch('os.lstat')
    def test_confirm_path_uid_mismatch(self, mock_lstat):
        # uid outside every range and not nobody -> unconfirmed.
        uid_ranges = [(1000, 1999)]
        gid_ranges = [(300, 399)]
        mock_lstat.return_value = FakeStat(0, 301)
        result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
                                         50000)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertFalse(result)

    @mock.patch('os.lstat')
    def test_confirm_path_gid_mismatch(self, mock_lstat):
        # gid outside every range and not nobody -> unconfirmed.
        uid_ranges = [(1000, 1999)]
        gid_ranges = [(300, 399)]
        mock_lstat.return_value = FakeStat(1000, 0)
        result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
                                         50000)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertFalse(result)

    @mock.patch('os.lstat')
    def test_confirm_path_uid_nobody(self, mock_lstat):
        # uid == nobody with an in-range gid -> confirmed.
        uid_ranges = [(1000, 1999)]
        gid_ranges = [(300, 399)]
        mock_lstat.return_value = FakeStat(50000, 301)
        result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
                                         50000)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertTrue(result)

    @mock.patch('os.lstat')
    def test_confirm_path_gid_nobody(self, mock_lstat):
        # gid == nobody with an in-range uid -> confirmed.
        uid_ranges = [(1000, 1999)]
        gid_ranges = [(300, 399)]
        mock_lstat.return_value = FakeStat(1000, 50000)
        result = idmapshift.confirm_path('/test/path', uid_ranges, gid_ranges,
                                         50000)
        mock_lstat.assert_has_calls([mock.call('/test/path')])
        self.assertTrue(result)
class ConfirmDirTestCase(BaseTestCase):
    """Tests for idmapshift.confirm_dir(): recursive confirm_path over a tree.

    confirm_dir short-circuits: the first unconfirmed path stops the walk
    and makes the whole directory unconfirmed.
    """

    def setUp(self):
        # NOTE(review): does not call super().setUp(); harmless today because
        # BaseTestCase only overrides __init__, but confirm before adding
        # fixtures there.
        self.uid_map_ranges = idmapshift.get_ranges(self.uid_maps)
        self.gid_map_ranges = idmapshift.get_ranges(self.gid_maps)

    @mock.patch('nova.cmd.idmapshift.confirm_path')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_confirm_dir(self, mock_walk, mock_join, mock_confirm_path):
        # Every path confirms -> the root and all entries are visited.
        mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
        mock_join.side_effect = join_side_effect
        mock_confirm_path.return_value = True
        idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
                               idmapshift.NOBODY_ID)
        files = ['a', 'b', 'c', 'd']
        mock_walk.assert_has_calls([mock.call('/')])
        mock_join_calls = [mock.call('/', x) for x in files]
        mock_join.assert_has_calls(mock_join_calls)
        args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
        confirm_path_calls = [mock.call('/', *args)]
        confirm_path_calls += [mock.call('/' + x, *args)
                               for x in files]
        mock_confirm_path.assert_has_calls(confirm_path_calls)

    @mock.patch('nova.cmd.idmapshift.confirm_path')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_confirm_dir_short_circuit_root(self, mock_walk, mock_join,
                                            mock_confirm_path):
        # The root itself fails -> nothing below it is checked.
        mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
        mock_join.side_effect = join_side_effect
        mock_confirm_path.return_value = False
        idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
                               idmapshift.NOBODY_ID)
        args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
        confirm_path_calls = [mock.call('/', *args)]
        mock_confirm_path.assert_has_calls(confirm_path_calls)

    @mock.patch('nova.cmd.idmapshift.confirm_path')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_confirm_dir_short_circuit_file(self, mock_walk, mock_join,
                                            mock_confirm_path):
        mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
        mock_join.side_effect = join_side_effect

        # Fail on the first directory entry ('a'): the walk stops there.
        def confirm_path_side_effect(path, *args):
            if 'a' in path:
                return False
            return True

        mock_confirm_path.side_effect = confirm_path_side_effect
        idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
                               idmapshift.NOBODY_ID)
        mock_walk.assert_has_calls([mock.call('/')])
        mock_join.assert_has_calls([mock.call('/', 'a')])
        args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
        confirm_path_calls = [mock.call('/', *args),
                              mock.call('/' + 'a', *args)]
        mock_confirm_path.assert_has_calls(confirm_path_calls)

    @mock.patch('nova.cmd.idmapshift.confirm_path')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_confirm_dir_short_circuit_dir(self, mock_walk, mock_join,
                                           mock_confirm_path):
        mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
        mock_join.side_effect = join_side_effect

        # Fail on the first file entry ('c'): dirs a, b are checked first.
        def confirm_path_side_effect(path, *args):
            if 'c' in path:
                return False
            return True

        mock_confirm_path.side_effect = confirm_path_side_effect
        idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
                               idmapshift.NOBODY_ID)
        files = ['a', 'b', 'c']
        mock_walk.assert_has_calls([mock.call('/')])
        mock_join_calls = [mock.call('/', x) for x in files]
        mock_join.assert_has_calls(mock_join_calls)
        args = (self.uid_map_ranges, self.gid_map_ranges, idmapshift.NOBODY_ID)
        confirm_path_calls = [mock.call('/', *args)]
        confirm_path_calls += [mock.call('/' + x, *args)
                               for x in files]
        mock_confirm_path.assert_has_calls(confirm_path_calls)
class IDMapTypeTestCase(unittest.TestCase):
    """Tests for idmapshift.id_map_type(), the argparse type converter."""

    def test_id_map_type(self):
        # Comma-separated "first:target:count" triples -> list of int tuples.
        result = idmapshift.id_map_type("1:1:1,2:2:2")
        self.assertEqual([(1, 1, 1), (2, 2, 2)], result)

    def test_id_map_type_not_int(self):
        # A non-numeric field raises ArgumentTypeError.
        self.assertRaises(argparse.ArgumentTypeError, idmapshift.id_map_type,
                          "a:1:1")

    def test_id_map_type_not_proper_format(self):
        # Too few fields raises ArgumentTypeError.
        self.assertRaises(argparse.ArgumentTypeError, idmapshift.id_map_type,
                          "1:1")
class MainTestCase(BaseTestCase):
    """Tests for the idmapshift.main() entry point.

    argparse.ArgumentParser is patched so parse_args() returns the mock
    itself; the parsed-argument attributes are set directly on that mock.
    """

    @mock.patch('nova.cmd.idmapshift.shift_dir')
    @mock.patch('argparse.ArgumentParser')
    def test_main(self, mock_parser_class, mock_shift_dir):
        # Default run (no --idempotent / --confirm): shift_dir is invoked.
        mock_parser = mock.MagicMock()
        mock_parser.parse_args.return_value = mock_parser
        mock_parser.idempotent = False
        mock_parser.confirm = False
        mock_parser.path = '/test/path'
        mock_parser.uid = self.uid_maps
        mock_parser.gid = self.gid_maps
        mock_parser.nobody = idmapshift.NOBODY_ID
        mock_parser.dry_run = False
        mock_parser.verbose = False
        mock_parser_class.return_value = mock_parser
        idmapshift.main()
        mock_shift_dir_call = mock.call('/test/path', self.uid_maps,
                                        self.gid_maps, idmapshift.NOBODY_ID,
                                        dry_run=False, verbose=False)
        mock_shift_dir.assert_has_calls([mock_shift_dir_call])

    @mock.patch('nova.cmd.idmapshift.shift_dir')
    @mock.patch('nova.cmd.idmapshift.confirm_dir')
    @mock.patch('argparse.ArgumentParser')
    def test_main_confirm_dir_idempotent_unshifted(self, mock_parser_class,
                                                   mock_confirm_dir,
                                                   mock_shift_dir):
        # --idempotent on an unshifted tree: confirm first, then shift.
        mock_parser = mock.MagicMock()
        mock_parser.parse_args.return_value = mock_parser
        mock_parser.idempotent = True
        mock_parser.confirm = False
        mock_parser.path = '/test/path'
        mock_parser.uid = self.uid_maps
        mock_parser.gid = self.gid_maps
        mock_parser.nobody = idmapshift.NOBODY_ID
        mock_parser.dry_run = False
        mock_parser.verbose = False
        mock_parser_class.return_value = mock_parser
        mock_confirm_dir.return_value = False
        idmapshift.main()
        mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
                                          self.gid_maps, idmapshift.NOBODY_ID)
        mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
        mock_shift_dir_call = mock.call('/test/path', self.uid_maps,
                                        self.gid_maps, idmapshift.NOBODY_ID,
                                        dry_run=False, verbose=False)
        mock_shift_dir.assert_has_calls([mock_shift_dir_call])

    @mock.patch('nova.cmd.idmapshift.shift_dir')
    @mock.patch('nova.cmd.idmapshift.confirm_dir')
    @mock.patch('argparse.ArgumentParser')
    def test_main_confirm_dir_idempotent_shifted(self, mock_parser_class,
                                                 mock_confirm_dir,
                                                 mock_shift_dir):
        # --idempotent on an already-shifted tree: exit 0 without shifting.
        mock_parser = mock.MagicMock()
        mock_parser.parse_args.return_value = mock_parser
        mock_parser.idempotent = True
        mock_parser.confirm = False
        mock_parser.path = '/test/path'
        mock_parser.uid = self.uid_maps
        mock_parser.gid = self.gid_maps
        mock_parser.nobody = idmapshift.NOBODY_ID
        mock_parser.dry_run = False
        mock_parser.verbose = False
        mock_parser_class.return_value = mock_parser
        mock_confirm_dir.return_value = True
        # NOTE(review): if main() does not raise SystemExit this except block
        # is skipped and the test still passes; assertRaises would be
        # stricter -- confirm main() always exits here before tightening.
        try:
            idmapshift.main()
        except SystemExit as sys_exit:
            self.assertEqual(sys_exit.code, 0)
        mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
                                          self.gid_maps, idmapshift.NOBODY_ID)
        mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
        # NOTE(review): assert_has_calls([]) is vacuous (an empty expected
        # list always matches); assert_not_called() would actually verify
        # that shift_dir was skipped.
        mock_shift_dir.assert_has_calls([])

    @mock.patch('nova.cmd.idmapshift.shift_dir')
    @mock.patch('nova.cmd.idmapshift.confirm_dir')
    @mock.patch('argparse.ArgumentParser')
    def test_main_confirm_dir_confirm_unshifted(self, mock_parser_class,
                                                mock_confirm_dir,
                                                mock_shift_dir):
        # --confirm on an unshifted tree: exit 1, never shift.
        mock_parser = mock.MagicMock()
        mock_parser.parse_args.return_value = mock_parser
        mock_parser.idempotent = False
        mock_parser.confirm = True
        mock_parser.exit_on_fail = True
        mock_parser.path = '/test/path'
        mock_parser.uid = self.uid_maps
        mock_parser.gid = self.gid_maps
        mock_parser.nobody = idmapshift.NOBODY_ID
        mock_parser.dry_run = False
        mock_parser.verbose = False
        mock_parser_class.return_value = mock_parser
        mock_confirm_dir.return_value = False
        # NOTE(review): same vacuous-pass caveat as above if no SystemExit.
        try:
            idmapshift.main()
        except SystemExit as sys_exit:
            self.assertEqual(sys_exit.code, 1)
        mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
                                          self.gid_maps, idmapshift.NOBODY_ID)
        mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
        # NOTE(review): vacuous; see note in the idempotent_shifted test.
        mock_shift_dir.assert_has_calls([])

    @mock.patch('nova.cmd.idmapshift.shift_dir')
    @mock.patch('nova.cmd.idmapshift.confirm_dir')
    @mock.patch('argparse.ArgumentParser')
    def test_main_confirm_dir_confirm_shifted(self, mock_parser_class,
                                              mock_confirm_dir,
                                              mock_shift_dir):
        # --confirm on a shifted tree: exit 0, never shift.
        mock_parser = mock.MagicMock()
        mock_parser.parse_args.return_value = mock_parser
        mock_parser.idempotent = False
        mock_parser.confirm = True
        mock_parser.exit_on_fail = True
        mock_parser.path = '/test/path'
        mock_parser.uid = self.uid_maps
        mock_parser.gid = self.gid_maps
        mock_parser.nobody = idmapshift.NOBODY_ID
        mock_parser.dry_run = False
        mock_parser.verbose = False
        mock_parser_class.return_value = mock_parser
        mock_confirm_dir.return_value = True
        # NOTE(review): same vacuous-pass caveat as above if no SystemExit.
        try:
            idmapshift.main()
        except SystemExit as sys_exit:
            self.assertEqual(sys_exit.code, 0)
        mock_confirm_dir_call = mock.call('/test/path', self.uid_maps,
                                          self.gid_maps, idmapshift.NOBODY_ID)
        mock_confirm_dir.assert_has_calls([mock_confirm_dir_call])
        # NOTE(review): vacuous; see note in the idempotent_shifted test.
        mock_shift_dir.assert_has_calls([])
class IntegrationTestCase(BaseTestCase):
    """End-to-end tests of shift_dir/confirm_dir with only the OS mocked.

    The fake lstat keys ownership off the LAST character of the path
    ('/tmp/test' -> 't', '/tmp/test/a' -> 'a', ...), so each tree entry can
    be given distinct ownership without a full path table.
    """

    @mock.patch('os.lchown')
    @mock.patch('os.lstat')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_integrated_shift_dir(self, mock_walk, mock_join, mock_lstat,
                                  mock_lchown):
        # Two walk levels: /tmp/test with dirs a, b, c and file d, then
        # /tmp/test/d with files 1 and 2.
        mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
                                  ('/tmp/test/d', ['1', '2'], [])]
        mock_join.side_effect = join_side_effect

        def lstat(path):
            stats = {
                't': FakeStat(0, 0),
                'a': FakeStat(0, 0),
                'b': FakeStat(0, 2),
                'c': FakeStat(30000, 30000),
                'd': FakeStat(100, 100),
                '1': FakeStat(0, 100),
                '2': FakeStat(100, 100),
            }
            return stats[path[-1]]

        mock_lstat.side_effect = lstat
        idmapshift.shift_dir('/tmp/test', self.uid_maps, self.gid_maps,
                             idmapshift.NOBODY_ID, verbose=True)
        # In-range ids are translated through the maps; out-of-range ids
        # (30000) collapse to NOBODY_ID.
        lchown_calls = [
            mock.call('/tmp/test', 10000, 10000),
            mock.call('/tmp/test/a', 10000, 10000),
            mock.call('/tmp/test/b', 10000, 10002),
            mock.call('/tmp/test/c', idmapshift.NOBODY_ID,
                      idmapshift.NOBODY_ID),
            mock.call('/tmp/test/d', 20090, 20090),
            mock.call('/tmp/test/d/1', 10000, 20090),
            mock.call('/tmp/test/d/2', 20090, 20090),
        ]
        mock_lchown.assert_has_calls(lchown_calls)

    @mock.patch('os.lchown')
    @mock.patch('os.lstat')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_integrated_shift_dir_dry_run(self, mock_walk, mock_join,
                                          mock_lstat, mock_lchown):
        # Same tree as above, but dry_run must suppress every lchown.
        mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
                                  ('/tmp/test/d', ['1', '2'], [])]
        mock_join.side_effect = join_side_effect

        def lstat(path):
            stats = {
                't': FakeStat(0, 0),
                'a': FakeStat(0, 0),
                'b': FakeStat(0, 2),
                'c': FakeStat(30000, 30000),
                'd': FakeStat(100, 100),
                '1': FakeStat(0, 100),
                '2': FakeStat(100, 100),
            }
            return stats[path[-1]]

        mock_lstat.side_effect = lstat
        idmapshift.shift_dir('/tmp/test', self.uid_maps, self.gid_maps,
                             idmapshift.NOBODY_ID, dry_run=True, verbose=True)
        self.assertEqual(0, len(mock_lchown.mock_calls))

    @mock.patch('os.lstat')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_integrated_confirm_dir_shifted(self, mock_walk, mock_join,
                                            mock_lstat):
        # Ownership already in target ranges (or NOBODY_ID) -> confirmed.
        mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
                                  ('/tmp/test/d', ['1', '2'], [])]
        mock_join.side_effect = join_side_effect

        def lstat(path):
            stats = {
                't': FakeStat(10000, 10000),
                'a': FakeStat(10000, 10000),
                'b': FakeStat(10000, 10002),
                'c': FakeStat(idmapshift.NOBODY_ID, idmapshift.NOBODY_ID),
                'd': FakeStat(20090, 20090),
                '1': FakeStat(10000, 20090),
                '2': FakeStat(20090, 20090),
            }
            return stats[path[-1]]

        mock_lstat.side_effect = lstat
        result = idmapshift.confirm_dir('/tmp/test', self.uid_maps,
                                        self.gid_maps, idmapshift.NOBODY_ID)
        self.assertTrue(result)

    @mock.patch('os.lstat')
    @mock.patch('os.path.join')
    @mock.patch('os.walk')
    def test_integrated_confirm_dir_unshifted(self, mock_walk, mock_join,
                                              mock_lstat):
        # Original (source) ownership everywhere -> not confirmed.
        mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
                                  ('/tmp/test/d', ['1', '2'], [])]
        mock_join.side_effect = join_side_effect

        def lstat(path):
            stats = {
                't': FakeStat(0, 0),
                'a': FakeStat(0, 0),
                'b': FakeStat(0, 2),
                'c': FakeStat(30000, 30000),
                'd': FakeStat(100, 100),
                '1': FakeStat(0, 100),
                '2': FakeStat(100, 100),
            }
            return stats[path[-1]]

        mock_lstat.side_effect = lstat
        result = idmapshift.confirm_dir('/tmp/test', self.uid_maps,
                                        self.gid_maps, idmapshift.NOBODY_ID)
        self.assertFalse(result)
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
from telemetry.core import platform
from telemetry.core import util
from telemetry.core.platform import profiler
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.util import support_binaries
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib.perf import perf_control # pylint: disable=F0401
# Extra arguments passed to "perf record" on every platform.
_PERF_OPTIONS = [
    # In perf 3.13 --call-graph requires an argument, so use the -g short-hand
    # which does not.
    '-g',
    # Increase sampling frequency for better coverage.
    '--freq', '2000',
]

# Additional "perf record" arguments used only on Android devices.
_PERF_OPTIONS_ANDROID = [
    # Increase priority to avoid dropping samples. Requires root.
    '--realtime', '80',
]
def _NicePath(path):
    """Return *path* relative to the current directory when that is shorter."""
    shortened = os.path.relpath(path, os.curdir)
    return path if len(path) <= len(shortened) else shortened
def _PrepareHostForPerf():
    """Make kernel symbol addresses readable so perf can resolve them.

    Writes '0' to /proc/sys/kernel/kptr_restrict (via sudo cp of a temp
    file) unless the setting is already unrestricted.
    """
    kptr_file = '/proc/sys/kernel/kptr_restrict'
    with open(kptr_file) as f:
        if f.read().strip() != '0':
            logging.warning('Making kernel symbols unrestricted. You might have to '
                            'enter your password for "sudo".')
            with tempfile.NamedTemporaryFile() as zero:
                # NOTE(review): writing a str to the default-mode ('w+b')
                # NamedTemporaryFile only works on Python 2, which this module
                # targets (see the print statements elsewhere in this file).
                zero.write('0')
                zero.flush()
                # kptr_restrict is root-writable only, hence the sudo cp.
                subprocess.call(['sudo', 'cp', zero.name, kptr_file])
def _InstallPerfHost():
    """Ensure the 'perfhost' binary is installed; return its path."""
    host = platform.GetHostPlatform()
    if not host.CanLaunchApplication('perfhost'):
        host.InstallApplication('perfhost')
    return support_binaries.FindPath('perfhost', host.GetOSName())
class _SingleProcessPerfProfiler(object):
    """An internal class for using perf for a given process.

    On android, this profiler uses pre-built binaries from AOSP.
    See more details in prebuilt/android/README.txt.
    """

    def __init__(self, pid, output_file, browser_backend, platform_backend,
                 perf_binary, perfhost_binary):
        self._pid = pid
        self._browser_backend = browser_backend
        self._platform_backend = platform_backend
        self._output_file = output_file
        # Unbuffered temp file captures perf's stdout/stderr for diagnostics.
        self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
        self._is_android = platform_backend.GetOSName() == 'android'
        self._perfhost_binary = perfhost_binary
        cmd_prefix = []
        perf_args = ['record', '--pid', str(pid)]
        if self._is_android:
            # Run perf on the device through adb; the profile is written to
            # /sdcard and pulled back to the host in CollectProfile().
            cmd_prefix = ['adb', '-s', browser_backend.adb.device_serial(), 'shell',
                          perf_binary]
            perf_args += _PERF_OPTIONS_ANDROID
            output_file = os.path.join('/sdcard', 'perf_profiles',
                                       os.path.basename(output_file))
            self._device_output_file = output_file
            browser_backend.adb.RunShellCommand(
                'mkdir -p ' + os.path.dirname(self._device_output_file))
            browser_backend.adb.RunShellCommand('rm -f ' + self._device_output_file)
        else:
            cmd_prefix = [perf_binary]
        perf_args += ['--output', output_file] + _PERF_OPTIONS
        self._proc = subprocess.Popen(cmd_prefix + perf_args,
                                      stdout=self._tmp_output_file, stderr=subprocess.STDOUT)

    def CollectProfile(self):
        # Stops perf, validates its exit status, post-processes the profile
        # and returns the host-side path of the output file.
        if ('renderer' in self._output_file and
            not self._is_android and
            not self._platform_backend.GetCommandLine(self._pid)):
            logging.warning('Renderer was swapped out during profiling. '
                            'To collect a full profile rerun with '
                            '"--extra-browser-args=--single-process"')
        if self._is_android:
            device = self._browser_backend.adb.device()
            # SIGINT makes perf flush its data and exit cleanly.
            device.KillAll('perf', signum=signal.SIGINT, blocking=True)
        self._proc.send_signal(signal.SIGINT)
        exit_code = self._proc.wait()
        try:
            if exit_code == 128:
                raise Exception(
                    """perf failed with exit code 128.
Try rerunning this script under sudo or setting
/proc/sys/kernel/perf_event_paranoid to "-1".\nOutput:\n%s""" %
                    self._GetStdOut())
            elif exit_code not in (0, -2):
                # -2 means killed by our SIGINT: the expected shutdown path.
                raise Exception(
                    'perf failed with exit code %d. Output:\n%s' % (exit_code,
                                                                    self._GetStdOut()))
        finally:
            self._tmp_output_file.close()
        cmd = '%s report -n -i %s' % (_NicePath(self._perfhost_binary),
                                      self._output_file)
        if self._is_android:
            device = self._browser_backend.adb.device()
            device.old_interface.Adb().Pull(self._device_output_file,
                                            self._output_file)
            # Symbolization needs the device's binaries: build a symfs
            # containing exactly the libraries referenced by the profile.
            required_libs = \
                android_profiling_helper.GetRequiredLibrariesForPerfProfile(
                    self._output_file)
            symfs_root = os.path.dirname(self._output_file)
            kallsyms = android_profiling_helper.CreateSymFs(device,
                                                            symfs_root,
                                                            required_libs,
                                                            use_symlinks=True)
            cmd += ' --symfs %s --kallsyms %s' % (symfs_root, kallsyms)
            # Point perf at a target-ABI objdump from the first library for
            # which a toolchain can be found.
            for lib in required_libs:
                lib = os.path.join(symfs_root, lib[1:])
                if not os.path.exists(lib):
                    continue
                objdump_path = android_profiling_helper.GetToolchainBinaryPath(
                    lib, 'objdump')
                if objdump_path:
                    cmd += ' --objdump %s' % _NicePath(objdump_path)
                    break
        # Python 2 print statements: this module predates Python 3 support.
        print 'To view the profile, run:'
        print ' ', cmd
        return self._output_file

    def _GetStdOut(self):
        # Best-effort read of perf's captured stdout/stderr.
        self._tmp_output_file.flush()
        try:
            with open(self._tmp_output_file.name) as f:
                return f.read()
        except IOError:
            return ''
class PerfProfiler(profiler.Profiler):
    """Profiles browser processes with perf (Linux hosts and Android devices).

    One _SingleProcessPerfProfiler is started per process of interest at
    construction time; CollectProfile() stops them all and returns the list
    of profile files.
    """

    def __init__(self, browser_backend, platform_backend, output_path, state):
        super(PerfProfiler, self).__init__(
            browser_backend, platform_backend, output_path, state)
        process_output_file_map = self._GetProcessOutputFileMap()
        self._process_profilers = []
        self._perf_control = None
        # On the host, perf and perfhost are the same binary; on Android the
        # device-side perf binary is prepared separately below.
        perf_binary = perfhost_binary = _InstallPerfHost()
        try:
            if platform_backend.GetOSName() == 'android':
                device = browser_backend.adb.device()
                perf_binary = android_profiling_helper.PrepareDeviceForPerf(device)
                self._perf_control = perf_control.PerfControl(device)
                self._perf_control.SetPerfProfilingMode()
            else:
                _PrepareHostForPerf()
            # Python 2 dict iteration (module predates Python 3 support).
            for pid, output_file in process_output_file_map.iteritems():
                # Skip the zygote: it is shared by all app processes.
                if 'zygote' in output_file:
                    continue
                self._process_profilers.append(
                    _SingleProcessPerfProfiler(
                        pid, output_file, browser_backend, platform_backend,
                        perf_binary, perfhost_binary))
        except:
            # Restore device perf mode before propagating any setup failure.
            if self._perf_control:
                self._perf_control.SetDefaultPerfMode()
            raise

    @classmethod
    def name(cls):
        return 'perf'

    @classmethod
    def is_supported(cls, browser_type):
        # Linux only (sys.platform == 'linux2' on Python 2), excluding CrOS.
        if sys.platform != 'linux2':
            return False
        if browser_type.startswith('cros'):
            return False
        return True

    @classmethod
    def CustomizeBrowserOptions(cls, browser_type, options):
        # The sandbox would prevent perf from attaching to renderers.
        options.AppendExtraBrowserArgs([
            '--no-sandbox',
            '--allow-sandbox-debugging',
        ])

    def CollectProfile(self):
        # Put the device back into its default perf mode before collecting.
        if self._perf_control:
            self._perf_control.SetDefaultPerfMode()
        output_files = []
        for single_process in self._process_profilers:
            output_files.append(single_process.CollectProfile())
        return output_files

    @classmethod
    def GetTopSamples(cls, file_name, number):
        """Parses the perf generated profile in |file_name| and returns a
        {function: period} dict of the |number| hottests functions.
        """
        assert os.path.exists(file_name)
        with open(os.devnull, 'w') as devnull:
            _InstallPerfHost()
            # -t '^' makes perf emit '^'-separated fields for easy splitting.
            report = subprocess.Popen(
                ['perfhost', 'report', '--show-total-period', '-U', '-t', '^', '-i',
                 file_name],
                stdout=subprocess.PIPE, stderr=devnull).communicate()[0]
        period_by_function = {}
        for line in report.split('\n'):
            if not line or line.startswith('#'):
                continue
            fields = line.split('^')
            if len(fields) != 5:
                continue
            period = int(fields[1])
            function = fields[4].partition(' ')[2]
            function = re.sub('<.*>', '', function)  # Strip template params.
            function = re.sub('[(].*[)]', '', function)  # Strip function params.
            period_by_function[function] = period
            if len(period_by_function) == number:
                break
        return period_by_function
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
determine_ext,
dict_get,
ExtractorError,
int_or_none,
parse_duration,
try_get,
unified_strdate,
)
class XHamsterIE(InfoExtractor):
    """Extractor for xhamster.com video pages (desktop, mobile and both URL
    schemas).

    Two page layouts are handled: the new one, which exposes metadata as
    JSON in ``window.initials``, and the legacy HTML layout, parsed with
    regexes as a fallback.

    Fixes relative to the previous revision:
    * ``comment_count`` in the new layout was read from the ``views`` field
      (a copy-paste duplicate of ``view_count``); it now reads ``comments``.
    * The legacy-layout ``sources`` JSON is guarded against ``None`` (which
      ``_parse_json(..., fatal=False)`` returns on malformed JSON).
    * Dead code under the ``continue`` in the ``download`` branch removed.
    """

    _VALID_URL = r'''(?x)
                    https?://
                        (?:.+?\.)?xhamster\.com/
                        (?:
                            movies/(?P<id>\d+)/(?P<display_id>[^/]*)\.html|
                            videos/(?P<display_id_2>[^/]*)-(?P<id_2>\d+)
                        )
                    '''
    _TESTS = [{
        'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
        'md5': '8281348b8d3c53d39fffb377d24eac4e',
        'info_dict': {
            'id': '1509445',
            'display_id': 'femaleagent_shy_beauty_takes_the_bait',
            'ext': 'mp4',
            'title': 'FemaleAgent Shy beauty takes the bait',
            'timestamp': 1350194821,
            'upload_date': '20121014',
            'uploader': 'Ruseful2011',
            'duration': 893,
            'age_limit': 18,
            'categories': ['Fake Hub', 'Amateur', 'MILFs', 'POV', 'Boss', 'Office', 'Oral', 'Reality', 'Sexy'],
        },
    }, {
        'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
        'info_dict': {
            'id': '2221348',
            'display_id': 'britney_spears_sexy_booty',
            'ext': 'mp4',
            'title': 'Britney Spears Sexy Booty',
            'timestamp': 1379123460,
            'upload_date': '20130914',
            'uploader': 'jojo747400',
            'duration': 200,
            'age_limit': 18,
            'categories': ['Britney Spears', 'Celebrities', 'HD Videos', 'Sexy', 'Sexy Booty'],
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # empty seo
        'url': 'http://xhamster.com/movies/5667973/.html',
        'info_dict': {
            'id': '5667973',
            'ext': 'mp4',
            'title': '....',
            'timestamp': 1454948101,
            'upload_date': '20160208',
            'uploader': 'parejafree',
            'duration': 72,
            'age_limit': 18,
            'categories': ['Amateur', 'Blowjobs'],
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # mobile site
        'url': 'https://m.xhamster.com/videos/cute-teen-jacqueline-solo-masturbation-8559111',
        'only_matching': True,
    }, {
        'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
        'only_matching': True,
    }, {
        # This video is visible for marcoalfa123456's friends only
        'url': 'https://it.xhamster.com/movies/7263980/la_mia_vicina.html',
        'only_matching': True,
    }, {
        # new URL schema
        'url': 'https://pt.xhamster.com/videos/euro-pedal-pumping-7937821',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('id_2')
        display_id = mobj.group('display_id') or mobj.group('display_id_2')

        # Always scrape the desktop site; mobile pages carry less metadata.
        desktop_url = re.sub(r'^(https?://(?:.+?\.)?)m\.', r'\1', url)
        webpage = self._download_webpage(desktop_url, video_id)

        error = self._html_search_regex(
            r'<div[^>]+id=["\']videoClosed["\'][^>]*>(.+?)</div>',
            webpage, 'error', default=None)
        if error:
            raise ExtractorError(error, expected=True)

        age_limit = self._rta_search(webpage)

        def get_height(s):
            # '720p' -> 720; None when the quality label has no height.
            return int_or_none(self._search_regex(
                r'^(\d+)[pP]', s, 'height', default=None))

        # New layout: metadata lives in a JSON blob assigned to
        # window.initials.
        initials = self._parse_json(
            self._search_regex(
                r'window\.initials\s*=\s*({.+?})\s*;\s*\n', webpage, 'initials',
                default='{}'),
            video_id, fatal=False)
        if initials:
            video = initials['videoModel']
            title = video['title']
            formats = []
            for format_id, formats_dict in video['sources'].items():
                if not isinstance(formats_dict, dict):
                    continue
                for quality, format_item in formats_dict.items():
                    if format_id == 'download':
                        # Download link takes some time to be generated,
                        # skipping for now (its dict-shaped entries with
                        # 'link'/'size' are therefore never parsed).
                        continue
                    format_url = format_item
                    filesize = None
                    if not isinstance(format_url, compat_str):
                        continue
                    formats.append({
                        'format_id': '%s-%s' % (format_id, quality),
                        'url': format_url,
                        'ext': determine_ext(format_url, 'mp4'),
                        'height': get_height(quality),
                        'filesize': filesize,
                    })
            self._sort_formats(formats)

            categories_list = video.get('categories')
            if isinstance(categories_list, list):
                categories = []
                for c in categories_list:
                    if not isinstance(c, dict):
                        continue
                    c_name = c.get('name')
                    if isinstance(c_name, compat_str):
                        categories.append(c_name)
            else:
                categories = None

            return {
                'id': video_id,
                'display_id': display_id,
                'title': title,
                'description': video.get('description'),
                'timestamp': int_or_none(video.get('created')),
                'uploader': try_get(
                    video, lambda x: x['author']['name'], compat_str),
                'thumbnail': video.get('thumbURL'),
                'duration': int_or_none(video.get('duration')),
                'view_count': int_or_none(video.get('views')),
                'like_count': int_or_none(try_get(
                    video, lambda x: x['rating']['likes'], int)),
                'dislike_count': int_or_none(try_get(
                    video, lambda x: x['rating']['dislikes'], int)),
                # Fixed: was video.get('views'), duplicating view_count.
                'comment_count': int_or_none(video.get('comments')),
                'age_limit': age_limit,
                'categories': categories,
                'formats': formats,
            }

        # Old layout fallback
        title = self._html_search_regex(
            [r'<h1[^>]*>([^<]+)</h1>',
             r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
             r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
            webpage, 'title')

        formats = []
        format_urls = set()

        sources = self._parse_json(
            self._search_regex(
                r'sources\s*:\s*({.+?})\s*,?\s*\n', webpage, 'sources',
                default='{}'),
            video_id, fatal=False)
        # _parse_json(..., fatal=False) yields None on malformed JSON.
        for format_id, format_url in (sources or {}).items():
            if not isinstance(format_url, compat_str):
                continue
            if format_url in format_urls:
                continue
            format_urls.add(format_url)
            formats.append({
                'format_id': format_id,
                'url': format_url,
                'height': get_height(format_id),
            })

        video_url = self._search_regex(
            [r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
             r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
             r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
            webpage, 'video url', group='mp4', default=None)
        if video_url and video_url not in format_urls:
            formats.append({
                'url': video_url,
            })

        self._sort_formats(formats)

        # Only a few videos have an description
        mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
        description = mobj.group(1) if mobj else None

        upload_date = unified_strdate(self._search_regex(
            r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
            webpage, 'upload date', fatal=False))

        uploader = self._html_search_regex(
            r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+><span[^>]+>([^<]+)',
            webpage, 'uploader', default='anonymous')

        thumbnail = self._search_regex(
            [r'''["']thumbUrl["']\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
             r'''<video[^>]+"poster"=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
            webpage, 'thumbnail', fatal=False, group='thumbnail')

        duration = parse_duration(self._search_regex(
            [r'<[^<]+\bitemprop=["\']duration["\'][^<]+\bcontent=["\'](.+?)["\']',
             r'Runtime:\s*</span>\s*([\d:]+)'], webpage,
            'duration', fatal=False))

        view_count = int_or_none(self._search_regex(
            r'content=["\']User(?:View|Play)s:(\d+)',
            webpage, 'view count', fatal=False))

        mobj = re.search(r'hint=[\'"](?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes', webpage)
        (like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)

        mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
        comment_count = mobj.group('commentcount') if mobj else 0

        categories_html = self._search_regex(
            r'(?s)<table.+?(<span>Categories:.+?)</table>', webpage,
            'categories', default=None)
        categories = [clean_html(category) for category in re.findall(
            r'<a[^>]+>(.+?)</a>', categories_html)] if categories_html else None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'upload_date': upload_date,
            'uploader': uploader,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
            'comment_count': int_or_none(comment_count),
            'age_limit': age_limit,
            'categories': categories,
            'formats': formats,
        }
class XHamsterEmbedIE(InfoExtractor):
    """Extractor for xhamster.com embed pages (xembed.php).

    Resolves the embed page to the canonical video URL and delegates the
    actual extraction to the main 'XHamster' extractor via url_result().
    """
    _VALID_URL = r'https?://(?:.+?\.)?xhamster\.com/xembed\.php\?video=(?P<id>\d+)'
    _TEST = {
        'url': 'http://xhamster.com/xembed.php?video=3328539',
        'info_dict': {
            'id': '3328539',
            'ext': 'mp4',
            'title': 'Pen Masturbation',
            'timestamp': 1406581861,
            'upload_date': '20140728',
            'uploader': 'ManyakisArt',
            'duration': 5,
            'age_limit': 18,
        }
    }

    @staticmethod
    def _extract_urls(webpage):
        """Return all xhamster embed iframe URLs found in *webpage*."""
        return [url for _, url in re.findall(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # Preferred: a direct link back to the canonical video page.
        video_url = self._search_regex(
            r'href="(https?://xhamster\.com/(?:movies/{0}/[^"]*\.html|videos/[^/]*-{0})[^"]*)"'.format(video_id),
            webpage, 'xhamster url', default=None)

        if not video_url:
            # Fallback: parse the player's "vars" JSON blob and take the first
            # link-like field.  Local renamed from `vars` so it no longer
            # shadows the builtin of the same name.
            player_vars = self._parse_json(
                self._search_regex(r'vars\s*:\s*({.+?})\s*,\s*\n', webpage, 'vars'),
                video_id)
            video_url = dict_get(
                player_vars,
                ('downloadLink', 'homepageLink', 'commentsLink', 'shareUrl'))

        return self.url_result(video_url, 'XHamster')
# ---- concatenation artifact repaired: boundary between two unrelated source files ----
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import importlib
from inspect import isabstract
from typing import Any
from unittest import TestCase, mock
from parameterized import parameterized
# Pairs of (new provider import path, deprecated legacy/contrib import path)
# for hook classes.  Consumed by parameterized tests below to verify that the
# deprecated locations still resolve to the moved classes.
# NOTE: a duplicate DatabricksHook entry was removed (it produced a redundant
# parameterized test case).
HOOK = [
    (
        "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook",
        "airflow.contrib.hooks.gcp_compute_hook.GceHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.base.CloudBaseHook",
        "airflow.contrib.hooks.gcp_api_base_hook.GoogleCloudBaseHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.dataflow.DataflowHook",
        "airflow.contrib.hooks.gcp_dataflow_hook.DataFlowHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.dataproc.DataprocHook",
        "airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.dlp.CloudDLPHook",
        "airflow.contrib.hooks.gcp_dlp_hook.CloudDLPHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.functions.CloudFunctionsHook",
        "airflow.contrib.hooks.gcp_function_hook.GcfHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.kms.CloudKMSHook",
        "airflow.contrib.hooks.gcp_kms_hook.GoogleCloudKMSHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.mlengine.MLEngineHook",
        "airflow.contrib.hooks.gcp_mlengine_hook.MLEngineHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.spanner.SpannerHook",
        "airflow.contrib.hooks.gcp_spanner_hook.CloudSpannerHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.speech_to_text.CloudSpeechToTextHook",
        "airflow.contrib.hooks.gcp_speech_to_text_hook.GCPSpeechToTextHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.text_to_speech.CloudTextToSpeechHook",
        "airflow.contrib.hooks.gcp_text_to_speech_hook.GCPTextToSpeechHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.gcs.GCSHook",
        "airflow.contrib.hooks.gcs_hook.GoogleCloudStorageHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.cloud_build.CloudBuildHook",
        "airflow.contrib.hooks.gcp_cloud_build_hook.CloudBuildHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.bigtable.BigtableHook",
        "airflow.contrib.hooks.gcp_bigtable_hook.BigtableHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.kubernetes_engine.GKEHook",
        "airflow.contrib.hooks.gcp_container_hook.GKEClusterHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.datastore.DatastoreHook",
        "airflow.contrib.hooks.datastore_hook.DatastoreHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.natural_language.CloudNaturalLanguageHook",
        "airflow.contrib.hooks.gcp_natural_language_hook.CloudNaturalLanguageHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.pubsub.PubSubHook",
        "airflow.contrib.hooks.gcp_pubsub_hook.PubSubHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLHook",
        "airflow.contrib.hooks.gcp_sql_hook.CloudSqlHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.cloud_sql.CloudSQLDatabaseHook",
        "airflow.contrib.hooks.gcp_sql_hook.CloudSqlDatabaseHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.tasks.CloudTasksHook",
        "airflow.contrib.hooks.gcp_tasks_hook.CloudTasksHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.cloud_storage_transfer_service.CloudDataTransferServiceHook",
        "airflow.contrib.hooks.gcp_transfer_hook.GCPTransferServiceHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.translate.CloudTranslateHook",
        "airflow.contrib.hooks.gcp_translate_hook.CloudTranslateHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.video_intelligence.CloudVideoIntelligenceHook",
        "airflow.contrib.hooks.gcp_video_intelligence_hook.CloudVideoIntelligenceHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.vision.CloudVisionHook",
        "airflow.contrib.hooks.gcp_vision_hook.CloudVisionHook",
    ),
    (
        "airflow.providers.google.cloud.hooks.bigquery.BigQueryHook",
        "airflow.contrib.hooks.bigquery_hook.BigQueryHook",
    ),
    (
        "airflow.providers.amazon.aws.hooks.athena.AWSAthenaHook",
        "airflow.contrib.hooks.aws_athena_hook.AWSAthenaHook",
    ),
    (
        "airflow.providers.amazon.aws.hooks.datasync.AWSDataSyncHook",
        "airflow.contrib.hooks.aws_datasync_hook.AWSDataSyncHook",
    ),
    (
        "airflow.providers.amazon.aws.hooks.s3.S3Hook",
        "airflow.hooks.S3_hook.S3Hook",
    ),
    (
        "airflow.providers.amazon.aws.hooks.sqs.SQSHook",
        "airflow.contrib.hooks.aws_sqs_hook.SQSHook",
    ),
    (
        "airflow.providers.amazon.aws.hooks.lambda_function.AwsLambdaHook",
        "airflow.contrib.hooks.aws_lambda_hook.AwsLambdaHook",
    ),
    (
        "airflow.providers.amazon.aws.hooks.sns.AwsSnsHook",
        "airflow.contrib.hooks.aws_sns_hook.AwsSnsHook",
    ),
    (
        'airflow.providers.apache.pinot.hooks.pinot.PinotDbApiHook',
        'airflow.contrib.hooks.pinot_hook.PinotDbApiHook',
    ),
    (
        'airflow.providers.apache.pinot.hooks.pinot.PinotAdminHook',
        'airflow.contrib.hooks.pinot_hook.PinotAdminHook',
    ),
    (
        'airflow.providers.apache.spark.hooks.spark_jdbc.SparkJDBCHook',
        'airflow.contrib.hooks.spark_jdbc_hook.SparkJDBCHook',
    ),
    (
        'airflow.providers.apache.spark.hooks.spark_sql.SparkSqlHook',
        'airflow.contrib.hooks.spark_sql_hook.SparkSqlHook',
    ),
    (
        'airflow.providers.apache.spark.hooks.spark_submit.SparkSubmitHook',
        'airflow.contrib.hooks.spark_submit_hook.SparkSubmitHook',
    ),
    (
        'airflow.providers.apache.sqoop.hooks.sqoop.SqoopHook',
        'airflow.contrib.hooks.sqoop_hook.SqoopHook',
    ),
    (
        'airflow.providers.apache.druid.hooks.druid.DruidHook',
        'airflow.hooks.druid_hook.DruidHook',
    ),
    (
        'airflow.providers.apache.druid.hooks.druid.DruidDbApiHook',
        'airflow.hooks.druid_hook.DruidDbApiHook',
    ),
    (
        'airflow.providers.apache.hdfs.hooks.hdfs.HDFSHookException',
        'airflow.hooks.hdfs_hook.HDFSHookException',
    ),
    (
        'airflow.providers.apache.hdfs.hooks.hdfs.HDFSHook',
        'airflow.hooks.hdfs_hook.HDFSHook',
    ),
    (
        'airflow.providers.apache.hive.hooks.hive.HiveMetastoreHook',
        'airflow.hooks.hive_hooks.HiveMetastoreHook',
    ),
    (
        'airflow.providers.apache.hive.hooks.hive.HiveCliHook',
        'airflow.hooks.hive_hooks.HiveCliHook',
    ),
    (
        'airflow.providers.apache.hive.hooks.hive.HiveServer2Hook',
        'airflow.hooks.hive_hooks.HiveServer2Hook',
    ),
    (
        'airflow.providers.apache.pig.hooks.pig.PigCliHook',
        'airflow.hooks.pig_hook.PigCliHook',
    ),
    (
        'airflow.providers.apache.hdfs.hooks.webhdfs.WebHDFSHook',
        'airflow.hooks.webhdfs_hook.WebHDFSHook',
    ),
    (
        'airflow.hooks.filesystem.FSHook',
        'airflow.contrib.hooks.fs_hook.FSHook',
    ),
    (
        'airflow.providers.microsoft.azure.hooks.azure_container_instance.AzureContainerInstanceHook',
        'airflow.contrib.hooks.azure_container_instance_hook.AzureContainerInstanceHook',
    ),
    (
        'airflow.providers.microsoft.azure.hooks.azure_container_registry.AzureContainerRegistryHook',
        'airflow.contrib.hooks.azure_container_registry_hook.AzureContainerRegistryHook',
    ),
    (
        'airflow.providers.microsoft.azure.hooks.azure_container_volume.AzureContainerVolumeHook',
        'airflow.contrib.hooks.azure_container_volume_hook.AzureContainerVolumeHook',
    ),
    (
        'airflow.providers.microsoft.azure.hooks.azure_cosmos.AzureCosmosDBHook',
        'airflow.contrib.hooks.azure_cosmos_hook.AzureCosmosDBHook',
    ),
    (
        'airflow.providers.microsoft.azure.hooks.azure_fileshare.AzureFileShareHook',
        'airflow.contrib.hooks.azure_fileshare_hook.AzureFileShareHook',
    ),
    (
        'airflow.providers.microsoft.azure.hooks.wasb.WasbHook',
        'airflow.contrib.hooks.wasb_hook.WasbHook',
    ),
    (
        'airflow.providers.amazon.aws.hooks.glue_catalog.AwsGlueCatalogHook',
        'airflow.contrib.hooks.aws_glue_catalog_hook.AwsGlueCatalogHook',
    ),
    (
        'airflow.providers.amazon.aws.hooks.logs.AwsLogsHook',
        'airflow.contrib.hooks.aws_logs_hook.AwsLogsHook',
    ),
    (
        'airflow.providers.amazon.aws.hooks.emr.EmrHook',
        'airflow.contrib.hooks.emr_hook.EmrHook',
    ),
    (
        'airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook',
        'airflow.contrib.hooks.sagemaker_hook.SageMakerHook',
    ),
    (
        'airflow.providers.mongo.hooks.mongo.MongoHook',
        'airflow.contrib.hooks.mongo_hook.MongoHook',
    ),
    (
        # TODO(review): 'openfass' looks like a typo for 'openfaas' — confirm
        # against the actual providers package before changing; left as-is.
        'airflow.providers.openfass.hooks.openfaas.OpenFaasHook',
        'airflow.contrib.hooks.openfaas_hook.OpenFaasHook',
    ),
    (
        'airflow.providers.redis.hooks.redis.RedisHook',
        'airflow.contrib.hooks.redis_hook.RedisHook',
    ),
    (
        'airflow.providers.docker.hooks.docker.DockerHook',
        'airflow.hooks.docker_hook.DockerHook',
    ),
    (
        'airflow.providers.microsoft.mssql.hooks.mssql.MsSqlHook',
        'airflow.hooks.mssql_hook.MsSqlHook',
    ),
    (
        'airflow.providers.mysql.hooks.mysql.MySqlHook',
        'airflow.hooks.mysql_hook.MySqlHook',
    ),
    (
        'airflow.providers.oracle.hooks.oracle.OracleHook',
        'airflow.hooks.oracle_hook.OracleHook',
    ),
    (
        'airflow.providers.postgres.hooks.postgres.PostgresHook',
        'airflow.hooks.postgres_hook.PostgresHook',
    ),
    (
        'airflow.providers.presto.hooks.presto.PrestoHook',
        'airflow.hooks.presto_hook.PrestoHook',
    ),
    (
        'airflow.providers.samba.hooks.samba.SambaHook',
        'airflow.hooks.samba_hook.SambaHook',
    ),
    (
        'airflow.providers.sqlite.hooks.sqlite.SqliteHook',
        'airflow.hooks.sqlite_hook.SqliteHook',
    ),
    (
        'airflow.providers.cloudant.hooks.cloudant.CloudantHook',
        'airflow.contrib.hooks.cloudant_hook.CloudantHook',
    ),
    (
        'airflow.providers.databricks.hooks.databricks.DatabricksHook',
        'airflow.contrib.hooks.databricks_hook.DatabricksHook',
    ),
    (
        'airflow.providers.datadog.hooks.datadog.DatadogHook',
        'airflow.contrib.hooks.datadog_hook.DatadogHook',
    ),
    (
        'airflow.providers.dingding.hooks.dingding.DingdingHook',
        'airflow.contrib.hooks.dingding_hook.DingdingHook',
    ),
    (
        'airflow.providers.discord.hooks.discord_webhook.DiscordWebhookHook',
        'airflow.contrib.hooks.discord_webhook_hook.DiscordWebhookHook',
    ),
    (
        'airflow.providers.google.suite.hooks.drive.GoogleDriveHook',
        'airflow.contrib.hooks.gdrive_hook.GoogleDriveHook',
    ),
    (
        'airflow.providers.jenkins.hooks.jenkins.JenkinsHook',
        'airflow.contrib.hooks.jenkins_hook.JenkinsHook',
    ),
    (
        'airflow.providers.opsgenie.hooks.opsgenie_alert.OpsgenieAlertHook',
        'airflow.contrib.hooks.opsgenie_alert_hook.OpsgenieAlertHook',
    ),
    (
        'airflow.providers.pagerduty.hooks.pagerduty.PagerdutyHook',
        'airflow.contrib.hooks.pagerduty_hook.PagerdutyHook',
    ),
    (
        'airflow.providers.qubole.hooks.qubole_check.QuboleCheckHook',
        'airflow.contrib.hooks.qubole_check_hook.QuboleCheckHook',
    ),
    (
        'airflow.providers.qubole.hooks.qubole.QuboleHook',
        'airflow.contrib.hooks.qubole_hook.QuboleHook',
    ),
    (
        'airflow.providers.salesforce.hooks.salesforce.SalesforceHook',
        'airflow.contrib.hooks.salesforce_hook.SalesforceHook',
    ),
    (
        'airflow.providers.segment.hooks.segment.SegmentHook',
        'airflow.contrib.hooks.segment_hook.SegmentHook',
    ),
    (
        'airflow.providers.slack.hooks.slack_webhook.SlackWebhookHook',
        'airflow.contrib.hooks.slack_webhook_hook.SlackWebhookHook',
    ),
    (
        'airflow.providers.vertica.hooks.vertica.VerticaHook',
        'airflow.contrib.hooks.vertica_hook.VerticaHook',
    ),
    (
        'airflow.providers.slack.hooks.slack.SlackHook',
        'airflow.hooks.slack_hook.SlackHook',
    ),
    (
        'airflow.providers.zendesk.hooks.zendesk.ZendeskHook',
        'airflow.hooks.zendesk_hook.ZendeskHook',
    ),
    (
        'airflow.providers.ftp.hooks.ftp.FTPSHook',
        'airflow.contrib.hooks.ftp_hook.FTPSHook',
    ),
    (
        'airflow.providers.ftp.hooks.ftp.FTPHook',
        'airflow.contrib.hooks.ftp_hook.FTPHook',
    ),
    (
        'airflow.providers.imap.hooks.imap.ImapHook',
        'airflow.contrib.hooks.imap_hook.ImapHook',
    ),
    (
        'airflow.providers.ssh.hooks.ssh.SSHHook',
        'airflow.contrib.hooks.ssh_hook.SSHHook',
    ),
    (
        'airflow.providers.microsoft.winrm.hooks.winrm.WinRMHook',
        'airflow.contrib.hooks.winrm_hook.WinRMHook',
    ),
    (
        'airflow.providers.http.hooks.http.HttpHook',
        'airflow.hooks.http_hook.HttpHook',
    ),
    (
        'airflow.providers.jdbc.hooks.jdbc.JdbcHook',
        'airflow.hooks.jdbc_hook.JdbcHook',
    ),
    (
        'airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook',
        'airflow.contrib.hooks.aws_hook.AwsHook',
    ),
    (
        'airflow.providers.amazon.aws.hooks.aws_dynamodb.AwsDynamoDBHook',
        'airflow.contrib.hooks.aws_dynamodb_hook.AwsDynamoDBHook',
    ),
    (
        'airflow.providers.sftp.hooks.sftp.SFTPHook',
        'airflow.contrib.hooks.sftp_hook.SFTPHook',
    ),
]
OPERATOR = [
(
"airflow.providers.google.cloud.operators.adls_to_gcs.ADLSToGCSOperator",
"airflow.contrib.operators.adls_to_gcs.AdlsToGoogleCloudStorageOperator",
),
(
"airflow.providers.google.cloud.operators.dataflow.DataflowCreateJavaJobOperator",
"airflow.contrib.operators.dataflow_operator.DataFlowJavaOperator",
),
(
"airflow.providers.google.cloud.operators.dataflow.DataflowCreatePythonJobOperator",
"airflow.contrib.operators.dataflow_operator.DataFlowPythonOperator",
),
(
"airflow.providers.google.cloud.operators.dataflow.DataflowTemplatedJobStartOperator",
"airflow.contrib.operators.dataflow_operator.DataflowTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreExportEntitiesOperator",
"airflow.contrib.operators.datastore_export_operator.DatastoreExportOperator",
),
(
"airflow.providers.google.cloud.operators.datastore.CloudDatastoreImportEntitiesOperator",
"airflow.contrib.operators.datastore_import_operator.DatastoreImportOperator",
),
(
"airflow.providers.google.cloud.operators.local_to_gcs.LocalFilesystemToGCSOperator",
"airflow.contrib.operators.file_to_gcs.FileToGoogleCloudStorageOperator",
),
(
"airflow.providers.google.cloud.operators.bigtable.BigtableUpdateClusterOperator",
"airflow.contrib.operators.gcp_bigtable_operator.BigtableClusterUpdateOperator",
),
(
"airflow.providers.google.cloud.operators.bigtable.BigtableCreateInstanceOperator",
"airflow.contrib.operators.gcp_bigtable_operator.BigtableInstanceCreateOperator",
),
(
"airflow.providers.google.cloud.operators.bigtable.BigtableDeleteInstanceOperator",
"airflow.contrib.operators.gcp_bigtable_operator.BigtableInstanceDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.bigtable.BigtableCreateTableOperator",
"airflow.contrib.operators.gcp_bigtable_operator.BigtableTableCreateOperator",
),
(
"airflow.providers.google.cloud.operators.bigtable.BigtableDeleteTableOperator",
"airflow.contrib.operators.gcp_bigtable_operator.BigtableTableDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_build.CloudBuildCreateOperator",
"airflow.contrib.operators.gcp_cloud_build_operator.CloudBuildCreateBuildOperator",
),
(
"airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator",
"airflow.contrib.operators.gcp_compute_operator.GceBaseOperator",
),
(
"airflow.providers.google.cloud.operators.compute"
".ComputeEngineInstanceGroupUpdateManagerTemplateOperator",
"airflow.contrib.operators.gcp_compute_operator."
"GceInstanceGroupManagerUpdateTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.compute.ComputeEngineStartInstanceOperator",
"airflow.contrib.operators.gcp_compute_operator.GceInstanceStartOperator",
),
(
"airflow.providers.google.cloud.operators.compute.ComputeEngineStopInstanceOperator",
"airflow.contrib.operators.gcp_compute_operator.GceInstanceStopOperator",
),
(
"airflow.providers.google.cloud.operators.compute.ComputeEngineCopyInstanceTemplateOperator",
"airflow.contrib.operators.gcp_compute_operator.GceInstanceTemplateCopyOperator",
),
(
"airflow.providers.google.cloud.operators.compute.ComputeEngineSetMachineTypeOperator",
"airflow.contrib.operators.gcp_compute_operator.GceSetMachineTypeOperator",
),
(
"airflow.providers.google.cloud.operators.kubernetes_engine.GKECreateClusterOperator",
"airflow.contrib.operators.gcp_container_operator.GKEClusterCreateOperator",
),
(
"airflow.providers.google.cloud.operators.kubernetes_engine.GKEDeleteClusterOperator",
"airflow.contrib.operators.gcp_container_operator.GKEClusterDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.kubernetes_engine.GKEStartPodOperator",
"airflow.contrib.operators.gcp_container_operator.GKEPodOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPCancelDLPJobOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPCancelDLPJobOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDeidentifyTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPCreateDeidentifyTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDLPJobOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPCreateDLPJobOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPCreateInspectTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPCreateInspectTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPCreateJobTriggerOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPCreateJobTriggerOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPCreateStoredInfoTypeOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPCreateStoredInfoTypeOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeidentifyContentOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPDeidentifyContentOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDeidentifyTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPDeleteDeidentifyTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDLPJobOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPDeleteDlpJobOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteInspectTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPDeleteInspectTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteJobTriggerOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPDeleteJobTriggerOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteStoredInfoTypeOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPDeleteStoredInfoTypeOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPGetDeidentifyTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPGetDeidentifyTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPGetDlpJobOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPGetInspectTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPGetInspectTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobTriggerOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPGetJobTripperOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPGetStoredInfoTypeOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPGetStoredInfoTypeOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPInspectContentOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPInspectContentOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPListDeidentifyTemplatesOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPListDeidentifyTemplatesOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPListDLPJobsOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPListDlpJobsOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPListInfoTypesOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPListInfoTypesOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPListInspectTemplatesOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPListInspectTemplatesOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPListJobTriggersOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPListJobTriggersOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPListStoredInfoTypesOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPListStoredInfoTypesOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPRedactImageOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPRedactImageOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPReidentifyContentOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPReidentifyContentOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateDeidentifyTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPUpdateDeidentifyTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateInspectTemplateOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPUpdateInspectTemplateOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateJobTriggerOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPUpdateJobTriggerOperator",
),
(
"airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateStoredInfoTypeOperator",
"airflow.contrib.operators.gcp_dlp_operator.CloudDLPUpdateStoredInfoTypeOperator",
),
(
"airflow.providers.google.cloud.operators.functions.CloudFunctionDeleteFunctionOperator",
"airflow.contrib.operators.gcp_function_operator.GcfFunctionDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.functions.CloudFunctionDeployFunctionOperator",
"airflow.contrib.operators.gcp_function_operator.GcfFunctionDeployOperator",
),
(
"airflow.providers.google.cloud.operators.natural_language."
"CloudNaturalLanguageAnalyzeEntitiesOperator",
"airflow.contrib.operators.gcp_natural_language_operator."
"CloudLanguageAnalyzeEntitiesOperator",
),
(
"airflow.providers.google.cloud.operators.natural_language."
"CloudNaturalLanguageAnalyzeEntitySentimentOperator",
"airflow.contrib.operators.gcp_natural_language_operator."
"CloudLanguageAnalyzeEntitySentimentOperator",
),
(
"airflow.providers.google.cloud.operators.natural_language."
"CloudNaturalLanguageAnalyzeSentimentOperator",
"airflow.contrib.operators.gcp_natural_language_operator."
"CloudLanguageAnalyzeSentimentOperator",
),
(
"airflow.providers.google.cloud.operators.natural_language."
"CloudNaturalLanguageClassifyTextOperator",
"airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageClassifyTextOperator",
),
(
"airflow.providers.google.cloud.operators.spanner.SpannerDeleteDatabaseInstanceOperator",
"airflow.contrib.operators.gcp_spanner_operator.CloudSpannerInstanceDatabaseDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.spanner.SpannerDeployDatabaseInstanceOperator",
"airflow.contrib.operators.gcp_spanner_operator.CloudSpannerInstanceDatabaseDeployOperator",
),
(
"airflow.providers.google.cloud.operators.spanner.SpannerQueryDatabaseInstanceOperator",
"airflow.contrib.operators.gcp_spanner_operator.CloudSpannerInstanceDatabaseQueryOperator",
),
(
"airflow.providers.google.cloud.operators.spanner.SpannerUpdateDatabaseInstanceOperator",
"airflow.contrib.operators.gcp_spanner_operator.CloudSpannerInstanceDatabaseUpdateOperator",
),
(
"airflow.providers.google.cloud.operators.spanner.SpannerDeleteInstanceOperator",
"airflow.contrib.operators.gcp_spanner_operator.CloudSpannerInstanceDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.spanner.SpannerDeployInstanceOperator",
"airflow.contrib.operators.gcp_spanner_operator.CloudSpannerInstanceDeployOperator",
),
(
"airflow.providers.google.cloud.operators.speech_to_text.CloudSpeechToTextRecognizeSpeechOperator",
"airflow.contrib.operators.gcp_speech_to_text_operator.GcpSpeechToTextRecognizeSpeechOperator",
),
(
"airflow.providers.google.cloud.operators.text_to_speech.CloudTextToSpeechSynthesizeOperator",
"airflow.contrib.operators.gcp_text_to_speech_operator.GcpTextToSpeechSynthesizeOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service"
".CloudDataTransferServiceCreateJobOperator",
"airflow.contrib.operators.gcp_transfer_operator.GcpTransferServiceJobCreateOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service"
".CloudDataTransferServiceDeleteJobOperator",
"airflow.contrib.operators.gcp_transfer_operator.GcpTransferServiceJobDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service"
".CloudDataTransferServiceUpdateJobOperator",
"airflow.contrib.operators.gcp_transfer_operator.GcpTransferServiceJobUpdateOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceCancelOperationOperator",
"airflow.contrib.operators.gcp_transfer_operator."
"GcpTransferServiceOperationCancelOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceGetOperationOperator",
"airflow.contrib.operators.gcp_transfer_operator."
"GcpTransferServiceOperationGetOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServicePauseOperationOperator",
"airflow.contrib.operators.gcp_transfer_operator."
"GcpTransferServiceOperationPauseOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceResumeOperationOperator",
"airflow.contrib.operators.gcp_transfer_operator."
"GcpTransferServiceOperationResumeOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceListOperationsOperator",
"airflow.contrib.operators.gcp_transfer_operator."
"GcpTransferServiceOperationsListOperator",
),
(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service."
"CloudDataTransferServiceGCSToGCSOperator",
"airflow.contrib.operators.gcp_transfer_operator."
"GoogleCloudStorageToGoogleCloudStorageTransferOperator",
),
(
"airflow.providers.google.cloud.operators.translate.CloudTranslateTextOperator",
"airflow.contrib.operators.gcp_translate_operator.CloudTranslateTextOperator",
),
(
"airflow.providers.google.cloud.operators.translate_speech.GcpTranslateSpeechOperator",
"airflow.contrib.operators.gcp_translate_speech_operator.GcpTranslateSpeechOperator",
),
(
"airflow.providers.google.cloud.operators.video_intelligence."
"CloudVideoIntelligenceDetectVideoExplicitContentOperator",
"airflow.contrib.operators.gcp_video_intelligence_operator."
"CloudVideoIntelligenceDetectVideoExplicitContentOperator",
),
(
"airflow.providers.google.cloud.operators.video_intelligence."
"CloudVideoIntelligenceDetectVideoLabelsOperator",
"airflow.contrib.operators.gcp_video_intelligence_operator."
"CloudVideoIntelligenceDetectVideoLabelsOperator",
),
(
"airflow.providers.google.cloud.operators.video_intelligence."
"CloudVideoIntelligenceDetectVideoShotsOperator",
"airflow.contrib.operators.gcp_video_intelligence_operator."
"CloudVideoIntelligenceDetectVideoShotsOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionAddProductToProductSetOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionAddProductToProductSetOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionImageAnnotateOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionAnnotateImageOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionTextDetectOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectDocumentTextOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageLabelsOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageLabelsOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionDetectImageSafeSearchOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionDetectTextOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectTextOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductCreateOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionGetProductOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductGetOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetCreateOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionDeleteProductSetOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionGetProductSetOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetGetOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductSetOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductSetUpdateOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionUpdateProductOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionCreateReferenceImageOperator",
"airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator",
),
(
"airflow.providers.google.cloud.operators.vision.CloudVisionRemoveProductFromProductSetOperator",
"airflow.contrib.operators.gcp_vision_operator."
"CloudVisionRemoveProductFromProductSetOperator",
),
(
"airflow.providers.google.cloud.operators.gcs_to_bigquery.GCSToBigQueryOperator",
"airflow.contrib.operators.gcs_to_bq.GoogleCloudStorageToBigQueryOperator",
),
(
"airflow.providers.google.cloud.operators.gcs_to_gcs.GCSToGCSOperator",
"airflow.contrib.operators.gcs_to_gcs.GoogleCloudStorageToGoogleCloudStorageOperator",
),
(
"airflow.providers.amazon.aws.operators.gcs_to_s3.GCSToS3Operator",
"airflow.contrib.operators.gcs_to_s3.GoogleCloudStorageToS3Operator",
),
(
"airflow.providers.google.cloud.operators.mlengine.MLEngineStartBatchPredictionJobOperator",
"airflow.contrib.operators.mlengine_operator.MLEngineBatchPredictionOperator",
),
(
"airflow.providers.google.cloud.operators.mlengine.MLEngineManageModelOperator",
"airflow.contrib.operators.mlengine_operator.MLEngineModelOperator",
),
(
"airflow.providers.google.cloud.operators.mlengine.MLEngineStartTrainingJobOperator",
"airflow.contrib.operators.mlengine_operator.MLEngineTrainingOperator",
),
(
"airflow.providers.google.cloud.operators.mlengine.MLEngineManageVersionOperator",
"airflow.contrib.operators.mlengine_operator.MLEngineVersionOperator",
),
(
"airflow.providers.google.cloud.operators.mssql_to_gcs.MSSQLToGCSOperator",
"airflow.contrib.operators.mssql_to_gcs.MsSqlToGoogleCloudStorageOperator",
),
(
"airflow.providers.google.cloud.operators.mysql_to_gcs.MySQLToGCSOperator",
"airflow.contrib.operators.mysql_to_gcs.MySqlToGoogleCloudStorageOperator",
),
(
"airflow.providers.google.cloud.operators.postgres_to_gcs.PostgresToGCSOperator",
"airflow.contrib.operators.postgres_to_gcs_operator."
"PostgresToGoogleCloudStorageOperator",
),
(
"airflow.providers.google.cloud.operators.pubsub.PubSubPublishMessageOperator",
"airflow.contrib.operators.pubsub_operator.PubSubPublishOperator",
),
(
"airflow.providers.google.cloud.operators.pubsub.PubSubCreateSubscriptionOperator",
"airflow.contrib.operators.pubsub_operator.PubSubSubscriptionCreateOperator",
),
(
"airflow.providers.google.cloud.operators.pubsub.PubSubDeleteSubscriptionOperator",
"airflow.contrib.operators.pubsub_operator.PubSubSubscriptionDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.pubsub.PubSubCreateTopicOperator",
"airflow.contrib.operators.pubsub_operator.PubSubTopicCreateOperator",
),
(
"airflow.providers.google.cloud.operators.pubsub.PubSubDeleteTopicOperator",
"airflow.contrib.operators.pubsub_operator.PubSubTopicDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.sql_to_gcs.BaseSQLToGCSOperator",
"airflow.contrib.operators.sql_to_gcs.BaseSQLToGoogleCloudStorageOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocCreateClusterOperator",
"airflow.contrib.operators.dataproc_operator.DataprocClusterCreateOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocDeleteClusterOperator",
"airflow.contrib.operators.dataproc_operator.DataprocClusterDeleteOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocScaleClusterOperator",
"airflow.contrib.operators.dataproc_operator.DataprocClusterScaleOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocSubmitHadoopJobOperator",
"airflow.contrib.operators.dataproc_operator.DataProcHadoopOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocSubmitHiveJobOperator",
"airflow.contrib.operators.dataproc_operator.DataProcHiveOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocJobBaseOperator",
"airflow.contrib.operators.dataproc_operator.DataProcJobBaseOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocSubmitPigJobOperator",
"airflow.contrib.operators.dataproc_operator.DataProcPigOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocSubmitPySparkJobOperator",
"airflow.contrib.operators.dataproc_operator.DataProcPySparkOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocSubmitSparkJobOperator",
"airflow.contrib.operators.dataproc_operator.DataProcSparkOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocSubmitSparkSqlJobOperator",
"airflow.contrib.operators.dataproc_operator.DataProcSparkSqlOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocInstantiateInlineWorkflowTemplateOperator",
"airflow.contrib.operators.dataproc_operator."
"DataprocWorkflowTemplateInstantiateInlineOperator",
),
(
"airflow.providers.google.cloud."
"operators.dataproc.DataprocInstantiateWorkflowTemplateOperator",
"airflow.contrib.operators.dataproc_operator."
"DataprocWorkflowTemplateInstantiateOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator",
"airflow.contrib.operators.bigquery_check_operator.BigQueryCheckOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery.BigQueryIntervalCheckOperator",
"airflow.contrib.operators.bigquery_check_operator.BigQueryIntervalCheckOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery.BigQueryValueCheckOperator",
"airflow.contrib.operators.bigquery_check_operator.BigQueryValueCheckOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery.BigQueryGetDataOperator",
"airflow.contrib.operators.bigquery_get_data.BigQueryGetDataOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery.BigQueryExecuteQueryOperator",
"airflow.contrib.operators.bigquery_operator.BigQueryOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery.BigQueryDeleteTableOperator",
"airflow.contrib.operators.bigquery_table_delete_operator.BigQueryTableDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery_to_bigquery.BigQueryToBigQueryOperator",
"airflow.contrib.operators.bigquery_to_bigquery.BigQueryToBigQueryOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery_to_gcs.BigQueryToGCSOperator",
"airflow.contrib.operators.bigquery_to_gcs.BigQueryToCloudStorageOperator",
),
(
"airflow.providers.google.cloud.operators.bigquery_to_mysql.BigQueryToMySqlOperator",
"airflow.contrib.operators.bigquery_to_mysql_operator.BigQueryToMySqlOperator",
),
(
"airflow.providers.google.cloud.operators.gcs.GCSBucketCreateAclEntryOperator",
"airflow.contrib.operators.gcs_acl_operator.GoogleCloudStorageBucketCreateAclEntryOperator",
),
(
"airflow.providers.google.cloud.operators.gcs.GCSObjectCreateAclEntryOperator",
"airflow.contrib.operators.gcs_acl_operator.GoogleCloudStorageObjectCreateAclEntryOperator",
),
(
"airflow.providers.google.cloud.operators.gcs.GCSDeleteObjectsOperator",
"airflow.contrib.operators.gcs_delete_operator.GoogleCloudStorageDeleteOperator",
),
(
"airflow.providers.google.cloud.operators.gcs.GCSToLocalOperator",
"airflow.contrib.operators.gcs_download_operator.GoogleCloudStorageDownloadOperator",
),
(
"airflow.providers.google.cloud.operators.gcs.GCSListObjectsOperator",
"airflow.contrib.operators.gcs_list_operator.GoogleCloudStorageListOperator",
),
(
"airflow.providers.google.cloud.operators.gcs.GCSCreateBucketOperator",
"airflow.contrib.operators.gcs_operator.GoogleCloudStorageCreateBucketOperator",
),
(
"airflow.providers.amazon.aws.operators.athena.AWSAthenaOperator",
"airflow.contrib.operators.aws_athena_operator.AWSAthenaOperator",
),
(
"airflow.providers.amazon.aws.operators.batch.AwsBatchOperator",
"airflow.contrib.operators.awsbatch_operator.AWSBatchOperator",
),
(
"airflow.providers.amazon.aws.operators.sqs.SQSPublishOperator",
"airflow.contrib.operators.aws_sqs_publish_operator.SQSPublishOperator",
),
(
"airflow.providers.amazon.aws.operators.sns.SnsPublishOperator",
"airflow.contrib.operators.sns_publish_operator.SnsPublishOperator",
),
(
'airflow.providers.apache.druid.operators.druid.DruidOperator',
'airflow.contrib.operators.druid_operator.DruidOperator',
),
(
'airflow.providers.apache.spark.operators.spark_jdbc.SparkSubmitOperator',
'airflow.contrib.operators.spark_jdbc_operator.SparkSubmitOperator',
),
(
'airflow.providers.apache.spark.operators.spark_sql.SparkSqlOperator',
'airflow.contrib.operators.spark_sql_operator.SparkSqlOperator',
),
(
'airflow.providers.apache.spark.operators.spark_submit.SparkSubmitOperator',
'airflow.contrib.operators.spark_submit_operator.SparkSubmitOperator',
),
(
'airflow.providers.apache.sqoop.operators.sqoop.SqoopOperator',
'airflow.contrib.operators.sqoop_operator.SqoopOperator',
),
(
'airflow.providers.apache.druid.operators.druid_check.DruidCheckOperator',
'airflow.operators.druid_check_operator.DruidCheckOperator',
),
(
'airflow.providers.apache.hive.operators.hive.HiveOperator',
'airflow.operators.hive_operator.HiveOperator',
),
(
'airflow.providers.apache.hive.operators.hive_stats.HiveStatsCollectionOperator',
'airflow.operators.hive_stats_operator.HiveStatsCollectionOperator',
),
(
'airflow.providers.apache.pig.operators.pig.PigOperator',
'airflow.operators.pig_operator.PigOperator',
),
(
'airflow.providers.microsoft.azure.operators.adls_list.AzureDataLakeStorageListOperator',
'airflow.contrib.operators.adls_list_operator.AzureDataLakeStorageListOperator',
),
(
'airflow.providers.microsoft.azure.operators'
'.azure_container_instances.AzureContainerInstancesOperator',
'airflow.contrib.operators.azure_container_instances_operator.AzureContainerInstancesOperator',
),
(
'airflow.providers.microsoft.azure.operators.azure_cosmos.AzureCosmosInsertDocumentOperator',
'airflow.contrib.operators.azure_cosmos_operator.AzureCosmosInsertDocumentOperator',
),
(
'airflow.providers.microsoft.azure.operators.wasb_delete_blob.WasbDeleteBlobOperator',
'airflow.contrib.operators.wasb_delete_blob_operator.WasbDeleteBlobOperator',
),
(
'airflow.providers.amazon.aws.operators.ecs.ECSOperator',
'airflow.contrib.operators.ecs_operator.ECSOperator',
),
(
'airflow.providers.amazon.aws.operators.emr_add_steps.EmrAddStepsOperator',
'airflow.contrib.operators.emr_add_steps_operator.EmrAddStepsOperator',
),
(
'airflow.providers.amazon.aws.operators.emr_create_job_flow.EmrCreateJobFlowOperator',
'airflow.contrib.operators.emr_create_job_flow_operator.EmrCreateJobFlowOperator',
),
(
'airflow.providers.amazon.aws.operators.emr_terminate_job_flow.EmrTerminateJobFlowOperator',
'airflow.contrib.operators.emr_terminate_job_flow_operator.EmrTerminateJobFlowOperator',
),
(
'airflow.providers.amazon.aws.operators.s3_copy_object.S3CopyObjectOperator',
'airflow.contrib.operators.s3_copy_object_operator.S3CopyObjectOperator',
),
(
'airflow.providers.amazon.aws.operators.s3_delete_objects.S3DeleteObjectsOperator',
'airflow.contrib.operators.s3_delete_objects_operator.S3DeleteObjectsOperator',
),
(
'airflow.providers.amazon.aws.operators.s3_list.S3ListOperator',
'airflow.contrib.operators.s3_list_operator.S3ListOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_base.SageMakerBaseOperator',
'airflow.contrib.operators.sagemaker_base_operator.SageMakerBaseOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_endpoint_config.SageMakerEndpointConfigOperator',
'airflow.contrib.operators.sagemaker_endpoint_config_operator.SageMakerEndpointConfigOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_endpoint.SageMakerEndpointOperator',
'airflow.contrib.operators.sagemaker_endpoint_operator.SageMakerEndpointOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_model.SageMakerModelOperator',
'airflow.contrib.operators.sagemaker_model_operator.SageMakerModelOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_training.SageMakerTrainingOperator',
'airflow.contrib.operators.sagemaker_training_operator.SageMakerTrainingOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_transform.SageMakerTransformOperator',
'airflow.contrib.operators.sagemaker_transform_operator.SageMakerTransformOperator',
),
(
'airflow.providers.amazon.aws.operators.sagemaker_tuning.SageMakerTuningOperator',
'airflow.contrib.operators.sagemaker_tuning_operator.SageMakerTuningOperator',
),
(
'airflow.providers.docker.operators.docker_swarm.DockerSwarmOperator',
'airflow.contrib.operators.docker_swarm_operator.DockerSwarmOperator',
),
(
'airflow.providers.cncf.kubernetes.operators.kubernetes_pod.KubernetesPodOperator',
'airflow.contrib.operators.kubernetes_pod_operator.KubernetesPodOperator',
),
(
'airflow.providers.redis.operators.redis_publish.RedisPublishOperator',
'airflow.contrib.operators.redis_publish_operator.RedisPublishOperator',
),
(
'airflow.operators.bash.BashOperator',
'airflow.operators.bash_operator.BashOperator',
),
(
'airflow.providers.docker.operators.docker.DockerOperator',
'airflow.operators.docker_operator.DockerOperator',
),
(
'airflow.providers.microsoft.mssql.operators.mssql.MsSqlOperator',
'airflow.operators.mssql_operator.MsSqlOperator',
),
(
'airflow.providers.mysql.operators.mysql.MySqlOperator',
'airflow.operators.mysql_operator.MySqlOperator',
),
(
'airflow.providers.oracle.operators.oracle.OracleOperator',
'airflow.operators.oracle_operator.OracleOperator',
),
(
'airflow.providers.papermill.operators.papermill.PapermillOperator',
'airflow.operators.papermill_operator.PapermillOperator',
),
(
'airflow.providers.presto.operators.presto_check.PrestoCheckOperator',
'airflow.operators.presto_check_operator.PrestoCheckOperator',
),
(
'airflow.providers.presto.operators.presto_check.PrestoIntervalCheckOperator',
'airflow.operators.presto_check_operator.PrestoIntervalCheckOperator',
),
(
'airflow.providers.presto.operators.presto_check.PrestoValueCheckOperator',
'airflow.operators.presto_check_operator.PrestoValueCheckOperator',
),
(
'airflow.operators.python.BranchPythonOperator',
'airflow.operators.python_operator.BranchPythonOperator',
),
(
'airflow.operators.python.PythonOperator',
'airflow.operators.python_operator.PythonOperator',
),
(
'airflow.operators.python.ShortCircuitOperator',
'airflow.operators.python_operator.ShortCircuitOperator',
),
(
'airflow.operators.python.PythonVirtualenvOperator',
'airflow.operators.python_operator.PythonVirtualenvOperator',
),
(
'airflow.providers.sqlite.operators.sqlite.SqliteOperator',
'airflow.operators.sqlite_operator.SqliteOperator',
),
(
'airflow.providers.databricks.operators.databricks.DatabricksRunNowOperator',
'airflow.contrib.operators.databricks_operator.DatabricksRunNowOperator',
),
(
'airflow.providers.databricks.operators.databricks.DatabricksSubmitRunOperator',
'airflow.contrib.operators.databricks_operator.DatabricksSubmitRunOperator',
),
(
'airflow.providers.dingding.operators.dingding.DingdingOperator',
'airflow.contrib.operators.dingding_operator.DingdingOperator',
),
(
'airflow.providers.discord.operators.discord_webhook.DiscordWebhookOperator',
'airflow.contrib.operators.discord_webhook_operator.DiscordWebhookOperator',
),
(
'airflow.providers.jenkins.operators.jenkins_job_trigger.JenkinsJobTriggerOperator',
'airflow.contrib.operators.jenkins_job_trigger_operator.JenkinsJobTriggerOperator',
),
(
'airflow.providers.opsgenie.operators.opsgenie_alert.OpsgenieAlertOperator',
'airflow.contrib.operators.opsgenie_alert_operator.OpsgenieAlertOperator',
),
(
'airflow.providers.qubole.operators.qubole_check.QuboleCheckOperator',
'airflow.contrib.operators.qubole_check_operator.QuboleCheckOperator',
),
(
'airflow.providers.qubole.operators.qubole_check.QuboleValueCheckOperator',
'airflow.contrib.operators.qubole_check_operator.QuboleValueCheckOperator',
),
(
'airflow.providers.qubole.operators.qubole.QuboleOperator',
'airflow.contrib.operators.qubole_operator.QuboleOperator',
),
(
'airflow.providers.segment.operators.segment_track_event.SegmentTrackEventOperator',
'airflow.contrib.operators.segment_track_event_operator.SegmentTrackEventOperator',
),
(
'airflow.providers.slack.operators.slack_webhook.SlackWebhookOperator',
'airflow.contrib.operators.slack_webhook_operator.SlackWebhookOperator',
),
(
'airflow.providers.vertica.operators.vertica.VerticaOperator',
'airflow.contrib.operators.vertica_operator.VerticaOperator',
),
(
'airflow.providers.datadog.sensors.datadog.DatadogSensor',
'airflow.contrib.sensors.datadog_sensor.DatadogSensor',
),
(
'airflow.providers.slack.operators.slack.SlackAPIPostOperator',
'airflow.operators.slack_operator.SlackAPIPostOperator',
),
(
'airflow.providers.slack.operators.slack.SlackAPIOperator',
'airflow.operators.slack_operator.SlackAPIOperator',
),
(
'airflow.providers.grpc.operators.grpc.GrpcOperator',
'airflow.contrib.operators.grpc_operator.GrpcOperator',
),
(
'airflow.providers.ssh.operators.ssh.SSHOperator',
'airflow.contrib.operators.ssh_operator.SSHOperator',
),
(
'airflow.providers.microsoft.winrm.operators.winrm.WinRMOperator',
'airflow.contrib.operators.winrm_operator.WinRMOperator',
),
(
'airflow.providers.email.operators.email.EmailOperator',
'airflow.operators.email_operator.EmailOperator',
),
(
'airflow.providers.http.operators.http.SimpleHttpOperator',
'airflow.operators.http_operator.SimpleHttpOperator',
),
(
'airflow.providers.jdbc.operators.jdbc.JdbcOperator',
'airflow.operators.jdbc_operator.JdbcOperator',
),
(
'airflow.providers.sftp.operators.sftp.SFTPOperator',
'airflow.contrib.operators.sftp_operator.SFTPOperator',
),
(
'airflow.providers.amazon.aws.operators.dynamodb_to_s3.DynamoDBToS3Operator',
'airflow.contrib.operators.dynamodb_to_s3.DynamoDBToS3Operator',
),
(
'airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator',
'airflow.contrib.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator',
),
(
'airflow.providers.amazon.aws.operators.imap_attachment_to_s3.ImapAttachmentToS3Operator',
'airflow.contrib.operators.imap_attachment_to_s3_operator.ImapAttachmentToS3Operator',
),
(
'airflow.providers.amazon.aws.operators.mongo_to_s3.MongoToS3Operator',
'airflow.contrib.operators.mongo_to_s3.MongoToS3Operator',
),
(
'airflow.providers.amazon.aws.operators.s3_to_sftp.S3ToSFTPOperator',
'airflow.contrib.operators.s3_to_sftp_operator.S3ToSFTPOperator',
),
(
'airflow.providers.amazon.aws.operators.sftp_to_s3.SFTPToS3Operator',
'airflow.contrib.operators.sftp_to_s3_operator.SFTPToS3Operator',
),
(
'airflow.providers.amazon.aws.operators.gcs_to_s3.GCSToS3Operator',
'airflow.operators.gcs_to_s3.GCSToS3Operator',
),
(
'airflow.providers.amazon.aws.operators.google_api_to_s3_transfer.GoogleApiToS3Transfer',
'airflow.operators.google_api_to_s3_transfer.GoogleApiToS3Transfer',
),
(
'airflow.providers.amazon.aws.operators.redshift_to_s3.RedshiftToS3Transfer',
'airflow.operators.redshift_to_s3_operator.RedshiftToS3Transfer',
),
(
'airflow.providers.amazon.aws.operators.s3_to_redshift.S3ToRedshiftTransfer',
'airflow.operators.s3_to_redshift_operator.S3ToRedshiftTransfer',
),
(
'airflow.providers.apache.hive.operators.vertica_to_hive.VerticaToHiveTransfer',
'airflow.contrib.operators.vertica_to_hive.VerticaToHiveTransfer',
),
(
'airflow.providers.apache.druid.operators.hive_to_druid.HiveToDruidTransfer',
'airflow.operators.hive_to_druid.HiveToDruidTransfer',
),
(
'airflow.providers.apache.hive.operators.hive_to_mysql.HiveToMySqlTransfer',
'airflow.operators.hive_to_mysql.HiveToMySqlTransfer',
),
(
'airflow.providers.apache.hive.operators.hive_to_samba.Hive2SambaOperator',
'airflow.operators.hive_to_samba_operator.Hive2SambaOperator',
),
(
'airflow.providers.apache.hive.operators.mssql_to_hive.MsSqlToHiveTransfer',
'airflow.operators.mssql_to_hive.MsSqlToHiveTransfer',
),
(
'airflow.providers.microsoft.azure.operators.file_to_wasb.FileToWasbOperator',
'airflow.contrib.operators.file_to_wasb.FileToWasbOperator',
),
(
'airflow.providers.google.suite.operators.gcs_to_gdrive.GCSToGoogleDriveOperator',
'airflow.contrib.operators.gcs_to_gdrive_operator.GCSToGoogleDriveOperator',
),
(
'airflow.providers.microsoft.azure.operators.oracle_to_azure_data_lake_transfer'
'.OracleToAzureDataLakeTransfer',
'airflow.contrib.operators.oracle_to_azure_data_lake_transfer.OracleToAzureDataLakeTransfer',
),
(
'airflow.providers.oracle.operators.oracle_to_oracle_transfer.OracleToOracleTransfer',
'airflow.contrib.operators.oracle_to_oracle_transfer.OracleToOracleTransfer',
),
(
'airflow.providers.google.cloud.operators.s3_to_gcs.S3ToGCSOperator',
'airflow.contrib.operators.s3_to_gcs_operator.S3ToGCSOperator',
),
(
'airflow.providers.mysql.operators.vertica_to_mysql.VerticaToMySqlTransfer',
'airflow.contrib.operators.vertica_to_mysql.VerticaToMySqlTransfer',
),
(
'airflow.providers.mysql.operators.presto_to_mysql.PrestoToMySqlTransfer',
'airflow.operators.presto_to_mysql.PrestoToMySqlTransfer',
),
]
# (new import path, deprecated import path) pairs for sensor classes that were
# moved out of ``airflow.contrib`` / old core locations into provider packages.
# Tuple order is (new, old) — the same convention as the HOOK/OPERATOR lists.
SENSOR = [
    (
        "airflow.providers.google.cloud.sensors.bigtable.BigtableTableReplicationCompletedSensor",
        "airflow.contrib.operators.gcp_bigtable_operator."
        "BigtableTableWaitForReplicationSensor",
    ),
    (
        "airflow.providers.google.cloud.sensors.cloud_storage_transfer_service."
        "CloudDataTransferServiceJobStatusSensor",
        "airflow.contrib.sensors.gcp_transfer_sensor."
        "GCPTransferServiceWaitForJobStatusSensor",
    ),
    (
        "airflow.providers.google.cloud.sensors.pubsub.PubSubPullSensor",
        "airflow.contrib.sensors.pubsub_sensor.PubSubPullSensor",
    ),
    (
        "airflow.providers.google.cloud.sensors.bigquery.BigQueryTableExistenceSensor",
        "airflow.contrib.sensors.bigquery_sensor.BigQueryTableSensor",
    ),
    (
        "airflow.providers.google.cloud.sensors.gcs.GCSObjectExistenceSensor",
        "airflow.contrib.sensors.gcs_sensor.GoogleCloudStorageObjectSensor",
    ),
    (
        "airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor",
        "airflow.contrib.sensors.gcs_sensor.GoogleCloudStorageObjectUpdatedSensor",
    ),
    (
        # NOTE(review): "Wtih" looks like a typo but presumably mirrors the
        # actual class name in the provider package — confirm against the
        # provider module before "fixing" the spelling here.
        "airflow.providers.google.cloud.sensors.gcs.GCSObjectsWtihPrefixExistenceSensor",
        "airflow.contrib.sensors.gcs_sensor.GoogleCloudStoragePrefixSensor",
    ),
    (
        "airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor",
        "airflow.contrib.sensors.gcs_sensor.GoogleCloudStorageUploadSessionCompleteSensor",
    ),
    (
        "airflow.providers.amazon.aws.sensors.athena.AthenaSensor",
        "airflow.contrib.sensors.aws_athena_sensor.AthenaSensor",
    ),
    (
        "airflow.providers.amazon.aws.sensors.sqs.SQSSensor",
        "airflow.contrib.sensors.aws_sqs_sensor.SQSSensor",
    ),
    (
        'airflow.providers.apache.hdfs.sensors.hdfs.HdfsSensorFolder',
        'airflow.contrib.sensors.hdfs_sensor.HdfsSensorFolder',
    ),
    (
        'airflow.providers.apache.hdfs.sensors.hdfs.HdfsSensorRegex',
        'airflow.contrib.sensors.hdfs_sensor.HdfsSensorRegex',
    ),
    (
        'airflow.providers.apache.hive.sensors.hive_partition.HivePartitionSensor',
        'airflow.sensors.hive_partition_sensor.HivePartitionSensor',
    ),
    (
        'airflow.providers.apache.hive.sensors.metastore_partition.MetastorePartitionSensor',
        'airflow.sensors.metastore_partition_sensor.MetastorePartitionSensor',
    ),
    (
        'airflow.providers.apache.hive.sensors.named_hive_partition.NamedHivePartitionSensor',
        'airflow.sensors.named_hive_partition_sensor.NamedHivePartitionSensor',
    ),
    (
        'airflow.providers.apache.hdfs.sensors.web_hdfs.WebHdfsSensor',
        'airflow.sensors.web_hdfs_sensor.WebHdfsSensor',
    ),
    (
        'airflow.providers.apache.hdfs.sensors.hdfs.HdfsSensor',
        'airflow.sensors.hdfs_sensor.HdfsSensor',
    ),
    (
        'airflow.sensors.weekday_sensor.DayOfWeekSensor',
        'airflow.contrib.sensors.weekday_sensor.DayOfWeekSensor',
    ),
    (
        'airflow.sensors.filesystem.FileSensor',
        'airflow.contrib.sensors.file_sensor.FileSensor',
    ),
    (
        'airflow.providers.microsoft.azure.sensors.wasb.WasbBlobSensor',
        'airflow.contrib.sensors.wasb_sensor.WasbBlobSensor',
    ),
    (
        'airflow.providers.microsoft.azure.sensors.wasb.WasbPrefixSensor',
        'airflow.contrib.sensors.wasb_sensor.WasbPrefixSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.glue_catalog_partition.AwsGlueCatalogPartitionSensor',
        'airflow.contrib.sensors.aws_glue_catalog_partition_sensor.AwsGlueCatalogPartitionSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.emr_base.EmrBaseSensor',
        'airflow.contrib.sensors.emr_base_sensor.EmrBaseSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.emr_job_flow.EmrJobFlowSensor',
        'airflow.contrib.sensors.emr_job_flow_sensor.EmrJobFlowSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.emr_step.EmrStepSensor',
        'airflow.contrib.sensors.emr_step_sensor.EmrStepSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.sagemaker_base.SageMakerBaseSensor',
        'airflow.contrib.sensors.sagemaker_base_sensor.SageMakerBaseSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.sagemaker_endpoint.SageMakerEndpointSensor',
        'airflow.contrib.sensors.sagemaker_endpoint_sensor.SageMakerEndpointSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.sagemaker_transform.SageMakerTransformSensor',
        'airflow.contrib.sensors.sagemaker_transform_sensor.SageMakerTransformSensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.sagemaker_tuning.SageMakerTuningSensor',
        'airflow.contrib.sensors.sagemaker_tuning_sensor.SageMakerTuningSensor',
    ),
    (
        # NOTE(review): this entry is an *operator*, not a sensor — it may
        # belong in the OPERATOR list above; verify before relying on which
        # list a class appears in.
        'airflow.providers.amazon.aws.operators.s3_file_transform.S3FileTransformOperator',
        'airflow.operators.s3_file_transform_operator.S3FileTransformOperator',
    ),
    (
        'airflow.providers.amazon.aws.sensors.s3_key.S3KeySensor',
        'airflow.sensors.s3_key_sensor.S3KeySensor',
    ),
    (
        'airflow.providers.amazon.aws.sensors.s3_prefix.S3PrefixSensor',
        'airflow.sensors.s3_prefix_sensor.S3PrefixSensor',
    ),
    (
        'airflow.sensors.bash.BashSensor',
        'airflow.contrib.sensors.bash_sensor.BashSensor',
    ),
    (
        'airflow.providers.celery.sensors.celery_queue.CeleryQueueSensor',
        'airflow.contrib.sensors.celery_queue_sensor.CeleryQueueSensor',
    ),
    (
        'airflow.providers.mongo.sensors.mongo.MongoSensor',
        'airflow.contrib.sensors.mongo_sensor.MongoSensor',
    ),
    (
        'airflow.sensors.python.PythonSensor',
        'airflow.contrib.sensors.python_sensor.PythonSensor',
    ),
    (
        'airflow.providers.redis.sensors.redis_key.RedisKeySensor',
        'airflow.contrib.sensors.redis_key_sensor.RedisKeySensor',
    ),
    (
        'airflow.providers.redis.sensors.redis_pub_sub.RedisPubSubSensor',
        'airflow.contrib.sensors.redis_pub_sub_sensor.RedisPubSubSensor',
    ),
    (
        'airflow.providers.datadog.sensors.datadog.DatadogSensor',
        'airflow.contrib.sensors.datadog_sensor.DatadogSensor',
    ),
    (
        'airflow.providers.qubole.sensors.qubole.QuboleSensor',
        'airflow.contrib.sensors.qubole_sensor.QuboleSensor',
    ),
    (
        'airflow.providers.qubole.sensors.qubole.QubolePartitionSensor',
        'airflow.contrib.sensors.qubole_sensor.QubolePartitionSensor',
    ),
    (
        'airflow.providers.qubole.sensors.qubole.QuboleFileSensor',
        'airflow.contrib.sensors.qubole_sensor.QuboleFileSensor',
    ),
    (
        'airflow.providers.ftp.sensors.ftp.FTPSensor',
        'airflow.contrib.sensors.ftp_sensor.FTPSensor',
    ),
    (
        'airflow.providers.ftp.sensors.ftp.FTPSSensor',
        'airflow.contrib.sensors.ftp_sensor.FTPSSensor',
    ),
    (
        'airflow.providers.imap.sensors.imap_attachment.ImapAttachmentSensor',
        'airflow.contrib.sensors.imap_attachment_sensor.ImapAttachmentSensor',
    ),
    (
        'airflow.providers.http.sensors.http.HttpSensor',
        'airflow.sensors.http_sensor.HttpSensor',
    ),
    (
        'airflow.providers.sftp.sensors.sftp.SFTPSensor',
        'airflow.contrib.sensors.sftp_sensor.SFTPSensor',
    )
]
# (new import path, deprecated import path) pairs for typing-protocol helper
# classes that moved into provider packages.
PROTOCOLS = [
    (
        "airflow.providers.amazon.aws.hooks.batch_client.AwsBatchProtocol",
        "airflow.contrib.operators.awsbatch_operator.BatchProtocol",
    ),
    (
        'airflow.providers.amazon.aws.operators.ecs.ECSProtocol',
        'airflow.contrib.operators.ecs_operator.ECSProtocol',
    ),
]
# Every (new, old) pair; used for the subclass and import-warning checks.
ALL = HOOK + OPERATOR + SENSOR + PROTOCOLS
# Pairs whose *class name* changed during the move (not just the module path).
# Only these are exercised by test_is_class_deprecated, which needs the old
# name to delegate to a differently-named new class.
#
# Bug fix (naming only, behavior identical): the original comprehension bound
# the tuple elements as ``(old_class, new_class)``, but every list stores
# tuples in (new, old) order — see test_is_class_deprecated(self, new_module,
# old_module). The swapped names were harmless (the inequality filter is
# symmetric and the output tuple equals the input tuple) but misleading.
RENAMED_HOOKS = [
    (new_class, old_class)
    for new_class, old_class in HOOK + OPERATOR + SENSOR
    if old_class.rpartition(".")[2] != new_class.rpartition(".")[2]
]
class TestMovingCoreToContrib(TestCase):
    """Checks the contract of classes moved out of ``airflow.contrib``/core.

    For every (new path, old path) pair: importing the old module must emit a
    DeprecationWarning pointing at the new module, the old class must be a
    subclass of the new one, and instantiating a renamed/deprecated class must
    warn and delegate to the replacement's ``__init__``.
    """

    @staticmethod
    def assert_warning(msg: str, warning: Any):
        """Assert that ``msg`` appears in at least one captured warning."""
        error = "Text '{}' not in warnings".format(msg)
        assert any(msg in str(w) for w in warning.warnings), error

    def assert_is_subclass(self, clazz, other):
        """Assert that ``clazz`` is a subclass of ``other`` with a clear message."""
        self.assertTrue(
            issubclass(clazz, other), "{} is not subclass of {}".format(clazz, other)
        )

    def assert_proper_import(self, old_resource, new_resource):
        """Assert that importing the deprecated module warns about the new path."""
        new_path, _, _ = new_resource.rpartition(".")
        old_path, _, _ = old_resource.rpartition(".")
        with self.assertWarns(DeprecationWarning) as warning_msg:
            # Reload to see deprecation warning each time
            importlib.reload(importlib.import_module(old_path))
        self.assert_warning(new_path, warning_msg)

    @staticmethod
    def get_class_from_path(path_to_class, parent=False):
        """Import and return the class named by the dotted ``path_to_class``.

        :param parent: indicates if "path_to_class" arg is the super class.
            When False, abstract classes are wrapped in a synthesized concrete
            subclass (abstract methods replaced by MagicMocks) so the test can
            instantiate them.
        """
        path, _, class_name = path_to_class.rpartition(".")
        module = importlib.import_module(path)
        class_ = getattr(module, class_name)
        if isabstract(class_) and not parent:
            class_name = f"Mock({class_.__name__})"
            attributes = {
                a: mock.MagicMock() for a in class_.__abstractmethods__
            }
            new_class = type(class_name, (class_,), attributes)
            return new_class
        return class_

    @parameterized.expand(PROTOCOLS)
    def test_is_protocol_deprecated(self, _, old_module):
        """Instantiating a deprecated protocol class must warn."""
        deprecation_warning_msg = "This class is deprecated."
        old_module_class = self.get_class_from_path(old_module)
        # Fix: the original also ran ``self.assertTrue(deprecation_warning_msg,
        # wrn)``, which can never fail — the first argument is a constant
        # non-empty (truthy) string and the second is only the failure
        # message. The no-op assertion has been removed.
        with self.assertWarnsRegex(DeprecationWarning, deprecation_warning_msg):
            old_module_class()

    @parameterized.expand(RENAMED_HOOKS)
    def test_is_class_deprecated(self, new_module, old_module):
        """Instantiating via the old name must warn and call the new __init__."""
        deprecation_warning_msg = "This class is deprecated."
        old_module_class = self.get_class_from_path(old_module)
        # Fix: removed the same always-passing ``assertTrue(msg, wrn)`` no-op
        # here as in test_is_protocol_deprecated.
        with self.assertWarnsRegex(DeprecationWarning, deprecation_warning_msg):
            with mock.patch("{}.__init__".format(new_module)) as init_mock:
                init_mock.return_value = None
                old_module_class()
                init_mock.assert_called_once_with()

    @parameterized.expand(ALL)
    def test_is_subclass(self, parent_class_path, sub_class_path):
        """The deprecated class must be a subclass of its replacement."""
        with mock.patch("{}.__init__".format(parent_class_path)):
            parent_class_path = self.get_class_from_path(parent_class_path, parent=True)
            sub_class_path = self.get_class_from_path(sub_class_path)
            self.assert_is_subclass(sub_class_path, parent_class_path)

    @parameterized.expand(ALL)
    def test_warning_on_import(self, new_path, old_path):
        """Importing the deprecated module must warn and name the new module."""
        self.assert_proper_import(old_path, new_path)
|
|
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pyparsing definitions and helper functions for parsing MuJoCo headers."""
import pyparsing as pp
# NB: Don't enable parser memoization (`pp.ParserElement.enablePackrat()`),
# since this results in a ~6x slowdown.
# The values below are Python *source snippets* emitted by the code generator,
# not live ctypes/numpy objects.
NONE = "None"
CTYPES_CHAR = "ctypes.c_char"
# Maps a C type name to the ctypes expression used in generated bindings.
# NOTE: insertion order matters — NATIVE_TYPENAME below builds a MatchFirst
# from these keys, which tries alternatives in order.
C_TO_CTYPES = {
    # integers
    "int": "ctypes.c_int",
    "unsigned int": "ctypes.c_uint",
    "char": CTYPES_CHAR,
    "unsigned char": "ctypes.c_ubyte",
    "size_t": "ctypes.c_size_t",
    # floats
    "float": "ctypes.c_float",
    "double": "ctypes.c_double",
    # pointers
    "void": NONE,
}
# Pointer-type overrides: a pointer to "void" (mapped to NONE above) is
# rendered as ctypes.c_void_p instead of ctypes.POINTER(None).
CTYPES_PTRS = {NONE: "ctypes.c_void_p"}
# Maps a ctypes expression to the corresponding NumPy dtype expression.
# (c_char and c_size_t intentionally have no NumPy counterpart here.)
CTYPES_TO_NUMPY = {
    # integers
    "ctypes.c_int": "np.intc",
    "ctypes.c_uint": "np.uintc",
    "ctypes.c_ubyte": "np.ubyte",
    # floats
    "ctypes.c_float": "np.float32",
    "ctypes.c_double": "np.float64",
}
# Helper functions for constructing recursive parsers.
# ------------------------------------------------------------------------------
def _nested_scopes(opening, closing, body):
  """Returns a parser matching a scope whose members may include sub-scopes."""
  # Forward declaration lets the grammar refer to itself recursively.
  scope = pp.Forward()
  members = pp.ZeroOrMore(body | scope)("members")
  scope <<= pp.Group(opening + members + closing)
  return scope
def _nested_if_else(if_, pred, else_, endif, match_if_true, match_if_false):
    """Return a parser for (possibly nested) if...(else)...endif blocks.

    Results names: "predicate" for the condition, "if_true"/"if_false" for
    the two branches; the else branch is optional.
    """
    block = pp.Forward()
    true_branch = pp.ZeroOrMore(match_if_true | block)("if_true")
    false_branch = pp.Optional(
        else_ +
        pp.ZeroOrMore(match_if_false | block)("if_false"))
    block <<= pp.Group(if_ + pred("predicate") + true_branch + false_branch + endif)
    return block
# Some common string patterns to suppress.
# ------------------------------------------------------------------------------
(X, LPAREN, RPAREN, LBRACK, RBRACK, LBRACE, RBRACE, SEMI, COMMA, EQUAL, FSLASH,
 BSLASH) = list(map(pp.Suppress, "X()[]{};,=/\\"))
EOL = pp.LineEnd().suppress()

# Comments, continuation.
# ------------------------------------------------------------------------------
# Single-line `// ...` comment; the `//` and any leading whitespace are dropped.
COMMENT = pp.Combine(
    pp.Suppress("//") +
    pp.Optional(pp.White()).suppress() +
    pp.SkipTo(pp.LineEnd()))
# A run of consecutive `//` comment lines, one per physical line.
MULTILINE_COMMENT = pp.delimitedList(
    COMMENT.copy().setWhitespaceChars(" \t"), delim=EOL)
# Backslash-newline line continuation (used inside X-macro definitions).
CONTINUATION = (BSLASH + pp.LineEnd()).suppress()

# Preprocessor directives.
# ------------------------------------------------------------------------------
DEFINE = pp.Keyword("#define").suppress()
IFDEF = pp.Keyword("#ifdef").suppress()
IFNDEF = pp.Keyword("#ifndef").suppress()
ELSE = pp.Keyword("#else").suppress()
ENDIF = pp.Keyword("#endif").suppress()

# Variable names, types, literals etc.
# ------------------------------------------------------------------------------
NAME = pp.Word(pp.alphanums + "_")
# Integer literal, optionally with C unsigned/long suffixes (e.g. `10UL`).
INT = pp.Word(pp.nums + "UuLl")
# Float literal such as `1E-14` or `0.5f`.
FLOAT = pp.Word(pp.nums + ".+-EeFf")
NUMBER = FLOAT | INT
# Dimensions can be of the form `[3]`, `[constant_name]` or `[2*constant_name]`
ARRAY_DIM = pp.Combine(
    LBRACK +
    (INT | NAME) +
    pp.Optional(pp.Literal("*")) +
    pp.Optional(INT | NAME) +
    RBRACK)
PTR = pp.Literal("*")
EXTERN = pp.Keyword("extern")
# Any C type that maps directly onto a ctypes type (see C_TO_CTYPES above).
NATIVE_TYPENAME = pp.MatchFirst([pp.Keyword(n) for n in C_TO_CTYPES.keys()])

# Macros.
# ------------------------------------------------------------------------------
# Header include-guard; ignored when matching flag definitions below.
HDR_GUARD = DEFINE + "THIRD_PARTY_MUJOCO_HDRS_"
# e.g. "#define mjUSEDOUBLE"
DEF_FLAG = pp.Group(
    DEFINE +
    NAME("name") +
    (COMMENT("comment") | EOL)).ignore(HDR_GUARD)
# e.g. "#define mjMINVAL 1E-14 // minimum value in any denominator"
DEF_CONST = pp.Group(
    DEFINE +
    NAME("name") +
    (NUMBER | NAME)("value") +
    (COMMENT("comment") | EOL))
# One size expression inside an X-macro row: names/integers joined by `*`,
# where a name may be wrapped in `MJ_M(...)` (the wrapper is stripped).
XDIM = pp.delimitedList(
    (
        pp.Suppress(pp.Keyword("MJ_M") + LPAREN) +
        NAME +
        pp.Suppress(RPAREN)
    ) | NAME | INT, delim="*", combine=True)
# e.g. "X( mjtNum*, name_textadr, ntext, 1 )"
XMEMBER = pp.Group(
    X +
    LPAREN +
    (NATIVE_TYPENAME | NAME)("typename") +
    pp.Optional(PTR("ptr")) +
    COMMA +
    NAME("name") +
    COMMA +
    pp.delimitedList(XDIM, delim=COMMA)("dims") +
    RPAREN)
# A whole `#define SOME_XMACRO \` followed by backslash-continued X(...) rows.
XMACRO = pp.Group(
    pp.Optional(COMMENT("comment")) +
    DEFINE +
    NAME("name") +
    CONTINUATION +
    pp.delimitedList(XMEMBER, delim=CONTINUATION)("members"))

# Type/variable declarations.
# ------------------------------------------------------------------------------
TYPEDEF = pp.Keyword("typedef").suppress()
STRUCT = pp.Keyword("struct")
UNION = pp.Keyword("union")
ENUM = pp.Keyword("enum").suppress()
# e.g. "typedef unsigned char mjtByte; // used for true/false"
TYPE_DECL = pp.Group(
    TYPEDEF +
    pp.Optional(STRUCT) +
    (NATIVE_TYPENAME | NAME)("typename") +
    pp.Optional(PTR("ptr")) +
    NAME("name") +
    SEMI +
    pp.Optional(COMMENT("comment")))
# Declarations of flags/constants/types.
UNCOND_DECL = DEF_FLAG | DEF_CONST | TYPE_DECL
# Declarations inside (possibly nested) #if(n)def... #else... #endif... blocks.
COND_DECL = _nested_if_else(IFDEF, NAME, ELSE, ENDIF, UNCOND_DECL, UNCOND_DECL)
# Note: this doesn't work for '#if defined(FLAG)' blocks
# e.g. "mjtNum gravity[3]; // gravitational acceleration"
STRUCT_MEMBER = pp.Group(
    pp.Optional(STRUCT("struct")) +
    (NATIVE_TYPENAME | NAME)("typename") +
    pp.Optional(PTR("ptr")) +
    NAME("name") +
    pp.ZeroOrMore(ARRAY_DIM)("size") +
    SEMI +
    pp.Optional(COMMENT("comment")))
# Struct declaration within a union (non-nested).
UNION_STRUCT_DECL = pp.Group(
    STRUCT("struct") +
    pp.Optional(NAME("typename")) +
    pp.Optional(COMMENT("comment")) +
    LBRACE +
    pp.OneOrMore(STRUCT_MEMBER)("members") +
    RBRACE +
    pp.Optional(NAME("name")) +
    SEMI)
# `union { ... };` with no name — members are structs or plain fields.
ANONYMOUS_UNION_DECL = pp.Group(
    pp.Optional(MULTILINE_COMMENT("comment")) +
    UNION("anonymous_union") +
    LBRACE +
    pp.OneOrMore(
        UNION_STRUCT_DECL |
        STRUCT_MEMBER |
        COMMENT.suppress())("members") +
    RBRACE +
    SEMI)
# Multiple (possibly nested) struct declarations.
NESTED_STRUCTS = _nested_scopes(
    opening=(STRUCT +
             pp.Optional(NAME("typename")) +
             pp.Optional(COMMENT("comment")) +
             LBRACE),
    closing=(RBRACE + pp.Optional(NAME("name")) + SEMI),
    body=pp.OneOrMore(
        STRUCT_MEMBER |
        ANONYMOUS_UNION_DECL |
        COMMENT.suppress())("members"))
# Enum value written as a bit shift, e.g. `1<<3`.
BIT_LSHIFT = INT("bit_lshift_a") + pp.Suppress("<<") + INT("bit_lshift_b")
ENUM_LINE = pp.Group(
    NAME("name") +
    pp.Optional(EQUAL + (INT("value") ^ BIT_LSHIFT)) +
    pp.Optional(COMMA) +
    pp.Optional(COMMENT("comment")))
ENUM_DECL = pp.Group(
    TYPEDEF +
    ENUM +
    NAME("typename") +
    pp.Optional(COMMENT("comment")) +
    LBRACE +
    pp.OneOrMore(ENUM_LINE | COMMENT.suppress())("members") +
    RBRACE +
    pp.Optional(NAME("name")) +
    SEMI)

# Function declarations.
# ------------------------------------------------------------------------------
MJAPI = pp.Keyword("MJAPI").suppress()
CONST = pp.Keyword("const")
# A bare `void` (not `void*`) as return type or empty argument list.
VOID = pp.Group(pp.Keyword("void") + ~PTR).suppress()
ARG = pp.Group(
    pp.Optional(CONST("is_const")) +
    (NATIVE_TYPENAME | NAME)("typename") +
    pp.Optional(PTR("ptr")) +
    NAME("name") +
    pp.Optional(ARRAY_DIM("size")))
RET = pp.Group(
    pp.Optional(CONST("is_const")) +
    (NATIVE_TYPENAME | NAME)("typename") +
    pp.Optional(PTR("ptr")))
FUNCTION_DECL = (
    (VOID | RET("return_value")) +
    NAME("name") +
    LPAREN +
    (VOID | pp.delimitedList(ARG, delim=COMMA)("arguments")) +
    RPAREN +
    SEMI)
MJAPI_FUNCTION_DECL = pp.Group(
    pp.Optional(MULTILINE_COMMENT("comment")) +
    pp.LineStart() +
    MJAPI +
    FUNCTION_DECL)
# e.g.
# // predicate function: set enable/disable based on item category
# typedef int (*mjfItemEnable)(int category, void* data);
FUNCTION_PTR_TYPE_DECL = pp.Group(
    pp.Optional(MULTILINE_COMMENT("comment")) +
    TYPEDEF +
    RET("return_type") +
    LPAREN +
    PTR +
    NAME("typename") +
    RPAREN +
    LPAREN +
    (VOID | pp.delimitedList(ARG, delim=COMMA)("arguments")) +
    RPAREN +
    SEMI)

# Global variables.
# ------------------------------------------------------------------------------
# e.g. "MJAPI extern const char* name[3][4];"
MJAPI_STRING_ARRAY = (
    MJAPI +
    EXTERN +
    CONST +
    pp.Keyword("char") +
    PTR +
    NAME("name") +
    pp.OneOrMore(ARRAY_DIM)("dims") +
    SEMI)
# e.g. "MJAPI extern some_typename some_name;"
MJAPI_FUNCTION_PTR = MJAPI + EXTERN + NAME("typename") + NAME("name") + SEMI
|
|
import pytest
from urlparse import urlparse
from api_tests.nodes.views.test_node_contributors_list import NodeCRUDTestCase
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf_tests.factories import (
ProjectFactory,
CommentFactory,
RegistrationFactory,
InstitutionFactory,
WithdrawnRegistrationFactory,
)
class TestWithdrawnRegistrations(NodeCRUDTestCase):
    """API tests covering access restrictions on withdrawn registrations.

    A withdrawn registration should expose only a limited set of attributes
    and relationships; most sub-resource endpoints must return 403/404.
    """

    @pytest.fixture()
    def institution_one(self):
        return InstitutionFactory()

    @pytest.fixture()
    def registration(self, user, project_public, institution_one):
        registration = RegistrationFactory(creator=user, project=project_public)
        registration.affiliated_institutions.add(institution_one)
        return registration

    @pytest.fixture()
    def registration_with_child(self, user, project_public):
        # Parent project with one child component; both registered.
        project = ProjectFactory(creator=user, is_public=True)
        child = ProjectFactory(creator=user, is_public=True, parent=project)
        registration = RegistrationFactory(project=project, is_public=True)
        RegistrationFactory(project=child, is_public=True)
        return registration

    @pytest.fixture()
    def withdrawn_registration_with_child(self, user, registration_with_child):
        withdrawn_registration = WithdrawnRegistrationFactory(
            registration=registration_with_child, user=registration_with_child.creator)
        withdrawn_registration.justification = 'We made a major error.'
        withdrawn_registration.save()
        return withdrawn_registration

    @pytest.fixture()
    def withdrawn_registration(self, registration):
        withdrawn_registration = WithdrawnRegistrationFactory(
            registration=registration, user=registration.creator)
        withdrawn_registration.justification = 'We made a major error.'
        withdrawn_registration.save()
        return withdrawn_registration

    @pytest.fixture()
    def project_pointer_public(self):
        return ProjectFactory(is_public=True)

    @pytest.fixture()
    def pointer_public(self, user, project_public, project_pointer_public):
        return project_public.add_pointer(
            project_pointer_public, auth=Auth(user), save=True)

    @pytest.fixture()
    def url_withdrawn(self, registration):
        return '/{}registrations/{}/?version=2.2'.format(
            API_BASE, registration._id)

    def test_can_access_withdrawn_contributors(
            self, app, user, registration, withdrawn_registration):
        url = '/{}registrations/{}/contributors/'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 200

    def test_can_access_withdrawn_contributor_detail(
            self, app, user, registration):
        url = '/{}registrations/{}/contributors/{}/'.format(
            API_BASE, registration._id, user._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 200

    def test_cannot_errors(
            self, app, user, project_public, registration,
            withdrawn_registration, pointer_public):
        # test_cannot_access_withdrawn_children
        url = '/{}registrations/{}/children/'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

        # test_cannot_return_a_withdrawn_registration_at_node_detail_endpoint
        url = '/{}nodes/{}/'.format(API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404

        # test_cannot_delete_a_withdrawn_registration
        url = '/{}registrations/{}/'.format(API_BASE, registration._id)
        res = app.delete_json_api(url, auth=user.auth, expect_errors=True)
        registration.reload()
        assert res.status_code == 405

        # test_cannot_access_withdrawn_files_list
        url = '/{}registrations/{}/files/'.format(API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

        # test_cannot_access_withdrawn_node_links_detail
        url = '/{}registrations/{}/node_links/{}/'.format(
            API_BASE, registration._id, pointer_public._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

        # test_cannot_access_withdrawn_node_links_list
        url = '/{}registrations/{}/node_links/'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

        # test_cannot_access_withdrawn_registrations_list
        registration.save()
        url = '/{}registrations/{}/registrations/'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

    def test_cannot_access_withdrawn_comments(
            self, app, user, project_public, pointer_public,
            registration, withdrawn_registration):
        project_public = ProjectFactory(is_public=True, creator=user)
        CommentFactory(node=project_public, user=user)
        url = '/{}registrations/{}/comments/'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

    def test_cannot_access_withdrawn_node_logs(
            self, app, user, project_public, pointer_public,
            registration, withdrawn_registration):
        ProjectFactory(is_public=True, creator=user)
        url = '/{}registrations/{}/logs/'.format(API_BASE, registration._id)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 403

    def test_withdrawn_registrations_display_limited_attributes_fields(
            self, app, user, registration, withdrawn_registration, url_withdrawn):
        # (removed a redundant `registration = registration` self-assignment)
        res = app.get(url_withdrawn, auth=user.auth)
        assert res.status_code == 200
        attributes = res.json['data']['attributes']
        registration.reload()
        expected_attributes = {
            'title': registration.title,
            'description': registration.description,
            'date_created': registration.created.isoformat().replace(
                '+00:00',
                'Z'),
            'date_registered': registration.registered_date.isoformat().replace(
                '+00:00',
                'Z'),
            'date_modified': registration.last_logged.isoformat().replace(
                '+00:00',
                'Z'),
            'date_withdrawn': registration.retraction.date_retracted.isoformat().replace(
                '+00:00',
                'Z'),
            'withdrawal_justification': registration.retraction.justification,
            'public': None,
            'registration': True,
            'fork': None,
            'collection': None,
            'tags': None,
            'withdrawn': True,
            'pending_withdrawal': None,
            'pending_registration_approval': None,
            'pending_embargo_approval': None,
            'embargo_end_date': None,
            'registered_meta': None,
            'current_user_permissions': None,
            'registration_supplement': registration.registered_schema.first().name}

        for attribute in expected_attributes:
            assert expected_attributes[attribute] == attributes[attribute]

        contributors = urlparse(
            res.json['data']['relationships']['contributors']['links']['related']['href']).path
        assert contributors == '/{}registrations/{}/contributors/'.format(
            API_BASE, registration._id)

    def test_withdrawn_registrations_display_limited_relationship_fields(
            self, app, user, registration, withdrawn_registration):
        url_withdrawn = '/{}registrations/{}/?version=2.14'.format(API_BASE, registration._id)
        res = app.get(url_withdrawn, auth=user.auth)

        assert 'children' not in res.json['data']['relationships']
        assert 'comments' not in res.json['data']['relationships']
        assert 'node_links' not in res.json['data']['relationships']
        assert 'registrations' not in res.json['data']['relationships']
        assert 'parent' in res.json['data']['relationships']
        assert 'forked_from' not in res.json['data']['relationships']
        assert 'files' not in res.json['data']['relationships']
        assert 'logs' not in res.json['data']['relationships']
        assert 'registered_by' not in res.json['data']['relationships']
        assert 'registered_from' in res.json['data']['relationships']
        assert 'root' in res.json['data']['relationships']
        assert 'affiliated_institutions' in res.json['data']['relationships']
        assert 'license' not in res.json['data']['relationships']
        assert 'identifiers' in res.json['data']['relationships']

    def test_field_specific_related_counts_ignored_if_hidden_field_on_withdrawn_registration(
            self, app, user, registration, withdrawn_registration):
        url = '/{}registrations/{}/?related_counts=children'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        assert 'children' not in res.json['data']['relationships']
        assert 'contributors' in res.json['data']['relationships']

    def test_field_specific_related_counts_retrieved_if_visible_field_on_withdrawn_registration(
            self, app, user, registration, withdrawn_registration):
        url = '/{}registrations/{}/?related_counts=contributors'.format(
            API_BASE, registration._id)
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        assert res.json['data']['relationships']['contributors']['links']['related']['meta']['count'] == 1

    def test_child_inherits_withdrawl_justication_and_date_withdrawn(
            self, app, user, withdrawn_registration_with_child, registration_with_child):
        # NOTE: method name has historical typos ("withdrawl_justication");
        # kept as-is so test selection by name is unaffected.
        reg_child = registration_with_child.node_relations.first().child
        url = '/{}registrations/{}/?version=2.2'.format(API_BASE, reg_child._id)
        res = app.get(url, auth=user.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['withdrawal_justification'] == withdrawn_registration_with_child.justification
        formatted_date_retracted = withdrawn_registration_with_child.date_retracted.isoformat().replace('+00:00', 'Z')
        assert res.json['data']['attributes']['date_withdrawn'] == formatted_date_retracted
|
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1ContainerStatus(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        Swagger model

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Maps python attribute name -> swagger type name; drives to_dict().
        self.swagger_types = {
            'name': 'str',
            'state': 'V1ContainerState',
            'last_state': 'V1ContainerState',
            'ready': 'bool',
            'restart_count': 'int',
            'image': 'str',
            'image_id': 'str',
            'container_id': 'str'
        }

        # Maps python attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'name': 'name',
            'state': 'state',
            'last_state': 'lastState',
            'ready': 'ready',
            'restart_count': 'restartCount',
            'image': 'image',
            'image_id': 'imageID',
            'container_id': 'containerID'
        }

        # Backing fields for the properties below; all default to None.
        self._name = None
        self._state = None
        self._last_state = None
        self._ready = None
        self._restart_count = None
        self._image = None
        self._image_id = None
        self._container_id = None

    @property
    def name(self):
        """
        Gets the name of this V1ContainerStatus.
        name of the container; must be a DNS_LABEL and unique within the pod; cannot be updated

        :return: The name of this V1ContainerStatus.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this V1ContainerStatus.
        name of the container; must be a DNS_LABEL and unique within the pod; cannot be updated

        :param name: The name of this V1ContainerStatus.
        :type: str
        """
        self._name = name

    @property
    def state(self):
        """
        Gets the state of this V1ContainerStatus.
        details about the container's current condition

        :return: The state of this V1ContainerStatus.
        :rtype: V1ContainerState
        """
        return self._state

    @state.setter
    def state(self, state):
        """
        Sets the state of this V1ContainerStatus.
        details about the container's current condition

        :param state: The state of this V1ContainerStatus.
        :type: V1ContainerState
        """
        self._state = state

    @property
    def last_state(self):
        """
        Gets the last_state of this V1ContainerStatus.
        details about the container's last termination condition

        :return: The last_state of this V1ContainerStatus.
        :rtype: V1ContainerState
        """
        return self._last_state

    @last_state.setter
    def last_state(self, last_state):
        """
        Sets the last_state of this V1ContainerStatus.
        details about the container's last termination condition

        :param last_state: The last_state of this V1ContainerStatus.
        :type: V1ContainerState
        """
        self._last_state = last_state

    @property
    def ready(self):
        """
        Gets the ready of this V1ContainerStatus.
        specifies whether the container has passed its readiness probe

        :return: The ready of this V1ContainerStatus.
        :rtype: bool
        """
        return self._ready

    @ready.setter
    def ready(self, ready):
        """
        Sets the ready of this V1ContainerStatus.
        specifies whether the container has passed its readiness probe

        :param ready: The ready of this V1ContainerStatus.
        :type: bool
        """
        self._ready = ready

    @property
    def restart_count(self):
        """
        Gets the restart_count of this V1ContainerStatus.
        the number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed

        :return: The restart_count of this V1ContainerStatus.
        :rtype: int
        """
        return self._restart_count

    @restart_count.setter
    def restart_count(self, restart_count):
        """
        Sets the restart_count of this V1ContainerStatus.
        the number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed

        :param restart_count: The restart_count of this V1ContainerStatus.
        :type: int
        """
        self._restart_count = restart_count

    @property
    def image(self):
        """
        Gets the image of this V1ContainerStatus.
        image of the container; see http://releases.k8s.io/v1.0.4/docs/images.md

        :return: The image of this V1ContainerStatus.
        :rtype: str
        """
        return self._image

    @image.setter
    def image(self, image):
        """
        Sets the image of this V1ContainerStatus.
        image of the container; see http://releases.k8s.io/v1.0.4/docs/images.md

        :param image: The image of this V1ContainerStatus.
        :type: str
        """
        self._image = image

    @property
    def image_id(self):
        """
        Gets the image_id of this V1ContainerStatus.
        ID of the container's image

        :return: The image_id of this V1ContainerStatus.
        :rtype: str
        """
        return self._image_id

    @image_id.setter
    def image_id(self, image_id):
        """
        Sets the image_id of this V1ContainerStatus.
        ID of the container's image

        :param image_id: The image_id of this V1ContainerStatus.
        :type: str
        """
        self._image_id = image_id

    @property
    def container_id(self):
        """
        Gets the container_id of this V1ContainerStatus.
        container's ID in the format 'docker://<container_id>'; see http://releases.k8s.io/v1.0.4/docs/container-environment.md#container-information

        :return: The container_id of this V1ContainerStatus.
        :rtype: str
        """
        return self._container_id

    @container_id.setter
    def container_id(self, container_id):
        """
        Sets the container_id of this V1ContainerStatus.
        container's ID in the format 'docker://<container_id>'; see http://releases.k8s.io/v1.0.4/docs/container-environment.md#container-information

        :param container_id: The container_id of this V1ContainerStatus.
        :type: str
        """
        self._container_id = container_id

    def to_dict(self):
        """
        Return model properties dict
        """
        result = {}

        # Recursively convert nested models (anything with to_dict) to dicts.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Return model properties str
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
|
|
"""
Provide functionality to TTS.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/tts/
"""
import asyncio
import functools
import hashlib
import logging
import mimetypes
import os
import re
from aiohttp import web
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.bootstrap import async_prepare_setup_platform
from homeassistant.core import callback
from homeassistant.config import load_yaml_config_file
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.media_player import (
SERVICE_PLAY_MEDIA, MEDIA_TYPE_MUSIC, ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE, DOMAIN as DOMAIN_MP)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform
import homeassistant.helpers.config_validation as cv
DOMAIN = 'tts'
DEPENDENCIES = ['http']

_LOGGER = logging.getLogger(__name__)

# Keys of the per-entry dicts stored in SpeechManager.mem_cache.
MEM_CACHE_FILENAME = 'filename'
MEM_CACHE_VOICE = 'voice'

# Platform configuration keys.
CONF_LANG = 'language'
CONF_CACHE = 'cache'
CONF_CACHE_DIR = 'cache_dir'
CONF_TIME_MEMORY = 'time_memory'

DEFAULT_CACHE = True
DEFAULT_CACHE_DIR = "tts"
DEFAULT_TIME_MEMORY = 300  # seconds a voice stays in the in-memory cache

SERVICE_SAY = 'say'
SERVICE_CLEAR_CACHE = 'clear_cache'

ATTR_MESSAGE = 'message'
ATTR_CACHE = 'cache'
ATTR_LANGUAGE = 'language'

# Cached voice files are named "<sha1-of-message>_<language>_<engine>.<ext>".
_RE_VOICE_FILE = re.compile(r"([a-f0-9]{40})_([^_]+)_([a-z]+)\.[a-z0-9]{3,4}")
# Cache key template: "<hash>_<language>_<engine>" (always lower-cased).
KEY_PATTERN = '{}_{}_{}'

PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_CACHE, default=DEFAULT_CACHE): cv.boolean,
    vol.Optional(CONF_CACHE_DIR, default=DEFAULT_CACHE_DIR): cv.string,
    vol.Optional(CONF_TIME_MEMORY, default=DEFAULT_TIME_MEMORY):
        vol.All(vol.Coerce(int), vol.Range(min=60, max=57600)),
})

SCHEMA_SERVICE_SAY = vol.Schema({
    vol.Required(ATTR_MESSAGE): cv.string,
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
    vol.Optional(ATTR_CACHE): cv.boolean,
    vol.Optional(ATTR_LANGUAGE): cv.string
})

SCHEMA_SERVICE_CLEAR_CACHE = vol.Schema({})
@asyncio.coroutine
def async_setup(hass, config):
    """Setup TTS.

    Initializes the speech cache, registers the HTTP proxy view, sets up
    each configured tts platform and registers the per-platform `say`
    service plus a global `clear_cache` service.
    """
    tts = SpeechManager(hass)

    try:
        # Only the first tts config entry supplies cache settings.
        conf = config[DOMAIN][0] if len(config.get(DOMAIN, [])) > 0 else {}

        use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
        cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
        time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)

        yield from tts.async_init_cache(use_cache, cache_dir, time_memory)
    except (HomeAssistantError, KeyError) as err:
        _LOGGER.error("Error on cache init %s", err)
        return False

    hass.http.register_view(TextToSpeechView(tts))

    descriptions = yield from hass.loop.run_in_executor(
        None, load_yaml_config_file,
        os.path.join(os.path.dirname(__file__), 'services.yaml'))

    # NOTE(review): nesting reconstructed from closure use — async_say_handle
    # reads `p_type`, so it (and its service registration) must live inside
    # async_setup_platform; confirm against upstream layout.
    @asyncio.coroutine
    def async_setup_platform(p_type, p_config, disc_info=None):
        """Setup a tts platform."""
        platform = yield from async_prepare_setup_platform(
            hass, config, DOMAIN, p_type)
        if platform is None:
            return

        try:
            if hasattr(platform, 'async_get_engine'):
                provider = yield from platform.async_get_engine(
                    hass, p_config)
            else:
                # Sync platforms get their engine created in the executor.
                provider = yield from hass.loop.run_in_executor(
                    None, platform.get_engine, hass, p_config)

            if provider is None:
                _LOGGER.error('Error setting up platform %s', p_type)
                return

            tts.async_register_engine(p_type, provider, p_config)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception('Error setting up platform %s', p_type)
            return

        @asyncio.coroutine
        def async_say_handle(service):
            """Service handle for say."""
            entity_ids = service.data.get(ATTR_ENTITY_ID)
            message = service.data.get(ATTR_MESSAGE)
            cache = service.data.get(ATTR_CACHE)
            language = service.data.get(ATTR_LANGUAGE)

            try:
                url = yield from tts.async_get_url(
                    p_type, message, cache=cache, language=language)
            except HomeAssistantError as err:
                _LOGGER.error("Error on init tts: %s", err)
                return

            # Forward the proxy URL to media_player for playback.
            data = {
                ATTR_MEDIA_CONTENT_ID: url,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
            }

            if entity_ids:
                data[ATTR_ENTITY_ID] = entity_ids

            yield from hass.services.async_call(
                DOMAIN_MP, SERVICE_PLAY_MEDIA, data, blocking=True)

        # One say service per platform, e.g. "tts.google_say".
        hass.services.async_register(
            DOMAIN, "{}_{}".format(p_type, SERVICE_SAY), async_say_handle,
            descriptions.get(SERVICE_SAY), schema=SCHEMA_SERVICE_SAY)

    setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
                   in config_per_platform(config, DOMAIN)]

    if setup_tasks:
        yield from asyncio.wait(setup_tasks, loop=hass.loop)

    @asyncio.coroutine
    def async_clear_cache_handle(service):
        """Handle clear cache service call."""
        yield from tts.async_clear_cache()

    hass.services.async_register(
        DOMAIN, SERVICE_CLEAR_CACHE, async_clear_cache_handle,
        descriptions.get(SERVICE_CLEAR_CACHE),
        schema=SCHEMA_SERVICE_CLEAR_CACHE)

    return True
class SpeechManager(object):
    """Representation of a speech store.

    Tracks registered TTS providers and caches generated speech both in
    memory (entries expire after `time_memory` seconds) and, optionally,
    on disk in `cache_dir`.
    """

    def __init__(self, hass):
        """Initialize a speech store."""
        self.hass = hass
        self.providers = {}

        self.use_cache = DEFAULT_CACHE
        self.cache_dir = DEFAULT_CACHE_DIR
        self.time_memory = DEFAULT_TIME_MEMORY
        # key -> filename of voices persisted on disk.
        self.file_cache = {}
        # key -> {MEM_CACHE_FILENAME: ..., MEM_CACHE_VOICE: ...}
        self.mem_cache = {}

    @asyncio.coroutine
    def async_init_cache(self, use_cache, cache_dir, time_memory):
        """Init config folder and load file cache.

        Raises HomeAssistantError if the cache dir cannot be created/read.
        """
        self.use_cache = use_cache
        self.time_memory = time_memory

        def init_tts_cache_dir(cache_dir):
            """Init cache folder."""
            if not os.path.isabs(cache_dir):
                cache_dir = self.hass.config.path(cache_dir)
            if not os.path.isdir(cache_dir):
                _LOGGER.info("Create cache dir %s.", cache_dir)
                os.mkdir(cache_dir)
            return cache_dir

        try:
            self.cache_dir = yield from self.hass.loop.run_in_executor(
                None, init_tts_cache_dir, cache_dir)
        except OSError as err:
            raise HomeAssistantError(
                "Can't init cache dir {}".format(err))

        def get_cache_files():
            """Return a dict of given engine files."""
            cache = {}

            folder_data = os.listdir(self.cache_dir)
            for file_data in folder_data:
                record = _RE_VOICE_FILE.match(file_data)
                if record:
                    key = KEY_PATTERN.format(
                        record.group(1), record.group(2), record.group(3))
                    cache[key.lower()] = file_data.lower()
            return cache

        try:
            cache_files = yield from self.hass.loop.run_in_executor(
                None, get_cache_files)
        except OSError as err:
            raise HomeAssistantError(
                "Can't read cache dir {}".format(err))

        if cache_files:
            self.file_cache.update(cache_files)

    @asyncio.coroutine
    def async_clear_cache(self):
        """Read file cache and delete files."""
        self.mem_cache = {}

        def remove_files():
            """Remove files from filesystem."""
            for _, filename in self.file_cache.items():
                try:
                    os.remove(os.path.join(self.cache_dir, filename))
                except OSError:
                    # Best effort: a missing file shouldn't abort the sweep.
                    pass

        yield from self.hass.loop.run_in_executor(None, remove_files)
        self.file_cache = {}

    @callback
    def async_register_engine(self, engine, provider, config):
        """Register a TTS provider."""
        provider.hass = self.hass
        if CONF_LANG in config:
            provider.language = config.get(CONF_LANG)
        self.providers[engine] = provider

    @asyncio.coroutine
    def async_get_url(self, engine, message, cache=None, language=None):
        """Get URL for play message.

        This method is a coroutine.
        """
        msg_hash = hashlib.sha1(bytes(message, 'utf-8')).hexdigest()
        language_key = language or self.providers[engine].language
        key = KEY_PATTERN.format(msg_hash, language_key, engine).lower()
        use_cache = cache if cache is not None else self.use_cache

        # Is speech already in memory?
        if key in self.mem_cache:
            filename = self.mem_cache[key][MEM_CACHE_FILENAME]
        # Is the file stored in the file cache? Warm memory in background.
        elif use_cache and key in self.file_cache:
            filename = self.file_cache[key]
            self.hass.async_add_job(self.async_file_to_mem(key))
        # Load speech from provider into memory.
        else:
            filename = yield from self.async_get_tts_audio(
                engine, key, message, use_cache, language)

        return "{}/api/tts_proxy/{}".format(
            self.hass.config.api.base_url, filename)

    @asyncio.coroutine
    def async_get_tts_audio(self, engine, key, message, cache, language):
        """Receive TTS and store for view in cache.

        Returns the cache filename. This method is a coroutine.
        """
        provider = self.providers[engine]
        extension, data = yield from provider.async_get_tts_audio(
            message, language)

        if data is None or extension is None:
            raise HomeAssistantError(
                "No TTS from {} for '{}'".format(engine, message))

        # Create file info.
        filename = ("{}.{}".format(key, extension)).lower()

        # Save to memory; optionally persist to disk in the background.
        self._async_store_to_memcache(key, filename, data)

        if cache:
            self.hass.async_add_job(
                self.async_save_tts_audio(key, filename, data))

        return filename

    @asyncio.coroutine
    def async_save_tts_audio(self, key, filename, data):
        """Store voice data to file and file_cache.

        This method is a coroutine.
        """
        voice_file = os.path.join(self.cache_dir, filename)

        def save_speech():
            """Store speech to filesystem."""
            with open(voice_file, 'wb') as speech:
                speech.write(data)

        try:
            yield from self.hass.loop.run_in_executor(None, save_speech)
            self.file_cache[key] = filename
        except OSError:
            _LOGGER.error("Can't write %s", filename)

    @asyncio.coroutine
    def async_file_to_mem(self, key):
        """Load voice from file cache into memory.

        This method is a coroutine.
        """
        filename = self.file_cache.get(key)
        if not filename:
            raise HomeAssistantError("Key {} not in file cache!".format(key))

        voice_file = os.path.join(self.cache_dir, filename)

        def load_speech():
            """Load a speech from filesystem."""
            with open(voice_file, 'rb') as speech:
                return speech.read()

        try:
            data = yield from self.hass.loop.run_in_executor(None, load_speech)
        except OSError:
            raise HomeAssistantError("Can't read {}".format(voice_file))

        self._async_store_to_memcache(key, filename, data)

    @callback
    def _async_store_to_memcache(self, key, filename, data):
        """Store data to memcache and set timer to remove it."""
        self.mem_cache[key] = {
            MEM_CACHE_FILENAME: filename,
            MEM_CACHE_VOICE: data,
        }

        @callback
        def async_remove_from_mem():
            """Cleanup memcache."""
            self.mem_cache.pop(key)

        # Entry expires after time_memory seconds.
        self.hass.loop.call_later(self.time_memory, async_remove_from_mem)

    @asyncio.coroutine
    def async_read_tts(self, filename):
        """Read a voice file and return (content_type, binary data).

        This method is a coroutine.
        """
        record = _RE_VOICE_FILE.match(filename.lower())
        if not record:
            raise HomeAssistantError("Wrong tts file format!")

        key = KEY_PATTERN.format(
            record.group(1), record.group(2), record.group(3))

        if key not in self.mem_cache:
            if key not in self.file_cache:
                # BUG FIX: the message was previously passed logging-style as
                # ("%s not in cache!", key), so the key never got interpolated
                # into the exception text.
                raise HomeAssistantError("{} not in cache!".format(key))
            yield from self.async_file_to_mem(key)

        content, _ = mimetypes.guess_type(filename)
        return (content, self.mem_cache[key][MEM_CACHE_VOICE])
class Provider(object):
    """Represent a single provider."""

    # NOTE(review): these look like they are assigned externally by the
    # integration that creates the provider -- confirm against callers.
    hass = None
    language = None

    def get_tts_audio(self, message, language=None):
        """Load tts audio file from provider."""
        raise NotImplementedError()

    def async_get_tts_audio(self, message, language=None):
        """Load tts audio file from provider.

        Return a tuple of file extension and data as bytes.
        This method must be run in the event loop and returns a coroutine.
        """
        job = functools.partial(self.get_tts_audio, message, language=language)
        return self.hass.loop.run_in_executor(None, job)
class TextToSpeechView(HomeAssistantView):
    """TTS view to serve an speech audio."""

    # NOTE(review): auth is disabled here -- presumably so media players can
    # fetch the generated proxy URLs directly; confirm against integrations.
    requires_auth = False
    url = "/api/tts_proxy/(unknown)"
    name = "api:tts:speech"

    def __init__(self, tts):
        """Initialize a tts view.

        :param tts: the speech manager/cache this view reads audio from.
        """
        self.tts = tts

    @asyncio.coroutine
    def get(self, request, filename):
        """Start a get request.

        Returns the cached audio for *filename*, or HTTP 404 when the
        file cannot be found/parsed by the speech manager.
        """
        try:
            content, data = yield from self.tts.async_read_tts(filename)
        except HomeAssistantError as err:
            _LOGGER.error("Error on load tts: %s", err)
            return web.Response(status=404)

        return web.Response(body=data, content_type=content)
|
|
# Copyright (c) 2016-2021 Adobe Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Legacy UMAPI api from version 1.0 of this library, for backward compatibility.
Clients are strongly encouraged to move to the current version of the API.
"""
# TODO: remove this file when all clients have migrated
import logging
from email.utils import parsedate_tz, mktime_tz
from math import pow
from random import randint
from sys import maxsize
from time import time, sleep
import requests
from .api import Action as NewAction
from .connection import Connection
from .error import RequestError, ServerError, UnavailableError, ArgumentError
# make the retry options module-global so they can be set by clients
# (consumed by make_call() below for its exponential back-off loop)
retry_max_attempts = 4
retry_exponential_backoff_factor = 15 # seconds
retry_random_delay_max = 5 # seconds
# make the logger module-global so it can be set by clients
logger = logging.getLogger(__name__)
class Action(NewAction):
    """
    Here for compatibility with legacy clients only - DO NOT USE!!!
    """
    def __init__(self, user=None, user_key=None, **kwargs):
        """
        Create a legacy Action targeting the given user.

        :param user: the user the action applies to (new-style name)
        :param user_key: legacy alias for user; exactly one of user/user_key
            must be supplied.
        :raises ArgumentError: if neither or both of user and user_key are given.
        """
        # BUG FIX: the ArgumentError instances were constructed but never
        # raised, so invalid argument combinations passed through silently.
        if user is None and user_key is None:
            raise ArgumentError("Must specify one of user or user_key")
        if user and user_key:
            raise ArgumentError("Must specify only one of user or user_key")
        if user_key:
            user = user_key
        NewAction.__init__(self, user=user, **kwargs)

    def do(self, **kwargs):
        """
        Here for compatibility with legacy clients only - DO NOT USE!!!
        This is sort of mix of "append" and "insert": it puts commands in the list,
        with some half smarts about which commands go at the front or back.
        If you add multiple commands to the back in one call, they will get added sorted by command name.
        :param kwargs: the commands in key=val format
        :return: the Action, so you can do Action(...).do(...).do(...)
        """
        # add "create" / "add" / "removeFrom" first
        # (list() because we delete keys from kwargs while iterating)
        for k, v in list(kwargs.items()):
            if k.startswith("create") or k.startswith("addAdobe") or k.startswith("removeFrom"):
                self.commands.append({k: v})
                del kwargs[k]
        # now do the other actions, in a canonical order (to avoid py2/py3 variations)
        for k, v in sorted(kwargs.items()):
            if k in ['add', 'remove']:
                # add/remove wrap their value in a "product" clause
                self.commands.append({k: {"product": v}})
            else:
                self.commands.append({k: v})
        return self
class UMAPI:
    """
    This is here for legacy compatibility ONLY -- DO NOT USE!!!
    The UMAPI object is an authenticated connection that doesn't know the
    organization it was authenticated against, which makes no sense.
    This re-implementation uses a connection under the covers, and it
    carefully checks to see that you are using the connection against
    the right organization!
    """
    def __init__(self, endpoint=None, auth=None, test_mode=False, conn=None, **kwargs):
        """
        Create the legacy wrapper.

        Either pass an existing Connection via *conn*, or pass *auth* and
        *endpoint* (the Connection is then created lazily once an org_id
        is known).

        :raises ArgumentError: if neither or both argument styles are used.
        """
        if isinstance(conn, Connection) and (endpoint is None) and (auth is None):
            # given a connection, remember the org_id it's for.
            self.conn = conn
            self.org_id = conn.org_id
        elif auth and endpoint and (conn is None):
            # wait until we have an org_id to make the connection
            self.conn = None
            self.org_id = None
            self.conn_options = dict(kwargs)
            self.auth = auth
            self.endpoint = str(endpoint)
            self.test_mode = test_mode
        else:
            # BUG FIX: the error was constructed but never raised, so bad
            # argument combinations slipped through silently.
            raise ArgumentError("UMAPI create: you must specify either auth and endpoint, or conn, but not both")

    def _get_conn(self, org_id):
        """Create the lazy Connection, or verify org_id matches the existing one.

        :raises ArgumentError: if org_id differs from the connection's org.
        """
        if not self.conn:
            self.org_id = org_id
            self.conn = Connection(org_id=org_id, auth=self.auth,
                                   user_management_endpoint=self.endpoint,
                                   test_mode=self.test_mode,
                                   **self.conn_options)
        else:
            if org_id != self.org_id:
                # BUG FIX: the error was constructed but never raised.
                raise ArgumentError("OrganizationID (%s) does not match that in access token (%s)" % (org_id, self.org_id))

    def users(self, org_id, page=0):
        """Return one page of the organization's users."""
        self._get_conn(org_id)
        return self._call('/users/%s/%d' % (org_id, page), requests.get)

    def groups(self, org_id, page=0):
        """Return one page of the organization's groups."""
        self._get_conn(org_id)
        return self._call('/groups/%s/%d' % (org_id, page), requests.get)

    def action(self, org_id, action):
        """
        Execute an Action (or an iterable/indexable of Actions) against the org.

        :raises ActionFormatError: if action is neither an Action nor
            iterable/indexable.
        """
        self._get_conn(org_id)
        if not isinstance(action, Action):
            if not isinstance(action, str) and (hasattr(action, "__getitem__") or hasattr(action, "__iter__")):
                actions = [a.wire_dict() for a in action]
            else:
                raise ActionFormatError("action must be iterable, indexable or Action object")
        else:
            actions = [action.wire_dict()]
        if self.test_mode:
            return self._call('/action/%s?testOnly=true' % org_id, requests.post, actions)
        else:
            return self._call('/action/%s' % org_id, requests.post, actions)

    def _call(self, method, call, params=None):
        """
        Issue the request and translate new-style errors to legacy exceptions.

        :raises UMAPIRequestError: server reported an error result, or an
            unknown result status.
        :raises UMAPIRetryError: service temporarily unavailable (retryable).
        :raises UMAPIError: request or server error (not retryable).
        """
        # GETs carry no params; anything else must.
        assert (call is requests.get) is (params is None)
        try:
            res = self.conn.make_call(method, params)
            if res.status_code == 200:
                result = res.json()
                if "result" in result:
                    if result["result"] == "error":
                        raise UMAPIRequestError(result["errors"][0]["errorCode"])
                    else:
                        return result
                else:
                    raise UMAPIRequestError("Request Error -- Unknown Result Status")
        except UnavailableError as ue:
            raise UMAPIRetryError(ue.result)
        except (RequestError, ServerError) as e:
            raise UMAPIError(e.result)
class UMAPIError(Exception):
    """Legacy wrapper for a request/server error response from the UMAPI service."""
    def __init__(self, res):
        message = "UMAPI Error: " + str(res.status_code)
        super(UMAPIError, self).__init__(message)
        self.res = res
class UMAPIRetryError(Exception):
    """Legacy wrapper for a temporarily-unavailable response; callers may retry."""
    def __init__(self, res):
        message = "UMAPI Error: " + str(res.status_code)
        super(UMAPIRetryError, self).__init__(message)
        self.res = res
class UMAPIRequestError(Exception):
    """Legacy wrapper for an error result reported by the UMAPI server."""
    def __init__(self, code):
        super(UMAPIRequestError, self).__init__("Request Error -- %s" % code)
        self.code = code
class ActionFormatError(Exception):
    """Raised when an action argument is neither an Action nor iterable/indexable."""
def paginate(query, org_id, max_pages=maxsize, max_records=maxsize):
    """
    Paginate through all results of a UMAPI query.

    :param query: a query method from a UMAPI instance (callable as a function)
    :param org_id: the organization being queried
    :param max_pages: the max number of pages to collect before returning (default all)
    :param max_records: the max number of records to collect before returning (default all)
    :return: the queried records
    """
    collected = []
    page = 0
    while page < max_pages and len(collected) < max_records:
        response = make_call(query, org_id, page)
        page += 1
        # We are a "dumb helper" that doesn't know which query we were given,
        # so sniff the payload for whichever record key is present
        # (groups takes precedence, matching the historical behavior).
        for record_key in ("groups", "users"):
            if record_key in response:
                collected += response[record_key]
                break
        if response.get("lastPage"):
            break
    return collected
def make_call(query, org_id, page):
    """
    Make a single UMAPI call with error handling and server-controlled throttling.
    (Adapted from sample code at https://www.adobe.io/products/usermanagement/docs/samples#retry)

    Retries up to the module-global retry_max_attempts on UMAPIRetryError,
    honoring the server's Retry-After header when present and otherwise
    using exponential back-off with a random delay.

    :param query: a query method from a UMAPI instance (callable as a function)
    :param org_id: the organization being queried
    :param page: the page number of the desired result set
    :return: the json (dictionary) received from the server (if any)
    """
    wait_time = 0
    num_attempts = 0
    while num_attempts < retry_max_attempts:
        if wait_time > 0:
            # a previous attempt told us to back off before retrying
            sleep(wait_time)
            wait_time = 0
        try:
            num_attempts += 1
            return query(org_id, page)
        except UMAPIRetryError as e:
            logger.warning("UMAPI service temporarily unavailable (attempt %d) -- %s", num_attempts, e.res.status_code)
            # Retry-After may be an HTTP date or a delta-seconds value
            if "Retry-After" in e.res.headers:
                advice = e.res.headers["Retry-After"]
                advised_time = parsedate_tz(advice)
                if advised_time is not None:
                    # header contains date
                    wait_time = int(mktime_tz(advised_time) - time())
                else:
                    # header contains delta seconds
                    wait_time = int(advice)
            if wait_time <= 0:
                # use exponential back-off with random delay
                delay = randint(0, retry_random_delay_max)
                wait_time = (int(pow(2, num_attempts)) * retry_exponential_backoff_factor) + delay
            logger.warning("Next retry in %d seconds...", wait_time)
            continue
        except UMAPIRequestError as e:
            # request-level errors are not retryable; report and give up
            logger.warning("UMAPI error processing request -- %s", e.code)
            return {}
        except UMAPIError as e:
            # HTTP-level errors are not retryable either
            logger.warning("HTTP error processing request -- %s: %s", e.res.status_code, e.res.text)
            return {}
    # all retry attempts exhausted
    logger.error("UMAPI timeout...giving up on results page %d after %d attempts.", page, retry_max_attempts)
    return {}
|
|
# Copyright (c) 2012-2014 roger
# Copyright (c) 2012-2015 Tycho Andersen
# Copyright (c) 2013 dequis
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cairocffi
from .. import bar, hook
from . import base
class TaskList(base._Widget, base.PaddingMixin, base.MarginMixin):
    """Bar widget that draws one clickable box (icon + title) per window
    of the currently displayed group."""

    # (option name, default value, description) triples for add_defaults()
    defaults = [
        ("font", "Arial", "Default font"),
        ("fontsize", None, "Font size. Calculated if None."),
        ("foreground", "ffffff", "Foreground colour"),
        (
            "fontshadow",
            None,
            "font shadow color, default is None(no shadow)"
        ),
        ("borderwidth", 2, "Current group border width"),
        ("border", "215578", "Border colour"),
        ("rounded", True, "To round or not to round borders"),
        (
            "highlight_method",
            "border",
            "Method of highlighting (one of 'border' or 'block') "
            "Uses \*_border color settings"
        ),
        ("urgent_border", "FF0000", "Urgent border color"),
        (
            "urgent_alert_method",
            "border",
            "Method for alerting you of WM urgent "
            "hints (one of 'border' or 'text')"
        ),
        ("max_title_width", 200, "size in pixels of task title")
    ]

    def __init__(self, **config):
        """Create the widget; it stretches to fill the bar (bar.STRETCH)."""
        base._Widget.__init__(self, bar.STRETCH, **config)
        self.add_defaults(TaskList.defaults)
        self.add_defaults(base.PaddingMixin.defaults)
        self.add_defaults(base.MarginMixin.defaults)
        # window.wid -> cairocffi.SurfacePattern of the scaled icon
        self._icons_cache = {}

    def box_width(self, text):
        """Return the pixel width of the box needed for *text*,
        capped at max_title_width."""
        width, _ = self.drawer.max_layout_size(
            [text],
            self.font,
            self.fontsize
        )
        # text width plus horizontal padding, margin and border on both sides
        width = width + self.padding_x * 2 + \
            self.margin_x * 2 + self.borderwidth * 2
        if width > self.max_title_width:
            width = self.max_title_width
        return width

    def _configure(self, qtile, bar):
        """Attach to the bar: derive icon size and font size from the bar
        height, build the text layout, and subscribe to hooks."""
        base._Widget._configure(self, qtile, bar)
        self.icon_size = self.bar.height - (self.borderwidth + 2) * 2
        if self.fontsize is None:
            # fit the text inside the bar after margins/border/padding
            calc = self.bar.height - self.margin_y * 2 - \
                self.borderwidth * 2 - self.padding_y * 2
            self.fontsize = max(calc, 1)
        self.layout = self.drawer.textlayout(
            "",
            "ffffff",
            self.font,
            self.fontsize,
            self.fontshadow,
            wrap=False
        )
        self.setup_hooks()

    def update(self, window=None):
        """Redraw the bar if *window* (or no particular window) belongs to
        the group currently shown on this bar's screen."""
        group = self.bar.screen.group
        if not window or window and window.group is group:
            self.bar.draw()

    def remove_icon_cache(self, window):
        """Drop the cached icon surface for *window*, if any."""
        wid = window.window.wid
        if wid in self._icons_cache:
            self._icons_cache.pop(wid)

    def invalidate_cache(self, window):
        """Re-render after a window's icon changed: purge cache, redraw."""
        self.remove_icon_cache(window)
        self.update(window)

    def setup_hooks(self):
        """Subscribe to every hook that can change what this widget shows."""
        hook.subscribe.window_name_change(self.update)
        hook.subscribe.focus_change(self.update)
        hook.subscribe.float_change(self.update)
        hook.subscribe.client_urgent_hint_changed(self.update)
        hook.subscribe.net_wm_icon_change(self.invalidate_cache)
        hook.subscribe.client_killed(self.remove_icon_cache)

    def drawtext(self, text, textcolor, width):
        """Load *text* into the shared layout with the given color and
        optional fixed width (None leaves the width unconstrained)."""
        self.layout.text = text
        self.layout.font_family = self.font
        self.layout.font_size = self.fontsize
        self.layout.colour = textcolor
        if width is not None:
            self.layout.width = width

    def drawbox(self, offset, text, bordercolor, textcolor, rounded=False,
                block=False, width=None):
        """Draw one task box at *offset*: framed text, filled when
        *block* highlighting is requested."""
        self.drawtext(text, textcolor, width)
        # extra left padding leaves room for the window icon
        padding_x = [self.padding_x + self.icon_size + 4, self.padding_x]
        framed = self.layout.framed(
            self.borderwidth,
            bordercolor,
            padding_x,
            self.padding_y
        )
        if block:
            framed.draw_fill(offset, self.margin_y, rounded)
        else:
            framed.draw(offset, self.margin_y, rounded)

    def get_clicked(self, x, y):
        """Return the window whose box contains the click x-coordinate,
        or None if the click landed outside every box."""
        window = None
        new_width = width = 0
        for w in self.bar.screen.group.windows:
            # each box occupies icon_size + box_width horizontal pixels
            new_width += self.icon_size + self.box_width(w.name)
            if x >= width and x <= new_width:
                window = w
                break
            width = new_width
        return window

    def button_press(self, x, y, button):
        """Left click focuses the clicked window (and raises it if floating)."""
        window = None
        current_win = self.bar.screen.group.currentWindow

        # TODO: support scroll
        if button == 1:
            window = self.get_clicked(x, y)

        if window and window is not current_win:
            window.group.focus(window, False)
            if window.floating:
                window.cmd_bring_to_front()

    def get_window_icon(self, window):
        """Return a cached (or newly built) cairo pattern for the window's
        icon, scaled to fit self.icon_size."""
        cache = self._icons_cache.get(window.window.wid)
        if cache:
            return cache

        # pick the available icon whose size is closest to icon_size;
        # keys look like "WxH" strings — the width is used for the match
        icons = sorted(
            iter(window.icons.items()),
            key=lambda x: abs(self.icon_size - int(x[0].split("x")[0]))
        )
        icon = icons[0]
        width, height = map(int, icon[0].split("x"))

        img = cairocffi.ImageSurface.create_for_data(
            icon[1],
            cairocffi.FORMAT_ARGB32,
            width,
            height
        )

        surface = cairocffi.SurfacePattern(img)

        scaler = cairocffi.Matrix()

        if height != self.icon_size:
            # scale the pattern so the icon's height matches icon_size
            sp = height / float(self.icon_size)

            height = self.icon_size
            width = width / sp

            scaler.scale(sp, sp)
        surface.set_matrix(scaler)
        self._icons_cache[window.window.wid] = surface
        return surface

    def draw_icon(self, window, offset):
        """Paint the window's icon inside its box, starting at *offset*."""
        if not window.icons:
            return

        x = offset + self.padding_x + self.borderwidth + 2 + self.margin_x
        y = self.padding_y + self.borderwidth

        surface = self.get_window_icon(window)

        self.drawer.ctx.save()
        self.drawer.ctx.translate(x, y)
        self.drawer.ctx.set_source(surface)
        self.drawer.ctx.paint()
        self.drawer.ctx.restore()

    def draw(self):
        """Render every window of the current group as a box with a state
        prefix, border highlight and icon, then blit to the bar."""
        self.drawer.clear(self.background or self.bar.background)

        offset = 0

        for w in self.bar.screen.group.windows:
            state = ''
            # state prefixes: "[] " maximized, "_ " minimized, "V " floating
            if w is None:
                pass
            elif w.maximized:
                state = '[] '
            elif w.minimized:
                state = '_ '
            elif w.floating:
                state = 'V '

            task = "%s%s" % (state, w.name if w and w.name else " ")

            if w.urgent:
                border = self.urgent_border
            elif w is w.group.currentWindow:
                border = self.border
            else:
                # unfocused, non-urgent windows get an invisible border
                border = self.background or self.bar.background

            bw = self.box_width(task)
            self.drawbox(
                self.margin_x + offset,
                task,
                border,
                self.foreground,
                self.rounded,
                self.highlight_method == 'block',
                bw - self.margin_x * 2 - self.padding_x * 2
            )

            self.draw_icon(w, offset)

            offset += bw + self.icon_size

        self.drawer.draw(self.offset, self.width)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import contextlib
import mock
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_manager as manager
from neutron.tests import base
class TestManager(base.BaseTestCase):
    """Unit tests for LbaasAgentManager.

    setUp wires the manager to a single mocked device driver ('devdriver')
    with two known pool instances ('1' and '2'); each test then checks the
    exact driver/RPC call sequence for one manager operation.
    """

    def setUp(self):
        super(TestManager, self).setUp()
        self.addCleanup(mock.patch.stopall)
        mock_conf = mock.Mock()
        mock_conf.device_driver = ['devdriver']

        self.mock_importer = mock.patch.object(manager, 'importutils').start()

        rpc_mock_cls = mock.patch(
            'neutron.services.loadbalancer.agent.agent_api.LbaasAgentApi'
        ).start()

        self.mgr = manager.LbaasAgentManager(mock_conf)
        self.rpc_mock = rpc_mock_cls.return_value
        self.log = mock.patch.object(manager, 'LOG').start()
        self.driver_mock = mock.Mock()
        # two pre-deployed pools, both handled by the mocked driver
        self.mgr.device_drivers = {'devdriver': self.driver_mock}
        self.mgr.instance_mapping = {'1': 'devdriver', '2': 'devdriver'}
        self.mgr.needs_resync = False

    def test_initialize_service_hook(self):
        with mock.patch.object(self.mgr, 'sync_state') as sync:
            self.mgr.initialize_service_hook(mock.Mock())
            sync.assert_called_once_with()

    def test_periodic_resync_needs_sync(self):
        with mock.patch.object(self.mgr, 'sync_state') as sync:
            self.mgr.needs_resync = True
            self.mgr.periodic_resync(mock.Mock())
            sync.assert_called_once_with()

    def test_periodic_resync_no_sync(self):
        with mock.patch.object(self.mgr, 'sync_state') as sync:
            self.mgr.needs_resync = False
            self.mgr.periodic_resync(mock.Mock())
            self.assertFalse(sync.called)

    def test_collect_stats(self):
        self.mgr.collect_stats(mock.Mock())
        self.rpc_mock.update_pool_stats.assert_has_calls([
            mock.call('1', mock.ANY),
            mock.call('2', mock.ANY)
        ])

    def test_collect_stats_exception(self):
        self.driver_mock.get_stats.side_effect = Exception

        self.mgr.collect_stats(mock.Mock())

        self.assertFalse(self.rpc_mock.called)
        self.assertTrue(self.mgr.needs_resync)
        self.assertTrue(self.log.exception.called)

    def _sync_state_helper(self, ready, reloaded, destroyed):
        """Run sync_state with *ready* devices and assert the exact sets of
        pools that get reloaded and destroyed."""
        with contextlib.nested(
            mock.patch.object(self.mgr, '_reload_pool'),
            mock.patch.object(self.mgr, '_destroy_pool')
        ) as (reload, destroy):

            self.rpc_mock.get_ready_devices.return_value = ready

            self.mgr.sync_state()

            self.assertEqual(len(reloaded), len(reload.mock_calls))
            self.assertEqual(len(destroyed), len(destroy.mock_calls))

            reload.assert_has_calls([mock.call(i) for i in reloaded])
            destroy.assert_has_calls([mock.call(i) for i in destroyed])
            self.assertFalse(self.mgr.needs_resync)

    def test_sync_state_all_known(self):
        self._sync_state_helper(['1', '2'], ['1', '2'], [])

    def test_sync_state_all_unknown(self):
        self.mgr.instance_mapping = {}
        self._sync_state_helper(['1', '2'], ['1', '2'], [])

    def test_sync_state_destroy_all(self):
        self._sync_state_helper([], [], ['1', '2'])

    def test_sync_state_both(self):
        self.mgr.instance_mapping = {'1': 'devdriver'}
        self._sync_state_helper(['2'], ['2'], ['1'])

    def test_sync_state_exception(self):
        self.rpc_mock.get_ready_devices.side_effect = Exception

        self.mgr.sync_state()

        self.assertTrue(self.log.exception.called)
        self.assertTrue(self.mgr.needs_resync)

    def test_reload_pool(self):
        config = {'driver': 'devdriver'}
        self.rpc_mock.get_logical_device.return_value = config
        pool_id = 'new_id'
        self.assertNotIn(pool_id, self.mgr.instance_mapping)

        self.mgr._reload_pool(pool_id)

        self.driver_mock.deploy_instance.assert_called_once_with(config)
        self.assertIn(pool_id, self.mgr.instance_mapping)
        self.rpc_mock.pool_deployed.assert_called_once_with(pool_id)

    def test_reload_pool_driver_not_found(self):
        config = {'driver': 'unknown_driver'}
        self.rpc_mock.get_logical_device.return_value = config
        pool_id = 'new_id'
        self.assertNotIn(pool_id, self.mgr.instance_mapping)

        self.mgr._reload_pool(pool_id)

        self.assertTrue(self.log.error.called)
        self.assertFalse(self.driver_mock.deploy_instance.called)
        self.assertNotIn(pool_id, self.mgr.instance_mapping)
        self.assertFalse(self.rpc_mock.pool_deployed.called)

    def test_reload_pool_exception_on_driver(self):
        config = {'driver': 'devdriver'}
        self.rpc_mock.get_logical_device.return_value = config
        self.driver_mock.deploy_instance.side_effect = Exception
        pool_id = 'new_id'
        self.assertNotIn(pool_id, self.mgr.instance_mapping)

        self.mgr._reload_pool(pool_id)

        self.driver_mock.deploy_instance.assert_called_once_with(config)
        self.assertNotIn(pool_id, self.mgr.instance_mapping)
        self.assertFalse(self.rpc_mock.pool_deployed.called)
        self.assertTrue(self.log.exception.called)
        self.assertTrue(self.mgr.needs_resync)

    def test_destroy_pool(self):
        pool_id = '1'
        self.assertIn(pool_id, self.mgr.instance_mapping)

        self.mgr._destroy_pool(pool_id)

        self.driver_mock.undeploy_instance.assert_called_once_with(pool_id)
        self.assertNotIn(pool_id, self.mgr.instance_mapping)
        self.rpc_mock.pool_destroyed.assert_called_once_with(pool_id)
        self.assertFalse(self.mgr.needs_resync)

    def test_destroy_pool_exception_on_driver(self):
        pool_id = '1'
        self.assertIn(pool_id, self.mgr.instance_mapping)
        self.driver_mock.undeploy_instance.side_effect = Exception

        self.mgr._destroy_pool(pool_id)

        self.driver_mock.undeploy_instance.assert_called_once_with(pool_id)
        self.assertIn(pool_id, self.mgr.instance_mapping)
        self.assertFalse(self.rpc_mock.pool_destroyed.called)
        self.assertTrue(self.log.exception.called)
        self.assertTrue(self.mgr.needs_resync)

    def test_get_driver_unknown_device(self):
        self.assertRaises(manager.DeviceNotFoundOnAgent,
                          self.mgr._get_driver, 'unknown')

    def test_remove_orphans(self):
        self.mgr.remove_orphans()

        self.driver_mock.remove_orphans.assert_called_once_with(['1', '2'])

    def test_create_vip(self):
        vip = {'id': 'id1', 'pool_id': '1'}
        self.mgr.create_vip(mock.Mock(), vip)
        self.driver_mock.create_vip.assert_called_once_with(vip)
        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
                                                            constants.ACTIVE)

    def test_create_vip_failed(self):
        vip = {'id': 'id1', 'pool_id': '1'}
        self.driver_mock.create_vip.side_effect = Exception
        self.mgr.create_vip(mock.Mock(), vip)
        self.driver_mock.create_vip.assert_called_once_with(vip)
        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
                                                            constants.ERROR)

    def test_update_vip(self):
        old_vip = {'id': 'id1'}
        vip = {'id': 'id1', 'pool_id': '1'}
        self.mgr.update_vip(mock.Mock(), old_vip, vip)
        self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
                                                            constants.ACTIVE)

    def test_update_vip_failed(self):
        old_vip = {'id': 'id1'}
        vip = {'id': 'id1', 'pool_id': '1'}
        self.driver_mock.update_vip.side_effect = Exception
        self.mgr.update_vip(mock.Mock(), old_vip, vip)
        self.driver_mock.update_vip.assert_called_once_with(old_vip, vip)
        self.rpc_mock.update_status.assert_called_once_with('vip', vip['id'],
                                                            constants.ERROR)

    def test_delete_vip(self):
        vip = {'id': 'id1', 'pool_id': '1'}
        self.mgr.delete_vip(mock.Mock(), vip)
        self.driver_mock.delete_vip.assert_called_once_with(vip)

    def test_create_pool(self):
        pool = {'id': 'id1'}
        self.assertNotIn(pool['id'], self.mgr.instance_mapping)
        self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
        self.driver_mock.create_pool.assert_called_once_with(pool)
        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
                                                            constants.ACTIVE)
        self.assertIn(pool['id'], self.mgr.instance_mapping)

    def test_create_pool_failed(self):
        pool = {'id': 'id1'}
        self.assertNotIn(pool['id'], self.mgr.instance_mapping)
        self.driver_mock.create_pool.side_effect = Exception
        self.mgr.create_pool(mock.Mock(), pool, 'devdriver')
        self.driver_mock.create_pool.assert_called_once_with(pool)
        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
                                                            constants.ERROR)
        self.assertNotIn(pool['id'], self.mgr.instance_mapping)

    def test_update_pool(self):
        old_pool = {'id': '1'}
        pool = {'id': '1'}
        self.mgr.update_pool(mock.Mock(), old_pool, pool)
        self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
                                                            constants.ACTIVE)

    def test_update_pool_failed(self):
        old_pool = {'id': '1'}
        pool = {'id': '1'}
        self.driver_mock.update_pool.side_effect = Exception
        self.mgr.update_pool(mock.Mock(), old_pool, pool)
        self.driver_mock.update_pool.assert_called_once_with(old_pool, pool)
        self.rpc_mock.update_status.assert_called_once_with('pool', pool['id'],
                                                            constants.ERROR)

    def test_delete_pool(self):
        pool = {'id': '1'}
        self.assertIn(pool['id'], self.mgr.instance_mapping)
        self.mgr.delete_pool(mock.Mock(), pool)
        self.driver_mock.delete_pool.assert_called_once_with(pool)
        self.assertNotIn(pool['id'], self.mgr.instance_mapping)

    def test_create_member(self):
        member = {'id': 'id1', 'pool_id': '1'}
        self.mgr.create_member(mock.Mock(), member)
        self.driver_mock.create_member.assert_called_once_with(member)
        self.rpc_mock.update_status.assert_called_once_with('member',
                                                            member['id'],
                                                            constants.ACTIVE)

    def test_create_member_failed(self):
        member = {'id': 'id1', 'pool_id': '1'}
        self.driver_mock.create_member.side_effect = Exception
        self.mgr.create_member(mock.Mock(), member)
        self.driver_mock.create_member.assert_called_once_with(member)
        self.rpc_mock.update_status.assert_called_once_with('member',
                                                            member['id'],
                                                            constants.ERROR)

    def test_update_member(self):
        old_member = {'id': 'id1'}
        member = {'id': 'id1', 'pool_id': '1'}
        self.mgr.update_member(mock.Mock(), old_member, member)
        self.driver_mock.update_member.assert_called_once_with(old_member,
                                                               member)
        self.rpc_mock.update_status.assert_called_once_with('member',
                                                            member['id'],
                                                            constants.ACTIVE)

    def test_update_member_failed(self):
        old_member = {'id': 'id1'}
        member = {'id': 'id1', 'pool_id': '1'}
        self.driver_mock.update_member.side_effect = Exception
        self.mgr.update_member(mock.Mock(), old_member, member)
        self.driver_mock.update_member.assert_called_once_with(old_member,
                                                               member)
        self.rpc_mock.update_status.assert_called_once_with('member',
                                                            member['id'],
                                                            constants.ERROR)

    def test_delete_member(self):
        member = {'id': 'id1', 'pool_id': '1'}
        self.mgr.delete_member(mock.Mock(), member)
        self.driver_mock.delete_member.assert_called_once_with(member)

    def test_create_monitor(self):
        monitor = {'id': 'id1'}
        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
        self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
        self.driver_mock.create_pool_health_monitor.assert_called_once_with(
            monitor, '1')
        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
                                                            assoc_id,
                                                            constants.ACTIVE)

    def test_create_monitor_failed(self):
        monitor = {'id': 'id1'}
        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
        self.driver_mock.create_pool_health_monitor.side_effect = Exception
        self.mgr.create_pool_health_monitor(mock.Mock(), monitor, '1')
        self.driver_mock.create_pool_health_monitor.assert_called_once_with(
            monitor, '1')
        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
                                                            assoc_id,
                                                            constants.ERROR)

    def test_update_monitor(self):
        monitor = {'id': 'id1'}
        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
        self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
        self.driver_mock.update_pool_health_monitor.assert_called_once_with(
            monitor, monitor, '1')
        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
                                                            assoc_id,
                                                            constants.ACTIVE)

    def test_update_monitor_failed(self):
        monitor = {'id': 'id1'}
        assoc_id = {'monitor_id': monitor['id'], 'pool_id': '1'}
        self.driver_mock.update_pool_health_monitor.side_effect = Exception
        self.mgr.update_pool_health_monitor(mock.Mock(), monitor, monitor, '1')
        self.driver_mock.update_pool_health_monitor.assert_called_once_with(
            monitor, monitor, '1')
        self.rpc_mock.update_status.assert_called_once_with('health_monitor',
                                                            assoc_id,
                                                            constants.ERROR)

    def test_delete_monitor(self):
        monitor = {'id': 'id1'}
        self.mgr.delete_pool_health_monitor(mock.Mock(), monitor, '1')
        self.driver_mock.delete_pool_health_monitor.assert_called_once_with(
            monitor, '1')

    def test_agent_disabled(self):
        payload = {'admin_state_up': False}
        self.mgr.agent_updated(mock.Mock(), payload)
        self.driver_mock.undeploy_instance.assert_has_calls(
            [mock.call('1'), mock.call('2')])
|
|
import bme680
import time
import logging
from blinker import signal
class BME680:
BURN_IN_TIME_IN_SECONDS = 300
AIR_QUALITY_SAMPLE_TIME_IN_SECONDS = 60
COOL_DOWN_TEMPERATURE_DIFFERENCE = 0.25
    def __init__(self, address):
        """
        Configure the underlying bme680 driver.

        :param address: I2C address of the sensor; 0x77 selects the
            driver's secondary address, any other value the primary one.
        """
        logging.info('Initialising BME680 sensor with address {}'.format(address))
        if address == 0x77:
            address = bme680.I2C_ADDR_SECONDARY
            logging.debug('Using secondary address')
        else:
            address = bme680.I2C_ADDR_PRIMARY
            logging.debug('Using primary address')
        # the air-quality gas baseline is computed lazily on the first
        # get_air_quality() call
        self._air_quality_baseline_calculated = False
        self._gas_baseline = None
        self._sensor = bme680.BME680(i2c_addr=address)
        # gas (heater) measurement starts disabled; it is only enabled
        # while an air quality reading is in progress
        self._sensor.set_gas_status(bme680.DISABLE_GAS_MEAS)
        self._sensor.set_humidity_oversample(bme680.OS_2X)
        self._sensor.set_pressure_oversample(bme680.OS_4X)
        self._sensor.set_temperature_oversample(bme680.OS_8X)
        self._sensor.set_filter(bme680.FILTER_SIZE_3)
def get_temperature(self):
"""
Return measured temperature from the device.
:return:
"""
logging.debug('Measuring temperature')
pending_measurement = True
temperature = None
if self._sensor.get_gas_status():
logging.info('Air quality is currently being measured, skipping temperature measurement')
return
while pending_measurement:
if self._sensor.get_sensor_data():
temperature = self._sensor.data.temperature
pending_measurement = False
logging.info('Temperature received from sensor: {}'.format(temperature))
logging.debug('Sensor data not ready yet, will try again...')
time.sleep(0.5)
logging.info('Broadcasting temperature: {}'.format(temperature))
temperature_signal = signal('temperature')
temperature_signal.send(self, temperature=temperature)
def get_humidity(self):
"""
Return measured humidity from the device.
:return:
"""
logging.debug('Measuring humidity')
pending_measurement = True
humidity = None
if self._sensor.get_gas_status():
logging.info('Air quality is currently being measured, skipping humidity measurement')
return
while pending_measurement:
if self._sensor.get_sensor_data():
humidity = self._sensor.data.humidity
pending_measurement = False
logging.info('Humidity received from sensor: {}'.format(humidity))
logging.debug('Sensor data not ready yet, will try again...')
time.sleep(0.5)
logging.info('Broadcasting humidity: {}'.format(humidity))
humidity_signal = signal('humidity')
humidity_signal.send(self, humidity=humidity)
def get_pressure(self):
"""
Return measured pressure from the device.
:return:
"""
logging.debug('Measuring pressure')
pending_measurement = True
pressure = None
while pending_measurement:
if self._sensor.get_sensor_data():
pressure = self._sensor.data.pressure
pending_measurement = False
logging.info('Pressure received from sensor: {}'.format(pressure))
logging.debug('Sensor data not ready yet, will try again...')
time.sleep(0.5)
logging.info('Broadcasting pressure: {}'.format(pressure))
pressure_signal = signal('pressure')
pressure_signal.send(self, pressure=pressure)
def get_air_quality(self):
    """
    Measure air quality and broadcast it on the 'air_quality' signal.

    Runs the gas heater for AIR_QUALITY_SAMPLE_TIME_IN_SECONDS, averages the
    gas-resistance samples, combines a humidity score (25%) and a gas score
    (75%) into a 0-100 air-quality score, then waits for the sensor to cool
    back down to near its initial temperature before returning.

    :return: None; the score and raw gas resistance are delivered via the
        'air_quality' signal.
    """
    logging.debug('Measuring air quality')
    initial_temperature = None
    logging.debug('Measuring initial temperature and humidity readings')
    pending_measurement = True
    while pending_measurement:
        if self._sensor.get_sensor_data():
            initial_temperature = self._sensor.data.temperature
            pending_measurement = False
            logging.info('Temperature received from sensor: {}'.format(initial_temperature))
        else:
            # Bug fix: only log/sleep when the sensor had no fresh data.
            logging.debug('Sensor data not ready yet, will try again...')
            time.sleep(0.5)
    self._sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
    self._sensor.set_gas_heater_temperature(320)
    self._sensor.set_gas_heater_duration(150)
    self._sensor.select_gas_heater_profile(0)
    if self._air_quality_baseline_calculated is False:
        logging.info('Sensor has no air quality baseline, calculating now')
        self._calculate_air_quality_baseline()
    pending_measurement = True
    pending_count = 0
    air_quality = None
    gas_resistance = None
    hum_baseline = 40.0   # "ideal" indoor relative humidity used by the score
    hum_weighting = 0.25  # humidity contributes 25% of the score, gas 75%
    start_time = time.time()
    current_time = time.time()
    sample_data = []
    while current_time - start_time < BME680.AIR_QUALITY_SAMPLE_TIME_IN_SECONDS:
        current_time = time.time()
        if self._sensor.get_sensor_data() and self._sensor.data.heat_stable:
            sample_data.append(self._sensor.data.gas_resistance)
        time.sleep(1)
    self._sensor.set_gas_status(bme680.DISABLE_GAS_MEAS)
    while pending_measurement and pending_count < 15:
        if self._sensor.get_sensor_data():
            # Average over the samples actually collected (bug fix: the old
            # code always divided by 35 even when fewer were gathered).
            recent_samples = sample_data[-35:]
            gas_resistance = (sum(recent_samples) / float(len(recent_samples))
                              if recent_samples else 0.0)
            gas_offset = self._gas_baseline - gas_resistance
            hum = self._sensor.data.humidity
            hum_offset = hum - hum_baseline
            # Calculate hum_score as the distance from the hum_baseline.
            if hum_offset > 0:
                hum_score = (100 - hum_baseline - hum_offset) / (100 - hum_baseline) * (hum_weighting * 100)
            else:
                hum_score = (hum_baseline + hum_offset) / hum_baseline * (hum_weighting * 100)
            # Calculate gas_score as the distance from the gas_baseline.
            if gas_offset > 0:
                gas_score = (gas_resistance / self._gas_baseline) * (100 - (hum_weighting * 100))
            else:
                gas_score = 100 - (hum_weighting * 100)
            # Calculate air_quality_score.
            air_quality = hum_score + gas_score
            pending_measurement = False
            logging.info('Air quality received from sensor: {}'.format(air_quality))
        else:
            logging.debug('Sensor data not ready yet, will try again...')
            pending_count = pending_count + 1
            time.sleep(0.5)
    logging.info('Broadcasting air quality: {}'.format(air_quality))
    air_quality_signal = signal('air_quality')
    air_quality_signal.send(self, air_quality=air_quality, gas=gas_resistance)
    # Allow gas plate to cool down before the next temperature/humidity read.
    cooling = True
    while cooling:
        if self._sensor.get_sensor_data():
            # Bug fix: test for None FIRST -- the old order computed
            # `initial_temperature + ...` before the None guard, so the
            # guard could never take effect.
            if (initial_temperature is None or
                    self._sensor.data.temperature <= initial_temperature + BME680.COOL_DOWN_TEMPERATURE_DIFFERENCE):
                cooling = False
                logging.debug('Sensor has cooled down sufficiently')
            else:
                time.sleep(0.5)
        else:
            logging.debug('Sensor data not ready yet, will try again...')
            time.sleep(0.5)
def _calculate_air_quality_baseline(self):
    """
    Calculate the gas-resistance baseline by burning the sensor in for
    BURN_IN_TIME_IN_SECONDS and averaging the most recent heat-stable
    samples. Sets self._gas_baseline and marks the baseline as calculated.

    :return: None
    """
    logging.debug('Calculating air quality baseline')
    start_time = time.time()
    current_time = time.time()
    burn_in_data = []
    while current_time - start_time < BME680.BURN_IN_TIME_IN_SECONDS:
        current_time = time.time()
        if self._sensor.get_sensor_data() and self._sensor.data.heat_stable:
            gas = self._sensor.data.gas_resistance
            burn_in_data.append(gas)
        time.sleep(1)
    # Average over the samples actually collected (bug fix: the old code
    # always divided by 50, biasing the baseline low when the burn-in
    # yielded fewer than 50 heat-stable readings).
    recent = burn_in_data[-50:]
    self._gas_baseline = sum(recent) / float(len(recent)) if recent else 0.0
    logging.info('Air quality baseline calculated: {}'.format(self._gas_baseline))
    self._air_quality_baseline_calculated = True
|
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from chaco.api import ArrayPlotData, Plot, HPlotContainer
from chaco.tools.api import ZoomTool, PanTool
from chaco.tools.image_inspector_tool import ImageInspectorOverlay, \
ImageInspectorTool
from enable.component import Component
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, List, Str, Bool, on_trait_change, String, \
Button, Dict, Any
from traitsui.api import View, Item, ListStrEditor, HGroup, VGroup, \
spring, VSplit, Group
# ============= standard library imports ========================
import Image
from numpy import array
import os
import six.moves.http_client
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.paths import paths
PORT = 8083
# TEST_IMAGE = Image.open(open('/Users/ross/Sandbox/snapshot001.jpg'))
# TEST_IMAGE = ImageData.fromfile('/Users/ross/Sandbox/foo.png')
class ImageContainer(HasTraits):
    """Holds a chaco plot container plus a display name for the image."""
    container = Instance(HPlotContainer, ())
    name = String

    def traits_view(self):
        """View: a centered maroon title row above the plot area."""
        title_row = HGroup(
            spring,
            CustomLabel('name', color='maroon', size=16,
                        height=-25, width=100),
            spring,
        )
        return View(VGroup(
            title_row,
            Item('container', show_label=False, editor=ComponentEditor()),
        ))
class ImageSpec(HasTraits):
    """Metadata (name plus a free-form note) attached to a single image."""
    name = Str
    note = Str

    def traits_view(self):
        """View: the name field above a bordered multi-line note editor."""
        note_group = Group(
            Item('note', style='custom', show_label=False),
            show_border=True,
            label='Note',
        )
        return View(VGroup(Item('name'), note_group))
class ImageEditor(HasTraits):
    """Lists available images and edits the spec of the selected one."""
    names = List
    selected = Str
    save_db = Button('Save to DB')
    image_spec = Instance(ImageSpec)
    image_specs = Dict
    db = Any

    # ===========================================================================
    # handlers
    # ===========================================================================
    def _selected_changed(self):
        # Reuse a cached spec when this image was selected before;
        # otherwise create one lazily and remember it for next time.
        try:
            spec = self.image_specs[self.selected]
        except KeyError:
            spec = ImageSpec(name=self.selected)
            self.image_specs[self.selected] = spec
        self.image_spec = spec

    def _save_db_fired(self):
        print(self.db)

    def traits_view(self):
        """View: the selectable name list stacked over the spec editor."""
        name_list = Item('names', show_label=False,
                         editor=ListStrEditor(editable=False,
                                              selected='selected',
                                              operations=[]),
                         height=0.6)
        spec_item = Item('image_spec', show_label=False, style='custom',
                         height=0.4)
        return View(
            VSplit(name_list, spec_item),
            Item('save_db', show_label=False),
        )
class ImageBrowser(IsotopeDatabaseManager):
    """Browse snapshot images served by a remote HTTP directory listing.

    Directory contents are scraped from the server's HTML index; individual
    images are cached on disk (when ``use_cache``) and displayed in a chaco
    image plot with zoom/pan tools and a pixel-inspector overlay.
    """
    # db = Instance(IsotopeAdapter)
    image_container = Instance(ImageContainer, ())
    image_editor = Instance(ImageEditor)
    plot = Instance(Component)
    # names = List
    # selected = Str
    use_cache = Bool(True)  # keep downloaded images on disk
    cache_dir = paths.image_cache_dir
    _conn = None  # lazily created HTTP connection (see _connection_factory)

    def _image_editor_default(self):
        # Share this manager's database handle with the editor.
        im = ImageEditor(db=self.db)
        return im

    def _is_cached(self, p):
        """Return True if image file *p* already exists in the cache dir."""
        p = os.path.join(self.cache_dir, p)
        return os.path.isfile(p)

    def load_from_remote_source(self, name):
        """Display image *name*, preferring the local cache over the network.

        NOTE(review): the network fallback calls ``self._get_remote_file``,
        which is commented out below -- this path will raise AttributeError
        until it is restored.
        """
        if self._is_cached(name):
            data = self._get_cached(name)
        else:
            data = self._get_remote_file(name)
        self._load_image_data(data)

    def load_remote_directory(self, name):
        """Populate the editor's name list by scraping the HTML index page.

        NOTE(review): relies on ``self._get``, which is commented out below.
        Returns True on success, implicitly None otherwise.
        """
        self.info('retrieve contents of remote directory {}'.format(name))
        resp = self._get(name)
        if resp:
            htxt = resp.read()
            for li in htxt.split('\n'):
                # Directory entries look like: <li><a ...>name</a>...
                if li.startswith('<li>'):
                    args = li[4:].split('>')
                    name, _tail = args[1].split('<')
                    self.image_editor.names.append(name)
            return True

    def _connection_factory(self, reset=False):
        """Return a cached HTTPConnection, creating it on first use or reset.

        NOTE(review): passes 'host:port' as a single string; http.client
        accepts that form, but the module-level PORT (8083) is unused here.
        """
        if reset or self._conn is None:
            host, port = 'localhost', 8081
            url = '{}:{}'.format(host, port)
            conn = six.moves.http_client.HTTPConnection(url)
        else:
            conn = self._conn
        self._conn = conn
        return conn

    # def _get(self, name):
    # conn = self._connection_factory()
    # conn.request('GET', '/{}'.format(name))
    # return conn.getresponse()
    # def _get_remote_file(self, name):
    # self.info('retrieve {} from remote directory'.format(name))
    # resp = self._get(name)
    #
    # buf = StringIO()
    # buf.write(resp.read())
    # buf.seek(0)
    # im = Image.open(buf)
    # im = im.convert('RGB')
    #
    # if self.use_cache:
    # buf.seek(0)
    # if os.path.isdir(self.cache_dir):
    # with open(os.path.join(self.cache_dir, name), 'w') as fp:
    # fp.write(buf.read())
    # else:
    # self.info('cache directory does not exist. {}'.format(self.cache_dir))
    #
    # buf.close()
    #
    # return array(im)

    def _get_cached(self, name):
        """Load image *name* from the cache directory as an RGB numpy array."""
        self.info('retrieve {} from cache directory'.format(name))
        p = os.path.join(self.cache_dir, name)
        with open(p, 'r') as rfile:
            im = Image.open(rfile)
            im = im.convert('RGB')
            return array(im)

    def _load_image_data(self, data):
        """Build a chaco image plot for *data* and install it in the container."""
        cont = HPlotContainer()
        pd = ArrayPlotData()
        plot = Plot(data=pd, padding=[30, 5, 5, 30], default_origin='top left')
        pd.set_data('img', data)
        img_plot = plot.img_plot('img',
                                 )[0]
        self._add_inspector(img_plot)
        self._add_tools(img_plot)
        cont.add(plot)
        cont.request_redraw()
        self.image_container.container = cont

    def _add_inspector(self, img_plot):
        """Attach a pixel-inspector tool plus its overlay to *img_plot*."""
        imgtool = ImageInspectorTool(img_plot)
        img_plot.tools.append(imgtool)
        overlay = ImageInspectorOverlay(component=img_plot, image_inspector=imgtool,
                                        bgcolor="white", border_visible=True)
        img_plot.overlays.append(overlay)
        #

    def _add_tools(self, img_plot):
        """Attach box-zoom and pan tools to *img_plot*."""
        zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
        pan = PanTool(component=img_plot, restrict_to_data=True)
        img_plot.tools.append(pan)
        img_plot.overlays.append(zoom)

    # ===============================================================================
    # handlers
    # ===============================================================================
    @on_trait_change('image_editor:selected')
    def _selected_changed(self):
        # Load and display whichever image the user picked in the list.
        sel = self.image_editor.selected
        if sel:
            self.load_from_remote_source(sel)
            self.image_container.name = sel

    def traits_view(self):
        """Main window: editor panel (30%) beside the image display (70%)."""
        v = View(
            HGroup(
                Item('image_editor', show_label=False, style='custom',
                     width=0.3
                     ),
                # Item('names', show_label=False, editor=ListStrEditor(editable=False,
                # selected='selected',
                # operations=[]
                # ),
                # width=0.3,
                # ),
                Item('image_container', style='custom',
                     width=0.7,
                     show_label=False)
            ),
            # Item('container', show_label=False,
            # width=0.7,
            # editor=ComponentEditor())),
            resizable=True,
            height=800,
            width=900
        )
        return v
if __name__ == '__main__':
    # Manual smoke test: browse the image listing of a local snapshot server.
    from pychron.core.helpers.logger_setup import logging_setup
    logging_setup('image_viewer')
    im = ImageBrowser(cache_dir='/Users/ross/Sandbox/cache')
    im.load_remote_directory('')
    # im.load_from_remote_source('raster2.png')
    # im.load_remote_directory()
    # im.names = 'snapshot001.jpg,snapshot002.jpg,snapshot003.jpg,snapshot004.jpg'.split(',')
    # im.load_from_remote_source('foo')
    # im.load_image_from_file('/Users/ross/Sandbox/diodefailsnapshot.jpg')
    im.configure_traits()
# ============= EOF =============================================
|
|
try:
import Crypto.Hash.SHA3_256 as _SHA3_256 # from pycryptodome
sha3_256 = _SHA3_256.new
except ImportError:
from sha3 import sha3_256
from bitcoin import privtopub
import sys
import rlp
from rlp.sedes import big_endian_int, BigEndianInt, Binary
from rlp.utils import decode_hex, encode_hex, ascii_chr, str_to_bytes
import random
# Int <-> big-endian byte-string conversion, delegated to rlp's sedes.
big_endian_to_int = lambda x: big_endian_int.deserialize(str_to_bytes(x).lstrip(b'\x00'))
int_to_big_endian = lambda x: big_endian_int.serialize(x)
# Word-size constants for 256-bit EVM arithmetic.
TT256 = 2 ** 256
TT256M1 = 2 ** 256 - 1
TT255 = 2 ** 255
# Python 2/3 compatibility shims: normalize numeric/string type checks and
# conversions so the rest of the module can stay version-agnostic.
if sys.version_info.major == 2:
    is_numeric = lambda x: isinstance(x, (int, long))
    is_string = lambda x: isinstance(x, (str, unicode))
    def to_string(value):
        return str(value)
    def int_to_bytes(value):
        if isinstance(value, str):
            return value
        return int_to_big_endian(value)
    def to_string_for_regexp(value):
        return str(value)
    unicode = unicode
else:
    is_numeric = lambda x: isinstance(x, int)
    is_string = lambda x: isinstance(x, bytes)
    def to_string(value):
        # Coerce bytes/str/int to bytes.
        # NOTE(review): implicitly returns None for any other type.
        if isinstance(value, bytes):
            return value
        if isinstance(value, str):
            return bytes(value, 'utf-8')
        if isinstance(value, int):
            return bytes(str(value), 'utf-8')
    def int_to_bytes(value):
        if isinstance(value, bytes):
            return value
        return int_to_big_endian(value)
    def to_string_for_regexp(value):
        return str(to_string(value), 'utf-8')
    unicode = str
isnumeric = is_numeric  # legacy alias
def safe_ord(value):
    """Return *value* unchanged if it is already an int, else ord(value).

    Indexing bytes yields ints on Python 3 but 1-char strings on Python 2;
    this smooths over the difference.
    """
    return value if isinstance(value, int) else ord(value)
# decorator
def debug(label):
    """Decorator factory that traces calls to the wrapped function.

    Prints *label*, a random per-call id, and the positional arguments
    before the call, then the same id and the result afterwards. The
    wrapped function's return value is passed through unchanged.
    """
    def deb(f):
        def inner(*args, **kwargs):
            call_id = random.randrange(1000000)
            print(label, call_id, 'start', args)
            result = f(*args, **kwargs)
            print(label, call_id, 'end', result)
            return result
        return inner
    return deb
def flatten(li):
    """Concatenate a list of iterables into a single flat list."""
    return [item for sub in li for item in sub]
def bytearray_to_int(arr):
    """Interpret a sequence of byte values as a big-endian integer."""
    result = 0
    for byte in arr:
        result = result * 256 + byte
    return result
def int_to_32bytearray(i):
    """Split integer *i* into a list of 32 byte values, big-endian order."""
    return [(i >> (8 * shift)) & 0xff for shift in range(31, -1, -1)]
def sha3(seed):
    """Return the 32-byte sha3-256 digest of *seed* (coerced to bytes).

    NOTE(review): Ethereum's "sha3" is historically Keccak-256, not NIST
    SHA3-256 -- confirm the imported sha3_256 backend provides the variant
    the assertion below expects.
    """
    return sha3_256(to_string(seed)).digest()
# Sanity check of the hash backend (Python 2 idiom: bytes.encode('hex')).
assert sha3('').encode('hex') == 'c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470'
def privtoaddr(x, extended=False):
    """Derive the 20-byte address for private key *x*.

    Hex-encoded keys (longer than 32 chars) are decoded first. With
    extended=True, a 4-byte checksum is appended via add_checksum.
    """
    raw_key = decode_hex(x) if len(x) > 32 else x
    addr = sha3(privtopub(raw_key)[1:])[12:]
    if extended:
        return add_checksum(addr)
    return addr
def add_checksum(x):
    """Append a 4-byte sha3 checksum to a 20-byte address.

    Hex input (40 or 48 chars) is decoded first; already-extended 24-byte
    addresses are returned unchanged.
    """
    raw = decode_hex(x) if len(x) in (40, 48) else x
    return raw if len(raw) == 24 else raw + sha3(raw)[:4]
def check_and_strip_checksum(x):
    """Verify the 4-byte checksum of an extended address and strip it."""
    raw = decode_hex(x) if len(x) in (40, 48) else x
    assert len(raw) == 24 and sha3(raw[:20])[:4] == raw[-4:]
    return raw[:20]
def zpad(x, l):
    """Left-pad byte string *x* with NUL bytes to length *l* (never truncates)."""
    return x.rjust(l, b'\x00')
def zunpad(x):
    """Strip leading zeros (byte value 0 or NUL char) from *x*."""
    count = 0
    for element in x:
        if element != 0 and element != '\x00':
            break
        count += 1
    return x[count:]
def int_to_addr(x):
    """Encode integer *x* as a 20-byte big-endian address."""
    chars = []
    for shift in range(19, -1, -1):
        chars.append(ascii_chr((x >> (8 * shift)) & 0xff))
    return b''.join(chars)
def coerce_addr_to_bin(x):
    """Coerce an int / 40-char hex string / byte string to a 20-byte address.

    NOTE(review): the numeric branch returns a hex encoding rather than raw
    bytes, mirroring the original behavior -- confirm callers depend on this
    asymmetry before changing it.
    """
    if is_numeric(x):
        return encode_hex(zpad(big_endian_int.serialize(x), 20))
    if len(x) in (0, 40):
        return decode_hex(x)
    return zpad(x, 20)[-20:]
def coerce_addr_to_hex(x):
    """Coerce an int / hex string / byte string to a 40-char hex address."""
    if is_numeric(x):
        return encode_hex(zpad(big_endian_int.serialize(x), 20))
    if len(x) in (0, 40):
        return x
    return encode_hex(zpad(x, 20)[-20:])
def coerce_to_int(x):
    """Coerce an int / 40-char hex address / byte string to an integer."""
    if is_numeric(x):
        return x
    if len(x) == 40:
        return big_endian_to_int(decode_hex(x))
    return big_endian_to_int(x)
def coerce_to_bytes(x):
    """Coerce an int / 40-char hex address / byte string to raw bytes."""
    if is_numeric(x):
        return big_endian_int.serialize(x)
    if len(x) == 40:
        return decode_hex(x)
    return x
def parse_int_or_hex(s):
    """Parse *s* into an int: accepts ints, '0x...' hex, or decimal strings."""
    if is_numeric(s):
        return s
    if s[:2] in (b'0x', '0x'):
        s = to_string(s)
        # Left-pad odd-length hex so decode_hex sees an even digit count.
        hex_digits = (b'0' if len(s) % 2 else b'') + s[2:]
        return big_endian_to_int(decode_hex(hex_digits))
    return int(s)
def ceil32(x):
    """Round *x* up to the next multiple of 32 (the EVM word size)."""
    return (x + 31) // 32 * 32
def to_signed(i):
    """Reinterpret unsigned 256-bit integer *i* as signed two's complement."""
    return i - TT256 if i >= TT255 else i
def sha3rlp(x):
    """Return the sha3/keccak hash of the RLP encoding of *x*."""
    return sha3(rlp.encode(x))
# Format encoders/decoders for bin, addr, int
def decode_bin(v):
    """Decode a byte string from serialization (identity, with a type check)."""
    if is_string(v):
        return v
    raise Exception("Value must be binary, not RLP array")
def decode_addr(v):
    """Decode an address from serialization: 0 or 20 bytes in, hex out."""
    if len(v) not in (0, 20):
        raise Exception("Serialized addresses must be empty or 20 bytes long!")
    return encode_hex(v)
def decode_int(v):
    """Decode an integer from serialization; leading zero bytes are invalid."""
    if len(v) > 0 and v[0] in ('\x00', 0):
        raise Exception("No leading zero bytes allowed for integers")
    return big_endian_to_int(v)
def decode_int256(v):
    """Decode a 256-bit big-endian integer (no leading-zero restriction)."""
    return big_endian_to_int(v)
def encode_bin(v):
    '''encodes a bytearray into serialization (identity)'''
    return v
def encode_root(v):
    '''encodes a trie root into serialization (identity)'''
    return v
def encode_int(v):
    """Encode a non-negative integer below 2**256 as big-endian bytes."""
    if is_numeric(v) and 0 <= v < TT256:
        return int_to_big_endian(v)
    raise Exception("Integer invalid or out of range: %r" % v)
def encode_int256(v):
    """Encode *v* as big-endian bytes zero-padded to 256 bytes."""
    return zpad(int_to_big_endian(v), 256)
def scan_bin(v):
    """Parse a printable hex value, with or without a '0x' prefix, to bytes."""
    prefix_len = 2 if v[:2] in ('0x', b'0x') else 0
    return decode_hex(v[prefix_len:])
def scan_int(v):
    """Parse a printable integer: '0x...' hex or plain decimal."""
    if v[:2] in ('0x', b'0x'):
        return big_endian_to_int(decode_hex(v[2:]))
    return int(v)
# Decoding from RLP serialization
decoders = {
    "bin": decode_bin,
    "addr": decode_addr,
    "int": decode_int,
    "int256b": decode_int256,
}
# Encoding to RLP serialization
encoders = {
    "bin": encode_bin,
    "int": encode_int,
    "trie_root": encode_root,
    "int256b": encode_int256,
}
# Encoding to printable format
printers = {
    "bin": lambda v: b'0x' + encode_hex(v),
    "addr": lambda v: v,
    "int": lambda v: to_string(v),
    "trie_root": lambda v: encode_hex(v),
    "int256b": lambda x: encode_hex(zpad(encode_int256(x), 256))
}
# Decoding from printable format
scanners = {
    "bin": scan_bin,
    "addr": lambda x: x[2:] if x[:2] == b'0x' else x,
    "int": scan_int,
    # Bug fix: this lambda previously returned the scan_bin function object
    # itself instead of applying it to the argument.
    "trie_root": lambda x: scan_bin(x),
    "int256b": lambda x: big_endian_to_int(decode_hex(x))
}
def int_to_hex(x):
    """Render integer *x* as a '0x...' hex string without a leading zero digit."""
    digits = encode_hex(encode_int(x))
    if len(digits) > 0 and digits[0] == '0':
        digits = digits[1:]
    return '0x' + digits
def remove_0x_head(s):
    """Strip a leading '0x' from *s*, accepting both str and bytes input.

    Bug fix: the original compared only against b'0x', so on Python 3 a
    text-string input like '0xabc' was returned unstripped. Sibling helpers
    in this module (parse_int_or_hex, scan_bin, scan_int) accept both forms.
    """
    return s[2:] if s[:2] in ('0x', b'0x') else s
def print_func_call(ignore_first_arg=False, max_call_number=100):
    ''' utility function to facilitate debug, it will print input args before
    function call, and print return value after function call

    usage:

        @print_func_call
        def some_func_to_be_debu():
            pass

    :param ignore_first_arg: whether print the first arg or not.
    useful when ignore the `self` parameter of an object method call
    :param max_call_number: raise once the wrapped function has been called
    more than this many times (guards against runaway recursion/loops).
    '''
    from functools import wraps

    def display(x):
        # Best-effort printable form; non-ASCII data is masked.
        x = to_string(x)
        try:
            x.decode('ascii')
        except Exception:
            return 'NON_PRINTABLE'
        return x

    local = {'call_number': 0}

    def inner(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            local['call_number'] += 1
            tmp_args = args[1:] if ignore_first_arg and len(args) else args
            this_call_number = local['call_number']
            print(('{0}#{1} args: {2}, {3}'.format(
                f.__name__,
                this_call_number,
                ', '.join([display(x) for x in tmp_args]),
                ', '.join(display(key) + '=' + to_string(value)
                          for key, value in kwargs.items())
            )))
            res = f(*args, **kwargs)
            print(('{0}#{1} return: {2}'.format(
                f.__name__,
                this_call_number,
                display(res))))
            # Bug fix: honor the max_call_number parameter instead of a
            # hard-coded 100.
            if local['call_number'] > max_call_number:
                raise Exception("Touch max call number!")
            return res
        return wrapper
    return inner
def dump_state(trie):
    """Render every key/value pair of *trie* as hex, one pair per line."""
    lines = []
    for key, value in trie.to_dict().items():
        lines.append('%r:%r\n' % (encode_hex(key), encode_hex(value)))
    return ''.join(lines)
class Denoms():
    """Named ether denominations, each expressed as a quantity of wei."""

    def __init__(self):
        for unit, amount in (('wei', 1),
                             ('babbage', 10 ** 3),
                             ('lovelace', 10 ** 6),
                             ('shannon', 10 ** 9),
                             ('szabo', 10 ** 12),
                             ('finney', 10 ** 15),
                             ('ether', 10 ** 18),
                             ('turing', 2 ** 256)):
            setattr(self, unit, amount)
denoms = Denoms()  # module-level singleton used throughout the codebase
# Common rlp sedes for block/transaction field schemas.
address = Binary.fixed_length(20, allow_empty=True)
int20 = BigEndianInt(20)
int32 = BigEndianInt(32)
int256 = BigEndianInt(256)
hash32 = Binary.fixed_length(32)
trie_root = Binary.fixed_length(32, allow_empty=True)
class bcolors:
    # ANSI escape sequences for colored terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[91m'  # NOTE(review): same code as FAIL (red), not yellow
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def DEBUG(msg, *args, **kwargs):
    """Forward a debug message to ethereum.slogging (imported lazily to
    avoid a circular import at module load time)."""
    from ethereum import slogging
    slogging.DEBUG(msg, *args, **kwargs)
|
|
# -*- coding: utf-8 -*-
"""Django settings for kitsune project."""
import logging
import os
import platform
import re
from datetime import date
from bundles import PIPELINE_CSS, PIPELINE_JS
from kitsune.lib.sumo_locales import LOCALES
DEBUG = True
TEMPLATE_DEBUG = DEBUG
STAGE = False
LOG_LEVEL = logging.INFO
SYSLOG_TAG = 'http_sumo_app'
# Repository directory.
ROOT = os.path.dirname(os.path.dirname(__file__))
# Django project directory.
PROJECT_ROOT = os.path.dirname(__file__)
PROJECT_MODULE = 'kitsune'
# path bases things off of ROOT
path = lambda *a: os.path.abspath(os.path.join(ROOT, *a))
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3'
# or 'oracle'.
'ENGINE': 'django.db.backends.mysql',
# Or path to database file if sqlite3.
'NAME': 'kitsune',
# Not used with sqlite3.
'USER': '',
# Not used with sqlite3.
'PASSWORD': '',
# Set to empty string for localhost. Not used with sqlite3.
'HOST': '',
# Set to empty string for default. Not used with sqlite3.
'PORT': '',
'OPTIONS': {'init_command': 'SET storage_engine=InnoDB'},
}
}
DATABASE_ROUTERS = ('multidb.PinningMasterSlaveRouter',)
# Put the aliases for your slave databases in this list
SLAVE_DATABASES = []
# Cache Settings
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['localhost:11211'],
# 'PREFIX': 'sumo:',
# },
# }
# Setting this to the Waffle version.
WAFFLE_CACHE_PREFIX = 'w0.7.7a:'
# Addresses email comes from
DEFAULT_FROM_EMAIL = 'notifications@support.mozilla.org'
DEFAULT_REPLY_TO_EMAIL = 'no-reply@mozilla.org'
SERVER_EMAIL = 'server-error@support.mozilla.org'
EMAIL_SUBJECT_PREFIX = '[support] '
PLATFORM_NAME = platform.node()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'US/Pacific'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Supported languages
# Note: We periodically add locales to this list and it is easier to
# review changes with one locale per line.
SUMO_LANGUAGES = (
'af',
'ar',
'az',
'bg',
'bm',
'bn-BD',
'bn-IN',
'bs',
'ca',
'cs',
'da',
'de',
'ee',
'el',
'en-US',
'es',
'et',
'eu',
'fa',
'fi',
'fr',
'fy-NL',
'ga-IE',
'gl',
'gu-IN',
'ha',
'he',
'hi-IN',
'hr',
'hu',
'dsb',
'hsb',
'id',
'ig',
'it',
'ja',
'km',
'kn',
'ko',
'ln',
'lt',
'mg',
'mk',
'ml',
'ne-NP',
'nl',
'no',
'pl',
'pt-BR',
'pt-PT',
'ro',
'ru',
'si',
'sk',
'sl',
'sq',
'sr',
'sw',
'sv',
'ta',
'ta-LK',
'te',
'th',
'tn',
'tr',
'uk',
'ur',
'vi',
'wo',
'xh',
'xx', # This is a test locale
'yo',
'zh-CN',
'zh-TW',
'zu',
)
# These languages won't show a warning about FxOS when contributors try
# to add content.
# Bug fix: 'sr' was listed twice; duplicate removed and ordering restored
# to alphabetical (the list is only used for membership checks).
FXOS_LANGUAGES = [
    'af',
    'bm',
    'bn-BD',
    'bn-IN',
    'cs',
    'de',
    'ee',
    'el',
    'en-US',
    'es',
    'fr',
    'ha',
    'hi-IN',
    'hr',
    'hu',
    'ig',
    'it',
    'ln',
    'mg',
    'nl',
    'pl',
    'pt-BR',
    'pt-PT',
    'ro',
    'ru',
    'sr',
    'sw',
    'ta',
    'tr',
    'wo',
    'xh',
    'yo',
    'zu',
]
# These languages will get a wiki page instead of the product and topic pages.
SIMPLE_WIKI_LANGUAGES = [
'az',
'et',
'ga-IE',
'gl',
'dsb',
'hsb',
'kn',
'ml',
'tn',
]
# Languages that should show up in language switcher.
LANGUAGE_CHOICES = tuple(
[(lang, LOCALES[lang].native) for lang in SUMO_LANGUAGES
if lang != 'xx'])
LANGUAGE_CHOICES_ENGLISH = tuple(
[(lang, LOCALES[lang].english) for lang in SUMO_LANGUAGES
if lang != 'xx'])
LANGUAGES_DICT = dict([(i.lower(), LOCALES[i].native) for i in SUMO_LANGUAGES])
LANGUAGES = LANGUAGES_DICT.items()
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in SUMO_LANGUAGES])
# Locales that are known but unsupported. Keys are the locale, values
# are an optional fallback locale, or None, to use the LANGUAGE_CODE.
NON_SUPPORTED_LOCALES = {
'ach': None,
'ak': None,
'an': 'es',
'as': None,
'ast': 'es',
'be': 'ru',
'bn': 'bn-BD',
'br': 'fr',
'csb': 'pl',
'eo': None,
'ff': None,
'fur': 'it',
'gd': None,
'hy-AM': None,
'ilo': None,
'is': None,
'kk': None,
'lg': None,
'lij': 'it',
'mai': None,
'mn': None,
'mr': None,
'ms': None,
'my': None,
'nb-NO': 'no',
'nn-NO': 'no',
'nso': None,
'oc': 'fr',
'pa-IN': None,
'rm': None,
'rw': None,
'sah': None,
'son': None,
'sv-SE': 'sv',
}
ES_LOCALE_ANALYZERS = {
'ar': 'arabic',
'bg': 'bulgarian',
'ca': 'snowball-catalan',
'cs': 'czech',
'da': 'snowball-danish',
'de': 'snowball-german',
'en-US': 'snowball-english',
'es': 'snowball-spanish',
'eu': 'snowball-basque',
'fa': 'persian',
'fi': 'snowball-finnish',
'fr': 'snowball-french',
'hi-IN': 'hindi',
'hu': 'snowball-hungarian',
'id': 'indonesian',
'it': 'snowball-italian',
'ja': 'cjk',
'nl': 'snowball-dutch',
'no': 'snowball-norwegian',
'pl': 'polish',
'pt-BR': 'snowball-portuguese',
'pt-PT': 'snowball-portuguese',
'ro': 'snowball-romanian',
'ru': 'snowball-russian',
'sv': 'snowball-swedish',
'th': 'thai',
'tr': 'snowball-turkish',
'zh-CN': 'chinese',
'zh-TW': 'chinese',
}
ES_PLUGIN_ANALYZERS = [
'polish'
]
ES_USE_PLUGINS = False
TEXT_DOMAIN = 'messages'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as
# not to load the internationalization machinery.
USE_I18N = True
USE_L10N = True
DB_LOCALIZE = {
'karma': {
'Title': {
'attrs': ['name'],
'comments': ['This is a karma title.'],
}
},
'products': {
'Product': {
'attrs': ['title', 'description'],
},
'Topic': {
'attrs': ['title', 'description'],
},
},
'badger': {
'Badge': {
'attrs': ['title', 'description'],
},
},
}
# locale is in the kitsune git repo project directory, so that's
# up one directory from the PROJECT_ROOT
LOCALE_PATHS = (
path('locale'),
)
# Use the real robots.txt?
ENGAGE_ROBOTS = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = path('media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_ROOT = path('static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
path('bower_components'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Paths that don't require a locale prefix.
SUPPORTED_NONLOCALES = (
'1',
'admin',
'api',
'favicon.ico',
'media',
'postcrash',
'robots.txt',
'services',
'wafflejs',
'geoip-suggestion',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#%tc(zja8j01!r#h_y)=hy!^k)9az74k+-ib&ij&+**s3-e^_z'
# List of callables that know how to import templates from various
# sources.
TEMPLATE_LOADERS = (
'jingo.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# Because Jinja2 is the default template loader, add any non-Jinja templated
# apps here:
JINGO_EXCLUDE_APPS = [
'admin',
'adminplus',
'authority',
'kadmin',
'rest_framework',
'waffle',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'kitsune.sumo.context_processors.global_settings',
'kitsune.sumo.context_processors.i18n',
'kitsune.sumo.context_processors.geoip_cache_detector',
'kitsune.sumo.context_processors.aaq_languages',
'kitsune.messages.context_processors.unread_message_count',
)
MIDDLEWARE_CLASSES = (
'multidb.middleware.PinningRouterMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'commonware.request.middleware.SetRemoteAddrFromForwardedFor',
# LocaleURLMiddleware requires access to request.user. These two must be
# loaded before the LocaleURLMiddleware
'commonware.middleware.NoVarySessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# This should come before TokenLoginMiddleware, because
# TokenLoginMiddleware uses this to tell users they have been
# automatically logged. It also has to come after
# NoVarySessionMiddleware.
'django.contrib.messages.middleware.MessageMiddleware',
# This middleware should come after AuthenticationMiddleware.
'kitsune.users.middleware.TokenLoginMiddleware',
# LocaleURLMiddleware must be before any middleware that uses
# sumo.urlresolvers.reverse() to add locale prefixes to URLs:
'kitsune.sumo.middleware.LocaleURLMiddleware',
# Mobile detection should happen in Zeus.
'kitsune.sumo.middleware.DetectMobileMiddleware',
'mobility.middleware.XMobileMiddleware',
'kitsune.sumo.middleware.MobileSwitchMiddleware',
'kitsune.sumo.middleware.Forbidden403Middleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'kitsune.sumo.middleware.RemoveSlashMiddleware',
'kitsune.inproduct.middleware.EuBuildMiddleware',
'kitsune.sumo.middleware.NoCacheHttpsMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'kitsune.sumo.anonymous.AnonymousIdentityMiddleware',
'session_csrf.CsrfMiddleware',
'kitsune.twitter.middleware.SessionMiddleware',
'kitsune.sumo.middleware.PlusToSpaceMiddleware',
'commonware.middleware.ScrubRequestOnException',
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'waffle.middleware.WaffleMiddleware',
'commonware.middleware.ContentTypeOptionsHeader',
'commonware.middleware.StrictTransportMiddleware',
'commonware.middleware.XSSProtectionHeader',
'commonware.middleware.RobotsTagHeader',
# 'axes.middleware.FailedLoginMiddleware'
)
# Auth
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'kitsune.users.auth.TokenLoginBackend',
)
AUTH_PROFILE_MODULE = 'users.Profile'
USER_AVATAR_PATH = 'uploads/avatars/'
DEFAULT_AVATAR = 'sumo/img/avatar.png'
AVATAR_SIZE = 48 # in pixels
MAX_AVATAR_FILE_SIZE = 131072 # 100k, in bytes
GROUP_AVATAR_PATH = 'uploads/groupavatars/'
ACCOUNT_ACTIVATION_DAYS = 30
PASSWORD_HASHERS = (
'kitsune.users.hashers.SHA256PasswordHasher',
)
USERNAME_BLACKLIST = path('kitsune', 'configs', 'username-blacklist.txt')
ROOT_URLCONF = '%s.urls' % PROJECT_MODULE
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates"
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# Check templates in the sumo apps first. There are overrides for the admin
# templates.
path('kitsune', 'sumo', 'templates'),
)
# TODO: Figure out why changing the order of apps (for example, moving
# taggit higher in the list) breaks tests.
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'corsheaders',
'kitsune.users',
'dennis.django_dennis',
'tower',
'pipeline',
'authority',
'timezones',
'waffle',
'kitsune.access',
'kitsune.sumo',
'kitsune.search',
'kitsune.forums',
'djcelery',
'badger',
'cronjobs',
'tidings',
'rest_framework.authtoken',
'kitsune.questions',
'adminplus',
'kitsune.kadmin',
'kitsune.kbadge',
'taggit',
'kitsune.flagit',
'kitsune.upload',
'product_details',
'kitsune.wiki',
'kitsune.kbforums',
'kitsune.dashboards',
'kitsune.gallery',
'kitsune.customercare',
'kitsune.twitter',
'kitsune.inproduct',
'kitsune.postcrash',
'kitsune.landings',
'kitsune.announcements',
'kitsune.community',
'kitsune.messages',
'commonware.response.cookies',
'kitsune.groups',
'kitsune.karma',
'kitsune.tags',
'kitsune.kpi',
'kitsune.products',
'kitsune.notifications',
'kitsune.journal',
'kitsune.motidings',
'rest_framework',
'statici18n',
# 'axes',
# App for Sentry:
'raven.contrib.django',
# Extra apps for testing.
'django_nose',
# Extra app for python migrations.
'django_extensions',
# App for sample data
'eadred',
# In Django <= 1.6, this "must be placed somewhere after all the apps that
# are going to be generating activities". Putting it at the end is the safest.
'actstream',
)
TEST_RUNNER = 'kitsune.sumo.tests.TestSuiteRunner'
def JINJA_CONFIG():
    """Return the Jinja2 environment configuration used by jingo.

    ``finalize`` renders None as the empty string instead of the literal
    text 'None' in templates.

    (Fix: dropped an unused ``from django.conf import settings`` import.)
    """
    config = {'extensions': ['tower.template.i18n',
                             'jinja2.ext.autoescape',
                             'jinja2.ext.with_',
                             'jinja2.ext.do',
                             'pipeline.jinja2.ext.PipelineExtension'],
              'finalize': lambda x: x if x is not None else ''}
    return config
# Let Tower know about our additional keywords.
# DO NOT import an ngettext variant as _lazy.
TOWER_KEYWORDS = {
    '_lazy': None,
}

# Tells the extract script what files to look for l10n in and what
# function handles the extraction. The Tower library expects this.
tower_tmpl = 'tower.management.commands.extract.extract_tower_template'
tower_python = 'tower.management.commands.extract.extract_tower_python'
DOMAIN_METHODS = {
    'messages': [
        ('kitsune/forums/**.py', 'ignore'),
        ('kitsune/forums/**.html', 'ignore'),
        ('kitsune/**/tests/**.py', 'ignore'),
        ('kitsune/**/management/**.py', 'ignore'),
        ('kitsune/**.py', tower_python),
        ('kitsune/**/templates/**.html', tower_tmpl),
        ('vendor/src/django-tidings/**/templates/**.html', tower_tmpl),
        ('vendor/src/django-badger/badger/*.py', tower_python),
        ('vendor/src/django-badger/badger/templatetags/*.py', tower_python),
    ],
    'lhtml': [
        ('kitsune/forums/**.lhtml', 'ignore'),
        ('**/templates/**.lhtml', tower_tmpl)
    ],
    'ltxt': [
        ('**/templates/**.ltxt', tower_tmpl),
    ],
    'javascript': [
        # We can't say **.js because that would dive into any libraries.
        ('kitsune/**/static/js/*-all.js', 'ignore'),
        ('kitsune/**/static/js/*-min.js', 'ignore'),
        ('kitsune/**/static/js/*.js', 'javascript'),
    ],
}

# These domains will not be merged into messages.pot and will use
# separate PO files. See the following URL for an example of how to
# set these domains in DOMAIN_METHODS.
# http://github.com/jbalogh/zamboni/blob/d4c64239c24aa2f1e91276909823d1d1b290f0ee/settings.py#L254 # nopep8
STANDALONE_DOMAINS = [
    TEXT_DOMAIN,
    'javascript',
    'yaocho',
]

STATICI18N_DOMAIN = 'javascript'
STATICI18N_PACKAGES = ['kitsune.sumo']

# If you have trouble extracting strings with Tower, try setting this
# to True
TOWER_ADD_HEADERS = True

#
# Django Pipeline
PIPELINE_COMPILERS = (
    'pipeline.compilers.less.LessCompiler',
    'kitsune.lib.pipeline_compilers.BrowserifyCompiler',
)
PIPELINE_DISABLE_WRAPPER = True
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'
PIPELINE_UGLIFYJS_BINARY = path('node_modules/.bin/uglifyjs')
PIPELINE_UGLIFYJS_ARGUMENTS = '-r "\$super"'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.cssmin.CSSMinCompressor'
PIPELINE_CSSMIN_BINARY = path('node_modules/.bin/cssmin')
PIPELINE_LESS_BINARY = path('node_modules/.bin/lessc')
PIPELINE_LESS_ARGUMENTS = '--autoprefix="> 1%, last 2 versions, ff > 1"'
PIPELINE_BROWSERIFY_BINARY = path('node_modules/.bin/browserify')
PIPELINE_BROWSERIFY_ARGUMENTS = '-t babelify -t debowerify'
if DEBUG:
    PIPELINE_BROWSERIFY_ARGUMENTS += ' -d'
NUNJUCKS_PRECOMPILE_BIN = 'nunjucks-precompile'

#
# Sessions
SESSION_COOKIE_AGE = 4 * 7 * 24 * 60 * 60  # 4 weeks
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
SESSION_EXISTS_COOKIE = 'sumo_session'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'

#
# Connection information for Elastic
ES_URLS = ['http://127.0.0.1:9200']
# Indexes for reading
ES_INDEXES = {
    'default': 'sumo-20130913',
    'non-critical': 'sumo-non-critical',
    'metrics': 'sumo-metrics',
}
# Indexes for indexing--set this to ES_INDEXES if you want to read to
# and write to the same index.
ES_WRITE_INDEXES = ES_INDEXES
# This is prepended to index names to get the final read/write index
# names used by kitsune. This is so that you can have multiple
# environments pointed at the same ElasticSearch cluster and not have
# them bump into one another.
ES_INDEX_PREFIX = 'sumo'
# Keep indexes up to date as objects are made/deleted.
ES_LIVE_INDEXING = False
# Timeout for querying requests
ES_TIMEOUT = 5

SEARCH_MAX_RESULTS = 1000
SEARCH_RESULTS_PER_PAGE = 10

# Search default settings
SEARCH_DEFAULT_CATEGORIES = (10, 20,)
SEARCH_DEFAULT_MAX_QUESTION_AGE = 180 * 24 * 60 * 60  # seconds

# IA default settings
IA_DEFAULT_CATEGORIES = (10, 20,)

# The length for which we would like the user to cache search forms
# and results, in minutes.
SEARCH_CACHE_PERIOD = 15

# Maximum length of the filename. Forms should use this and raise
# ValidationError if the length is exceeded.
# @see http://code.djangoproject.com/ticket/9893
# Columns are 250 but this leaves 50 chars for the upload_to prefix
MAX_FILENAME_LENGTH = 200
MAX_FILEPATH_LENGTH = 250

# Default storage engine - ours does not preserve filenames
DEFAULT_FILE_STORAGE = 'kitsune.upload.storage.RenameFileStorage'

# Auth and permissions related constants
LOGIN_URL = '/users/login'
LOGOUT_URL = '/users/logout'
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
REGISTER_URL = '/users/register'

# Video settings, hard coded here for now.
# TODO: figure out a way that doesn't need these values
WIKI_VIDEO_WIDTH = 640
WIKI_VIDEO_HEIGHT = 480

IMAGE_MAX_FILESIZE = 1048576  # 1 megabyte, in bytes
THUMBNAIL_SIZE = 120  # Thumbnail size, in pixels
THUMBNAIL_UPLOAD_PATH = 'uploads/images/thumbnails/'
IMAGE_UPLOAD_PATH = 'uploads/images/'
# A string listing image mime types to accept, comma separated.
# String must not contain double quotes!
IMAGE_ALLOWED_MIMETYPES = 'image/jpeg,image/png,image/gif'

# Topics
TOPIC_IMAGE_PATH = 'uploads/topics/'

# Products
PRODUCT_IMAGE_PATH = 'uploads/products/'

# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Read-only mode setup.
READ_ONLY = False

# Turn on read-only mode in settings_local.py by putting this line
# at the VERY BOTTOM: read_only_mode(globals())
def read_only_mode(env):
    """Flip a settings namespace into read-only mode, in place.

    Intended to be called from the very bottom of settings_local.py as
    ``read_only_mode(globals())``. Marks the site read-only, points the
    default database at the first configured slave, swaps in the
    read-only auth backend (sessions need a writable database), and
    splices the read-only middleware in just before the CSRF middleware.

    Raises Exception when no slave databases are configured, since the
    default (master) connection must be replaced.
    """
    env['READ_ONLY'] = True

    # The default (master) connection must be swapped for a slave.
    slaves = env.get('SLAVE_DATABASES')
    if not slaves:
        raise Exception("We need at least one slave database.")
    env['DATABASES']['default'] = env['DATABASES'][slaves[0]]

    # No sessions without the database, so disable auth.
    env['AUTHENTICATION_BACKENDS'] = (
        'kitsune.sumo.readonlyauth.ReadOnlyBackend',)

    # Insert the read-only middleware immediately before the CSRF one.
    middleware = list(env['MIDDLEWARE_CLASSES'])
    csrf_index = middleware.index('session_csrf.CsrfMiddleware')
    middleware.insert(csrf_index, 'kitsune.sumo.middleware.ReadOnlyMiddleware')
    env['MIDDLEWARE_CLASSES'] = tuple(middleware)
# Celery
import djcelery
djcelery.setup_loader()

BROKER_HOST = 'localhost'
BROKER_PORT = 5672
BROKER_USER = 'kitsune'
BROKER_PASSWORD = 'kitsune'
BROKER_VHOST = 'kitsune'
CELERY_RESULT_BACKEND = 'amqp'
CELERY_IGNORE_RESULT = True
CELERY_ALWAYS_EAGER = True  # For tests. Set to False for use.
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_LOG_LEVEL = logging.INFO
CELERYD_CONCURRENCY = 4
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True  # Explode loudly during tests.
CELERYD_HIJACK_ROOT_LOGGER = False

# Wiki rebuild settings
WIKI_REBUILD_TOKEN = 'sumo:wiki:full-rebuild'

# Anonymous user cookie
ANONYMOUS_COOKIE_NAME = 'SUMO_ANONID'
ANONYMOUS_COOKIE_MAX_AGE = 30 * 86400  # Seconds

# Do not change this without also deleting all wiki documents:
WIKI_DEFAULT_LANGUAGE = LANGUAGE_CODE

# Gallery settings
GALLERY_DEFAULT_LANGUAGE = WIKI_DEFAULT_LANGUAGE
GALLERY_IMAGE_PATH = 'uploads/gallery/images/'
GALLERY_IMAGE_THUMBNAIL_PATH = 'uploads/gallery/images/thumbnails/'
GALLERY_VIDEO_PATH = 'uploads/gallery/videos/'
GALLERY_VIDEO_URL = None
GALLERY_VIDEO_THUMBNAIL_PATH = 'uploads/gallery/videos/thumbnails/'
GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL = MEDIA_URL + 'img/video-thumb.png'
THUMBNAIL_PROGRESS_WIDTH = 32  # width of the above image
THUMBNAIL_PROGRESS_HEIGHT = 32  # height of the above image
VIDEO_MAX_FILESIZE = 52428800  # 50 megabytes, in bytes

# Customer Care settings
CC_MAX_TWEETS = 500  # Max. no. of tweets in DB
CC_TWEETS_PERPAGE = 100  # How many tweets to collect in one go. Max: 100.
CC_SHOW_REPLIES = True  # Show replies to tweets?
CC_ALLOW_REMOVE = True  # Allow users to hide tweets?
CC_TOP_CONTRIB_CACHE_KEY = 'sumo-cc-top-contrib-stats'
CC_TOP_CONTRIB_SORT = '1w'
CC_TOP_CONTRIB_LIMIT = 10
CC_STATS_CACHE_TIMEOUT = 24 * 60 * 60  # 24 hours
CC_STATS_WARNING = 30 * 60 * 60  # Warn if JSON data is older than 30 hours
CC_REPLIES_GOAL = 175  # Goal # of replies in 24 hours.
CC_TWEETS_DAYS = 7  # Limit tweets to those from the last 7 days.

# If any of these words show up in a tweet, it probably isn't
# actionable, so don't add it to the AoA.
CC_WORD_BLACKLIST = [
    '#UninstallFirefox',
    'pocket',  # bug 1164008
    'vagina',
    'slut',
]

BITLY_API_URL = 'http://api.bitly.com/v3/shorten?callback=?'
BITLY_LOGIN = None
BITLY_API_KEY = None

TWITTER_COOKIE_SECURE = True
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
TWITTER_ACCESS_TOKEN = ''
TWITTER_ACCESS_TOKEN_SECRET = ''

TIDINGS_FROM_ADDRESS = 'notifications@support.mozilla.org'
# Anonymous watches must be confirmed.
TIDINGS_CONFIRM_ANONYMOUS_WATCHES = True
TIDINGS_MODEL_BASE = 'kitsune.sumo.models.ModelBase'
TIDINGS_REVERSE = 'kitsune.sumo.urlresolvers.reverse'

# Google Analytics settings.
GA_KEY = 'longkey'  # Google API client key
GA_ACCOUNT = 'something@developer.gserviceaccount.com'  # Google API Service Account email address
GA_PROFILE_ID = '12345678'  # Google Analytics profile id for SUMO prod
GA_START_DATE = date(2012, 11, 10)

MOBILE_COOKIE = 'msumo'

# Directory of JavaScript test files for django_qunit to run
QUNIT_TEST_DIRECTORY = os.path.join('kitsune', 'sumo', 'static', 'sumo', 'js', 'tests')

# Key to access /services/version. Set to None to disallow.
VERSION_CHECK_TOKEN = None

REDIS_BACKENDS = {
    #'default': 'redis://localhost:6379?socket_timeout=0.5&db=0',
    #'karma': 'redis://localhost:6381?socket_timeout=0.5&db=0',
    #'helpfulvotes': 'redis://localhost:6379?socket_timeout=0.5&db=1',
}

HELPFULVOTES_UNHELPFUL_KEY = 'helpfulvotes_topunhelpful'

LAST_SEARCH_COOKIE = 'last_search'

OPTIPNG_PATH = None

# Zendesk info. Fill in the prefix, email and password in settings_local.py.
ZENDESK_URL = 'https://appsmarket.zendesk.com'
ZENDESK_SUBJECT_PREFIX = '[TEST] '  # Set to '' in prod
ZENDESK_USER_EMAIL = ''
ZENDESK_USER_PASSWORD = ''

# Tasty Pie
API_LIMIT_PER_PAGE = 0

# Change the default for XFrameOptionsMiddleware.
X_FRAME_OPTIONS = 'DENY'

# Where to find the about:support troubleshooting addon.
# This is a link to the latest version, whatever that may be.
TROUBLESHOOTER_ADDON_URL = 'https://addons.mozilla.org/firefox/downloads/latest/426841/addon-426841-latest.xpi'

# SurveyGizmo API
SURVEYGIZMO_USER = ''
SURVEYGIZMO_PASSWORD = ''

# Django Rest Framework
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'kitsune.sumo.api.InactiveSessionAuthentication',
    ),
}

# Django-axes settings.
AXES_LOGIN_FAILURE_LIMIT = 10
AXES_LOCK_OUT_AT_FAILURE = True
AXES_USE_USER_AGENT = False
AXES_COOLOFF_TIME = 1  # hour
AXES_BEHIND_REVERSE_PROXY = True
AXES_REVERSE_PROXY_HEADER = 'HTTP_X_CLUSTER_CLIENT_IP'

# Set this to True to wrap each HTTP request in a transaction on this database.
ATOMIC_REQUESTS = True

# CORS Setup
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = [
    r'^/api/1/gallery/.*$',
    r'^/api/1/kb/.*$',
    r'^/api/1/products/.*',
    r'^/api/1/users/get_token$',
    r'^/api/1/users/test_auth$',
    r'^/api/2/answer/.*$',
    r'^/api/2/pushnotification/.*$',
    r'^/api/2/notification/.*$',
    r'^/api/2/question/.*$',
    r'^/api/2/realtime/.*$',
    r'^/api/2/search/.*$',
    r'^/api/2/user/.*$',
]
# Now combine all those regexes with one big "or".
CORS_URLS_REGEX = re.compile('|'.join('({0})'.format(r) for r in CORS_URLS_REGEX))

# XXX Fix this when Bug 1059545 is fixed
CC_IGNORE_USERS = []

ACTSTREAM_SETTINGS = {
    'USE_JSONFIELD': True,
}
|
|
# yellowbrick.classifier.threshold
# DiscriminationThreshold visualizer for probabilistic classifiers.
#
# Author: Nathan Danielsen
# Author: Benjamin Bengfort
# Created: Wed April 26 20:17:29 2017 -0700
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: threshold.py [] nathan.danielsen@gmail.com $
"""
DiscriminationThreshold visualizer for probabilistic classifiers.
"""
##########################################################################
## Imports
##########################################################################
import bisect
import numpy as np
from scipy.stats import mstats
from collections import defaultdict
from sklearn.base import clone
from sklearn.utils import indexable
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import precision_recall_curve
from sklearn.utils.multiclass import type_of_target
try:
# See #1137: this allows compatibility for scikit-learn >= 0.24
from sklearn.utils import safe_indexing as _safe_indexing
except ImportError:
from sklearn.utils import _safe_indexing
from yellowbrick.base import ModelVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.utils import is_classifier, is_monotonic, is_probabilistic
from yellowbrick.exceptions import YellowbrickTypeError, YellowbrickValueError
# Quantiles for lower bound, curve, and upper bound
QUANTILES_MEDIAN_80 = np.array([0.1, 0.5, 0.9])

# List of threshold metrics; drawing iterates this list in order, so it also
# fixes each metric's color assignment.
METRICS = ["precision", "recall", "fscore", "queue_rate"]
##########################################################################
# Discrimination Thresholds Visualization
##########################################################################
class DiscriminationThreshold(ModelVisualizer):
    """
    Visualizes how precision, recall, f1 score, and queue rate change as the
    discrimination threshold increases. For probabilistic, binary classifiers,
    the discrimination threshold is the probability at which you choose the
    positive class over the negative. Generally this is set to 50%, but
    adjusting the discrimination threshold will adjust sensitivity to false
    positives which is described by the inverse relationship of precision and
    recall with respect to the threshold.

    The visualizer also accounts for variability in the model by running
    multiple trials with different train and test splits of the data. The
    variability is visualized using a band such that the curve is drawn as the
    median score of each trial and the band is from the 10th to 90th
    percentile.

    The visualizer is intended to help users determine an appropriate
    threshold for decision making (e.g. at what threshold do we have a human
    review the data), given a tolerance for precision and recall or limiting
    the number of records to check (the queue rate).

    Parameters
    ----------
    estimator : estimator
        A scikit-learn estimator that should be a classifier. If the model is
        not a classifier, an exception is raised. If the internal model is not
        fitted, it is fit when the visualizer is fitted, unless otherwise specified
        by ``is_fitted``.

    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If not specified the current axes will be
        used (or generated if required).

    n_trials : integer, default: 50
        Number of times to shuffle and split the dataset to account for noise
        in the threshold metrics curves. Note if cv provides > 1 splits,
        the number of trials will be n_trials * cv.get_n_splits()

    cv : float or cross-validation generator, default: 0.1
        Determines the splitting strategy for each trial. Possible inputs are:

        - float, to specify the percent of the test split
        - object to be used as cross-validation generator

        This attribute is meant to give flexibility with stratified splitting
        but if a splitter is provided, it should only return one split and
        have shuffle set to True.

    fbeta : float, 1.0 by default
        The strength of recall versus precision in the F-score.

    argmax : str or None, default: 'fscore'
        Annotate the threshold maximized by the supplied metric (see exclude
        for the possible metrics to use). If None or passed to exclude,
        will not annotate the graph.

    exclude : str or list, optional
        Specify metrics to omit from the graph, can include:

        - ``"precision"``
        - ``"recall"``
        - ``"queue_rate"``
        - ``"fscore"``

        Excluded metrics will not be displayed in the graph, nor will they
        be available in ``thresholds_``; however, they will be computed on fit.

    quantiles : sequence, default: np.array([0.1, 0.5, 0.9])
        Specify the quantiles to view model variability across a number of
        trials. Must be monotonic and have three elements such that the first
        element is the lower bound, the second is the drawn curve, and the
        third is the upper bound. By default the curve is drawn at the median,
        and the bounds from the 10th percentile to the 90th percentile.

    random_state : int, optional
        Used to seed the random state for shuffling the data while composing
        different train and test splits. If supplied, the random state is
        incremented in a deterministic fashion for each split.

        Note that if a splitter is provided, it's random state will also be
        updated with this random state, even if it was previously set.

    is_fitted : bool or str, default="auto"
        Specify if the wrapped estimator is already fitted. If False, the estimator
        will be fit when the visualizer is fit, otherwise, the estimator will not be
        modified. If "auto" (default), a helper method will check if the estimator
        is fitted before fitting it again.

    force_model : bool, default: False
        Do not check to ensure that the underlying estimator is a classifier. This
        will prevent an exception when the visualizer is initialized but may result
        in unexpected or unintended behavior.

    kwargs : dict
        Keyword arguments passed to the visualizer base classes.

    Attributes
    ----------
    thresholds_ : array
        The uniform thresholds identified by each of the trial runs.

    cv_scores_ : dict of arrays of ``len(thresholds_)``
        The values for all included metrics including the upper and lower
        bounds of the metrics defined by quantiles.

    Notes
    -----
    The term "discrimination threshold" is rare in the literature. Here, we
    use it to mean the probability at which the positive class is selected
    over the negative class in binary classification.

    Classification models must implement either a ``decision_function`` or
    ``predict_proba`` method in order to be used with this class. A
    ``YellowbrickTypeError`` is raised otherwise.

    .. caution:: This method only works for binary, probabilistic classifiers.

    .. seealso::
        For a thorough explanation of discrimination thresholds, see:
        `Visualizing Machine Learning Thresholds to Make Better Business
        Decisions
        <http://blog.insightdatalabs.com/visualizing-classifier-thresholds/>`_
        by Insight Data.
    """

    def __init__(
        self,
        estimator,
        ax=None,
        n_trials=50,
        cv=0.1,
        fbeta=1.0,
        argmax="fscore",
        exclude=None,
        quantiles=QUANTILES_MEDIAN_80,
        random_state=None,
        is_fitted="auto",
        force_model=False,
        **kwargs
    ):
        # Perform some quick type checking to help users avoid error.
        if not force_model and (
            not is_classifier(estimator) or not is_probabilistic(estimator)
        ):
            raise YellowbrickTypeError(
                "{} requires a probabilistic binary classifier".format(
                    self.__class__.__name__
                )
            )

        # Check the various inputs
        self._check_quantiles(quantiles)
        self._check_cv(cv)
        self._check_exclude(exclude)
        self._check_argmax(argmax, exclude)

        # Initialize the ModelVisualizer
        super(DiscriminationThreshold, self).__init__(
            estimator, ax=ax, is_fitted=is_fitted, **kwargs
        )

        # Set params
        self.n_trials = n_trials
        self.cv = cv
        self.fbeta = fbeta
        self.argmax = argmax
        self.exclude = exclude
        self.quantiles = quantiles
        self.random_state = random_state

    def fit(self, X, y, **kwargs):
        """
        Fit is the entry point for the visualizer. Given instances described
        by X and binary classes described in the target y, fit performs n
        trials by shuffling and splitting the dataset then computing the
        precision, recall, f1, and queue rate scores for each trial. The
        scores are aggregated by the quantiles expressed then drawn.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features

        y : ndarray or Series of length n
            An array or series of target or class values. The target y must
            be a binary classification target.

        kwargs: dict
            keyword arguments passed to Scikit-Learn API.

        Returns
        -------
        self : instance
            Returns the instance of the visualizer

        raises: YellowbrickValueError
            If the target y is not a binary classification target.
        """
        # Check target before metrics raise crazy exceptions
        # TODO: Switch to using target type from utils.target
        if type_of_target(y) != "binary":
            raise YellowbrickValueError("multiclass format is not supported")

        # Make arrays indexable for cross validation
        X, y = indexable(X, y)

        # TODO: parallelize trials with joblib (using sklearn utility)
        # NOTE: parallelization with matplotlib is tricky at best!
        trials = [
            metric
            for idx in range(self.n_trials)
            for metric in self._split_fit_score_trial(X, y, idx)
        ]

        # Compute maximum number of uniform thresholds across all trials
        n_thresholds = np.array([len(t["thresholds"]) for t in trials]).min()
        self.thresholds_ = np.linspace(0.0, 1.0, num=n_thresholds)

        # Filter metrics and collect values for uniform thresholds
        metrics = frozenset(METRICS) - self._check_exclude(self.exclude)
        uniform_metrics = defaultdict(list)

        for trial in trials:
            rows = defaultdict(list)
            for t in self.thresholds_:
                idx = bisect.bisect_left(trial["thresholds"], t)
                for metric in metrics:
                    rows[metric].append(trial[metric][idx])

            for metric, row in rows.items():
                uniform_metrics[metric].append(row)

        # Convert metrics to metric arrays
        uniform_metrics = {
            metric: np.array(values) for metric, values in uniform_metrics.items()
        }

        # Perform aggregation and store cv_scores_
        quantiles = self._check_quantiles(self.quantiles)
        self.cv_scores_ = {}

        for metric, values in uniform_metrics.items():
            # Compute the lower, median, and upper plots
            lower, median, upper = mstats.mquantiles(values, prob=quantiles, axis=0)

            # Store the aggregates in cv scores
            self.cv_scores_[metric] = median
            self.cv_scores_["{}_lower".format(metric)] = lower
            self.cv_scores_["{}_upper".format(metric)] = upper

        # TODO: fit the underlying estimator with the best decision threshold
        # Call super to ensure the underlying estimator is correctly fitted
        super(DiscriminationThreshold, self).fit(X, y)

        # Draw and always return self
        self.draw()
        return self

    def _split_fit_score_trial(self, X, y, idx=0):
        """
        Splits the dataset, fits a clone of the estimator, then scores it
        according to the required metrics.

        The index of the split is added to the random_state if the
        random_state is not None; this ensures that every split is shuffled
        differently but in a deterministic fashion for testing purposes.
        """
        random_state = self.random_state
        if random_state is not None:
            random_state += idx

        splitter = self._check_cv(self.cv, random_state)
        for train_index, test_index in splitter.split(X, y):
            # Safe indexing handles multiple types of inputs including
            # DataFrames and structured arrays - required for generic splits.
            X_train = _safe_indexing(X, train_index)
            y_train = _safe_indexing(y, train_index)
            X_test = _safe_indexing(X, test_index)
            y_test = _safe_indexing(y, test_index)

            model = clone(self.estimator)
            model.fit(X_train, y_train)

            if hasattr(model, "predict_proba"):
                # Get the probabilities for the positive class
                y_scores = model.predict_proba(X_test)[:, 1]
            else:
                # Use the decision function to get the scores
                y_scores = model.decision_function(X_test)

            # Compute the curve metrics and thresholds
            curve_metrics = precision_recall_curve(y_test, y_scores)
            precision, recall, thresholds = curve_metrics

            # Compute the F1 score from precision and recall
            # Don't need to warn for F, precision/recall would have warned
            with np.errstate(divide="ignore", invalid="ignore"):
                beta = self.fbeta ** 2
                f_score = (1 + beta) * precision * recall / (beta * precision + recall)

            # Ensure thresholds ends at 1
            thresholds = np.append(thresholds, 1)

            # Compute the queue rate
            queue_rate = np.array(
                [(y_scores >= threshold).mean() for threshold in thresholds]
            )

            yield {
                "thresholds": thresholds,
                "precision": precision,
                "recall": recall,
                "fscore": f_score,
                "queue_rate": queue_rate,
            }

    def draw(self):
        """
        Draws the cv scores as a line chart on the current axes.
        """
        # Set the colors from the supplied values or reasonable defaults
        color_values = resolve_colors(n_colors=4, colors=self.color)

        # Get the metric used to annotate the graph with its maximizing value
        argmax = self._check_argmax(self.argmax, self.exclude)

        for idx, metric in enumerate(METRICS):
            # Skip any excluded labels
            if metric not in self.cv_scores_:
                continue

            # Get the color ensuring every metric has a static color
            color = color_values[idx]

            # Make the label pretty
            if metric == "fscore":
                if self.fbeta == 1.0:
                    label = "$f_1$"
                else:
                    # BUGFIX: use a raw string so "\b" is not interpreted as
                    # a backspace escape, and close the mathtext expression
                    # with a trailing "$" so matplotlib renders it.
                    label = r"$f_{{\beta={:0.1f}}}$".format(self.fbeta)
            else:
                label = metric.replace("_", " ")

            # Draw the metric values
            self.ax.plot(
                self.thresholds_, self.cv_scores_[metric], color=color, label=label
            )

            # Draw the upper and lower bounds
            lower = self.cv_scores_["{}_lower".format(metric)]
            upper = self.cv_scores_["{}_upper".format(metric)]

            self.ax.fill_between(
                self.thresholds_, upper, lower, alpha=0.35, linewidth=0, color=color
            )

            # Annotate the graph with the maximizing value. Use a distinct
            # name for the index so the ``argmax`` metric name is not
            # shadowed (the annotation can only ever fire for one metric).
            if argmax and argmax == metric:
                best_idx = self.cv_scores_[metric].argmax()
                threshold = self.thresholds_[best_idx]
                self.ax.axvline(
                    threshold,
                    ls="--",
                    c="k",
                    lw=1,
                    label="$t_{}={:0.2f}$".format(metric[0], threshold),
                )

        return self.ax

    def finalize(self, **kwargs):
        """
        Sets a title and axis labels on the visualizer and ensures that the
        axis limits are scaled to valid threshold values.

        Parameters
        ----------
        kwargs: generic keyword arguments.

        Notes
        -----
        Generally this method is called from show and not directly by the user.
        """
        super(DiscriminationThreshold, self).finalize(**kwargs)

        # Set the title of the threshold visualization
        self.set_title("Threshold Plot for {}".format(self.name))

        self.ax.legend(frameon=True, loc="best")
        self.ax.set_xlabel("discrimination threshold")
        self.ax.set_ylabel("score")
        self.ax.set_xlim(0.0, 1.0)
        self.ax.set_ylim(0.0, 1.0)

    def _check_quantiles(self, val):
        """
        Validate the quantiles passed in. Returns the np array if valid.
        """
        if len(val) != 3 or not is_monotonic(val) or not np.all(val < 1):
            raise YellowbrickValueError(
                "quantiles must be a sequence of three "
                "monotonically increasing values less than 1"
            )
        return np.asarray(val)

    def _check_cv(self, val, random_state=None):
        """
        Validate the cv method passed in. Returns the split strategy if no
        validation exception is raised.
        """
        # Use default splitter in this case
        if val is None:
            val = 0.1

        if isinstance(val, float) and val <= 1.0:
            return ShuffleSplit(n_splits=1, test_size=val, random_state=random_state)

        if hasattr(val, "split") and hasattr(val, "get_n_splits"):
            if random_state is not None and hasattr(val, "random_state"):
                val.random_state = random_state
            return val

        raise YellowbrickValueError("'{}' is not a valid cv splitter".format(type(val)))

    def _check_exclude(self, val):
        """
        Validate the excluded metrics. Returns the set of excluded params.
        """
        if val is None:
            exclude = frozenset()
        elif isinstance(val, str):
            exclude = frozenset([val.lower()])
        else:
            exclude = frozenset(map(lambda s: s.lower(), val))

        if len(exclude - frozenset(METRICS)) > 0:
            raise YellowbrickValueError(
                "'{}' is not a valid metric to exclude".format(repr(val))
            )

        return exclude

    def _check_argmax(self, val, exclude=None):
        """
        Validate the argmax metric. Returns the metric used to annotate the graph.
        """
        if val is None:
            return None

        argmax = val.lower()
        if argmax not in METRICS:
            raise YellowbrickValueError(
                "'{}' is not a valid metric to use".format(repr(val))
            )

        exclude = self._check_exclude(exclude)
        if argmax in exclude:
            argmax = None

        return argmax
##########################################################################
# Quick Methods
##########################################################################
def discrimination_threshold(
    estimator,
    X,
    y,
    ax=None,
    n_trials=50,
    cv=0.1,
    fbeta=1.0,
    argmax="fscore",
    exclude=None,
    quantiles=QUANTILES_MEDIAN_80,
    random_state=None,
    is_fitted="auto",
    force_model=False,
    show=True,
    **kwargs
):
    """Quick method: build, fit, and display a DiscriminationThreshold.

    Plots precision, recall, f-score, and queue rate against the
    discrimination threshold of a probabilistic, binary classifier — the
    probability at which the positive class is chosen over the negative —
    running multiple shuffle/split trials and drawing a quantile band
    around the median curve.

    .. seealso:: :class:`DiscriminationThreshold` for the full discussion.

    Parameters
    ----------
    estimator : estimator
        A scikit-learn classifier. If not already fitted, it is fit when
        the visualizer is fitted, unless ``is_fitted`` says otherwise.

    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features.

    y : ndarray or Series of length n
        Binary classification target.

    ax : matplotlib Axes, default: None
        The axes to plot on; the current axes are used (or created) if None.

    n_trials : integer, default: 50
        Number of shuffle/split trials; multiplied by ``cv.get_n_splits()``
        when the splitter yields more than one split.

    cv : float or cross-validation generator, default: 0.1
        Either the test-split fraction or a splitter object (which should
        produce a single shuffled split).

    fbeta : float, default: 1.0
        The strength of recall versus precision in the F-score.

    argmax : str or None, default: 'fscore'
        Metric whose maximizing threshold is annotated; None (or an
        excluded metric) disables the annotation.

    exclude : str or list, optional
        Metrics to omit from the graph: ``"precision"``, ``"recall"``,
        ``"queue_rate"``, ``"fscore"``. They are still computed on fit.

    quantiles : sequence, default: np.array([0.1, 0.5, 0.9])
        Three monotonic quantiles: lower band, drawn curve, upper band.

    random_state : int, optional
        Seed for shuffling; incremented deterministically per split. A
        provided splitter's random state is also updated.

    is_fitted : bool or str, default: "auto"
        Whether the wrapped estimator is already fitted; "auto" checks
        before fitting again.

    force_model : bool, default: False
        Skip the probabilistic-classifier type check.

    show : bool, default: True
        If True, calls ``show()`` (which calls ``plt.show()``); if False,
        only ``finalize()`` is called.

    kwargs : dict
        Keyword arguments passed to the visualizer base classes.

    Returns
    -------
    viz : DiscriminationThreshold
        Returns the fitted and finalized visualizer object.

    Examples
    --------
    >>> from yellowbrick.classifier.threshold import discrimination_threshold
    >>> from sklearn.linear_model import LogisticRegression
    >>> from yellowbrick.datasets import load_occupancy
    >>> X, y = load_occupancy()
    >>> model = LogisticRegression(multi_class="auto", solver="liblinear")
    >>> discrimination_threshold(model, X, y)
    """
    # Gather the constructor options once so the hand-off is a single splat
    # (explicit parameters cannot collide with **kwargs by construction).
    options = dict(
        ax=ax,
        n_trials=n_trials,
        cv=cv,
        fbeta=fbeta,
        argmax=argmax,
        exclude=exclude,
        quantiles=quantiles,
        random_state=random_state,
        is_fitted=is_fitted,
        force_model=force_model,
    )
    options.update(kwargs)

    viz = DiscriminationThreshold(estimator, **options)

    # Fitting runs the trials and draws the chart on the axes.
    viz.fit(X, y)

    # Either render to screen or just finalize the figure for the caller.
    if show:
        viz.show()
    else:
        viz.finalize()

    return viz
|
|
##
# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from calendarserver.version import version
from twext.internet.gaiendpoint import GAIEndpoint
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.client.http import HTTPClientProtocol, ClientRequest
from txweb2.dav.util import allDataFromStream
from txweb2.http_headers import Headers, MimeType
from txweb2.stream import MemoryStream, readStream
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.protocol import Factory
from twistedcaldav.accounting import accountingEnabledForCategory, \
emitAccounting
from twistedcaldav.client.pool import _configuredClientContextFactory
from twistedcaldav.config import config
from twistedcaldav.util import utf8String
from cStringIO import StringIO
import base64
import json
# Module-level logger shared by the cross-pod request machinery below.
log = Logger()
class ConduitRequest(object):
    """
    An HTTP request between pods. This is typically used to send and receive JSON data. However,
    for attachments, we need to send the actual attachment data as the request body, so in that
    case the JSON data is sent in an HTTP header.
    """

    def __init__(self, server, data, stream=None, stream_type=None, writeStream=None):
        """
        @param server: remote pod; must provide C{details()} and C{secretHeader()}
        @param data: JSON-serializable payload; serialized immediately and either sent
            as the request body or, when C{stream} is present, in the C{XPOD} header
        @param stream: optional request body stream (attachment upload data)
        @param stream_type: MIME type for C{stream}
        @param writeStream: optional sink for the response body (attachment download)
        """
        self.server = server
        self.data = json.dumps(data)
        self.stream = stream
        self.streamType = stream_type
        self.writeStream = writeStream

    @inlineCallbacks
    def doRequest(self, txn):
        """
        Send the request to the other pod and decode the response.

        @param txn: transaction whose C{logItems} dict is used to count
            cross-pod requests under the C{"xpod"} key
        @return: deferred firing with the decoded JSON response data (or, for
            attachment downloads, a small dict describing the received stream)
        @raise ValueError: on any failure or unexpected response status
        """
        # Generate an HTTP client request
        try:
            # Track how many cross-pod requests this transaction issued.
            if "xpod" not in txn.logItems:
                txn.logItems["xpod"] = 0
            txn.logItems["xpod"] += 1

            response = (yield self._processRequest())

            # self.loggedRequest is set by _processRequest when accounting is on.
            if accountingEnabledForCategory("xPod"):
                self.loggedResponse = yield self.logResponse(response)
                emitAccounting("xPod", "", self.loggedRequest + "\n" + self.loggedResponse, "POST")

            if response.code == responsecode.OK:
                if self.writeStream is None:
                    # Normal case: the whole response body is JSON.
                    data = (yield allDataFromStream(response.stream))
                    data = json.loads(data)
                else:
                    # Attachment download: pipe the body into writeStream and
                    # synthesize a small JSON-like result describing it.
                    yield readStream(response.stream, self.writeStream.write)
                    content_type = response.headers.getHeader("content-type")
                    if content_type is None:
                        content_type = MimeType("application", "octet-stream")
                    content_disposition = response.headers.getHeader("content-disposition")
                    if content_disposition is None or "filename" not in content_disposition.params:
                        filename = ""
                    else:
                        filename = content_disposition.params["filename"]
                    self.writeStream.resetDetails(content_type, filename)
                    yield self.writeStream.loseConnection()
                    data = {
                        "result": "ok",
                        "content-type": content_type,
                        "name": filename,
                    }
            elif response.code == responsecode.BAD_REQUEST:
                # The other pod reports errors as JSON with a 400 status.
                data = (yield allDataFromStream(response.stream))
                data = json.loads(data)
            else:
                raise ValueError("Incorrect cross-pod response status code: {}".format(response.code))

        except Exception as e:
            # Request failed
            log.error("Could not do cross-pod request : {request} {ex}", request=self, ex=e)
            raise ValueError("Failed cross-pod request: {}".format(e))

        returnValue(data)

    @inlineCallbacks
    def logRequest(self, request):
        """
        Log an HTTP request.

        @return: deferred firing with the formatted request text
        """
        iostr = StringIO()
        iostr.write(">>>> Request start\n\n")
        if hasattr(request, "clientproto"):
            protocol = "HTTP/{:d}.{:d}".format(request.clientproto[0], request.clientproto[1])
        else:
            protocol = "HTTP/1.1"
        iostr.write("{} {} {}\n".format(request.method, request.uri, protocol))
        for name, valuelist in request.headers.getAllRawHeaders():
            for value in valuelist:
                # Do not log authorization details
                if name not in ("Authorization",):
                    iostr.write("{}: {}\n".format(name, value))
                else:
                    iostr.write("{}: xxxxxxxxx\n".format(name))
        iostr.write("\n")

        # We need to play a trick with the request stream as we can only read it once. So we
        # read it, store the value in a MemoryStream, and replace the request's stream with that,
        # so the data can be read again. Note if we are sending an attachment, we won't log
        # the attachment data as we do not want to read it all into memory.
        if self.stream is None:
            data = (yield allDataFromStream(request.stream))
            iostr.write(data)
            request.stream = MemoryStream(data if data is not None else "")
            request.stream.doStartReading = None
        else:
            iostr.write("<<Stream Type: {}>>\n".format(self.streamType))

        iostr.write("\n\n>>>> Request end\n")
        returnValue(iostr.getvalue())

    @inlineCallbacks
    def logResponse(self, response):
        """
        Log an HTTP response.

        @return: deferred firing with the formatted response text
        """
        iostr = StringIO()
        iostr.write(">>>> Response start\n\n")
        code_message = responsecode.RESPONSES.get(response.code, "Unknown Status")
        iostr.write("HTTP/1.1 {:d} {}\n".format(response.code, code_message))
        for name, valuelist in response.headers.getAllRawHeaders():
            for value in valuelist:
                # Do not log authorization details
                if name not in ("WWW-Authenticate",):
                    iostr.write("{}: {}\n".format(name, value))
                else:
                    iostr.write("{}: xxxxxxxxx\n".format(name))
        iostr.write("\n")

        # We need to play a trick with the response stream to ensure we don't mess it up. So we
        # read it, store the value in a MemoryStream, and replace the response's stream with that,
        # so the data can be read again.
        data = (yield allDataFromStream(response.stream))
        iostr.write(data)
        response.stream = MemoryStream(data if data is not None else "")
        response.stream.doStartReading = None

        iostr.write("\n\n>>>> Response end\n")
        returnValue(iostr.getvalue())

    @inlineCallbacks
    def _processRequest(self):
        """
        Process the request by sending it to the relevant server.

        @return: the HTTP response.
        @rtype: L{Response}
        """
        ssl, host, port, _ignore_path = self.server.details()
        path = "/" + config.Servers.ConduitName

        headers = Headers()
        headers.setHeader("Host", utf8String(host + ":{}".format(port)))
        if self.streamType:
            # For attachments we put the base64-encoded JSON data into a header
            headers.setHeader("Content-Type", self.streamType)
            headers.addRawHeader("XPOD", base64.b64encode(self.data))
        else:
            headers.setHeader("Content-Type", MimeType("application", "json", params={"charset": "utf-8", }))
        headers.setHeader("User-Agent", "CalendarServer/{}".format(version))
        # Shared-secret header authenticates this pod to the other one.
        headers.addRawHeader(*self.server.secretHeader())

        from twisted.internet import reactor
        f = Factory()
        f.protocol = HTTPClientProtocol
        ep = GAIEndpoint(reactor, host, port, _configuredClientContextFactory() if ssl else None)
        proto = (yield ep.connect(f))
        request = ClientRequest("POST", path, headers, self.stream if self.stream is not None else self.data)

        if accountingEnabledForCategory("xPod"):
            self.loggedRequest = yield self.logRequest(request)

        response = (yield proto.submitRequest(request))
        returnValue(response)
|
|
#!/usr/bin/env python
# Authors:
# Trevor Perrin
# Kees Bos - Added tests for XML-RPC
# Dimitris Moraitis - Anon ciphersuites
# Marcelo Fernandez - Added test for NPN
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
from __future__ import print_function
import sys
import os
import os.path
import socket
import time
import getopt
try:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from tlslite import TLSConnection, Fault, HandshakeSettings, \
X509, X509CertChain, IMAP4_TLS, VerifierDB, Session, SessionCache, \
parsePEMKey, constants, \
AlertDescription, HTTPTLSConnection, TLSSocketServerMixIn, \
POP3_TLS, m2cryptoLoaded, pycryptoLoaded, gmpyLoaded, tackpyLoaded, \
Checker, __version__
from tlslite.errors import *
from tlslite.utils.cryptomath import prngName
try:
import xmlrpclib
except ImportError:
# Python 3
from xmlrpc import client as xmlrpclib
from tlslite import *
try:
from tack.structures.Tack import Tack
except ImportError:
pass
def printUsage(s=None):
    """Print an optional error message plus usage help, then exit with -1."""
    crypto = "M2Crypto/OpenSSL" if m2cryptoLoaded else "Python crypto"
    if s:
        print("ERROR: %s" % s)
    print("""\ntls.py version %s (using %s)
Commands:
  server HOST:PORT DIRECTORY
  client HOST:PORT DIRECTORY
""" % (__version__, crypto))
    sys.exit(-1)
def testConnClient(conn):
b1 = os.urandom(1)
b10 = os.urandom(10)
b100 = os.urandom(100)
b1000 = os.urandom(1000)
conn.write(b1)
conn.write(b10)
conn.write(b100)
conn.write(b1000)
assert(conn.read(min=1, max=1) == b1)
assert(conn.read(min=10, max=10) == b10)
assert(conn.read(min=100, max=100) == b100)
assert(conn.read(min=1000, max=1000) == b1000)
def clientTestCmd(argv):
    """Run the client half of the TLS test suite.

    argv: [HOST:PORT, DIRECTORY] -- must match a concurrently running
    ``serverTestCmd`` with the same arguments; each numbered test here pairs
    with the same-numbered test on the server side.
    """
    address = argv[0]
    dir = argv[1]

    #Split address into hostname/port tuple
    address = address.split(":")
    address = ( address[0], int(address[1]) )

    def connect():
        # Fresh TCP connection per test, wrapped in a TLSConnection.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(sock, 'settimeout'): #It's a python 2.3 feature
            sock.settimeout(5)
        sock.connect(address)
        c = TLSConnection(sock)
        return c

    test = 0
    badFault = False

    print("Test 0 - anonymous handshake")
    connection = connect()
    connection.handshakeClientAnonymous()
    testConnClient(connection)
    connection.close()

    print("Test 1 - good X509 (plus SNI)")
    connection = connect()
    connection.handshakeClientCert(serverName=address[0])
    testConnClient(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    assert(connection.session.serverName == address[0])
    connection.close()

    print("Test 1.a - good X509, SSLv3")
    connection = connect()
    settings = HandshakeSettings()
    settings.minVersion = (3,0)
    settings.maxVersion = (3,0)
    connection.handshakeClientCert(settings=settings)
    testConnClient(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    connection.close()

    print("Test 1.b - good X509, RC4-MD5")
    connection = connect()
    settings = HandshakeSettings()
    settings.macNames = ["md5"]
    connection.handshakeClientCert(settings=settings)
    testConnClient(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    assert(connection.session.cipherSuite == constants.CipherSuite.TLS_RSA_WITH_RC4_128_MD5)
    connection.close()

    # TACK pinning tests only run when tackpy is installed.
    if tackpyLoaded:
        settings = HandshakeSettings()
        settings.useExperimentalTackExtension = True

        print("Test 2.a - good X.509, TACK")
        connection = connect()
        connection.handshakeClientCert(settings=settings)
        assert(connection.session.tackExt.tacks[0].getTackId() == "rrted.ptvtl.d2uiq.ox2xe.w4ss3")
        assert(connection.session.tackExt.activation_flags == 1)
        testConnClient(connection)
        connection.close()

        print("Test 2.b - good X.509, TACK unrelated to cert chain")
        connection = connect()
        try:
            connection.handshakeClientCert(settings=settings)
            assert(False)
        except TLSLocalAlert as alert:
            if alert.description != AlertDescription.illegal_parameter:
                raise
        connection.close()

    print("Test 3 - good SRP")
    connection = connect()
    connection.handshakeClientSRP("test", "password")
    testConnClient(connection)
    connection.close()

    print("Test 4 - SRP faults")
    # Inject each known fault; a proper TLSFaultError means the peer caught it.
    for fault in Fault.clientSrpFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeClientSRP("test", "password")
            print(" Good Fault %s" % (Fault.faultNames[fault]))
        except TLSFaultError as e:
            print(" BAD FAULT %s: %s" % (Fault.faultNames[fault], str(e)))
            badFault = True

    print("Test 6 - good SRP: with X.509 certificate, TLSv1.0")
    settings = HandshakeSettings()
    settings.minVersion = (3,1)
    settings.maxVersion = (3,1)
    connection = connect()
    connection.handshakeClientSRP("test", "password", settings=settings)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    testConnClient(connection)
    connection.close()

    print("Test 7 - X.509 with SRP faults")
    for fault in Fault.clientSrpFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeClientSRP("test", "password")
            print(" Good Fault %s" % (Fault.faultNames[fault]))
        except TLSFaultError as e:
            print(" BAD FAULT %s: %s" % (Fault.faultNames[fault], str(e)))
            badFault = True

    print("Test 11 - X.509 faults")
    for fault in Fault.clientNoAuthFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeClientCert()
            print(" Good Fault %s" % (Fault.faultNames[fault]))
        except TLSFaultError as e:
            print(" BAD FAULT %s: %s" % (Fault.faultNames[fault], str(e)))
            badFault = True

    print("Test 14 - good mutual X509")
    # Client presents its own cert chain from the fixture directory.
    x509Cert = X509().parse(open(os.path.join(dir, "clientX509Cert.pem")).read())
    x509Chain = X509CertChain([x509Cert])
    s = open(os.path.join(dir, "clientX509Key.pem")).read()
    x509Key = parsePEMKey(s, private=True)

    connection = connect()
    connection.handshakeClientCert(x509Chain, x509Key)
    testConnClient(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    connection.close()

    print("Test 14.a - good mutual X509, SSLv3")
    connection = connect()
    settings = HandshakeSettings()
    settings.minVersion = (3,0)
    settings.maxVersion = (3,0)
    connection.handshakeClientCert(x509Chain, x509Key, settings=settings)
    testConnClient(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    connection.close()

    print("Test 15 - mutual X.509 faults")
    for fault in Fault.clientCertFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeClientCert(x509Chain, x509Key)
            print(" Good Fault %s" % (Fault.faultNames[fault]))
        except TLSFaultError as e:
            print(" BAD FAULT %s: %s" % (Fault.faultNames[fault], str(e)))
            badFault = True

    print("Test 18 - good SRP, prepare to resume... (plus SNI)")
    connection = connect()
    connection.handshakeClientSRP("test", "password", serverName=address[0])
    testConnClient(connection)
    connection.close()
    session = connection.session

    print("Test 19 - resumption (plus SNI)")
    # "garbage" password is ignored because the cached session is resumed.
    connection = connect()
    connection.handshakeClientSRP("test", "garbage", serverName=address[0],
                                  session=session)
    testConnClient(connection)
    #Don't close! -- see below

    print("Test 20 - invalidated resumption (plus SNI)")
    connection.sock.close() #Close the socket without a close_notify!
    connection = connect()
    try:
        connection.handshakeClientSRP("test", "garbage",
                                      serverName=address[0], session=session)
        assert(False)
    except TLSRemoteAlert as alert:
        if alert.description != AlertDescription.bad_record_mac:
            raise
    connection.close()

    print("Test 21 - HTTPS test X.509")
    # The server restarts its listener on port+1 for the HTTPS phase.
    address = address[0], address[1]+1
    if hasattr(socket, "timeout"):
        timeoutEx = socket.timeout
    else:
        timeoutEx = socket.error
    while 1:
        try:
            time.sleep(2)
            htmlBody = bytearray(open(os.path.join(dir, "index.html")).read(), "utf-8")
            fingerprint = None
            # Second pass pins the fingerprint learned on the first pass.
            for y in range(2):
                checker =Checker(x509Fingerprint=fingerprint)
                h = HTTPTLSConnection(\
                        address[0], address[1], checker=checker)
                for x in range(3):
                    h.request("GET", "/index.html")
                    r = h.getresponse()
                    assert(r.status == 200)
                    b = bytearray(r.read())
                    assert(b == htmlBody)
                fingerprint = h.tlsSession.serverCertChain.getFingerprint()
                assert(fingerprint)
            time.sleep(2)
            break
        except timeoutEx:
            print("timeout, retrying...")
            pass

    address = address[0], address[1]+1

    implementations = []
    if m2cryptoLoaded:
        implementations.append("openssl")
    if pycryptoLoaded:
        implementations.append("pycrypto")
    implementations.append("python")

    print("Test 22 - different ciphers, TLSv1.0")
    for implementation in implementations:
        for cipher in ["aes128", "aes256", "rc4"]:
            print("Test 22:", end=' ')
            connection = connect()
            settings = HandshakeSettings()
            settings.cipherNames = [cipher]
            settings.cipherImplementations = [implementation, "python"]
            settings.minVersion = (3,1)
            settings.maxVersion = (3,1)
            connection.handshakeClientCert(settings=settings)
            testConnClient(connection)
            print("%s %s" % (connection.getCipherName(), connection.getCipherImplementation()))
            connection.close()

    print("Test 23 - throughput test")
    for implementation in implementations:
        for cipher in ["aes128", "aes256", "3des", "rc4"]:
            # 3DES only exists in the native implementations.
            if cipher == "3des" and implementation not in ("openssl", "pycrypto"):
                continue
            print("Test 23:", end=' ')
            connection = connect()
            settings = HandshakeSettings()
            settings.cipherNames = [cipher]
            settings.cipherImplementations = [implementation, "python"]
            connection.handshakeClientCert(settings=settings)
            print("%s %s:" % (connection.getCipherName(), connection.getCipherImplementation()), end=' ')
            # NOTE(review): time.clock() was removed in Python 3.8 -- this script
            # predates that; would need time.perf_counter() on modern Python.
            startTime = time.clock()
            connection.write(b"hello"*10000)
            h = connection.read(min=50000, max=50000)
            stopTime = time.clock()
            if stopTime-startTime:
                print("100K exchanged at rate of %d bytes/sec" % int(100000/(stopTime-startTime)))
            else:
                print("100K exchanged very fast")
            assert(h == b"hello"*10000)
            connection.close()

    # Tests 24.a-g: NPN negotiation; expected winner depends on the server's
    # advertised list in the matching serverTestCmd step.
    print("Test 24.a - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'http/1.1')
    connection.close()

    print("Test 24.b - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"spdy/2", b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'spdy/2')
    connection.close()

    print("Test 24.c - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"spdy/2", b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'spdy/2')
    connection.close()

    print("Test 24.d - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"spdy/3", b"spdy/2", b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'spdy/2')
    connection.close()

    print("Test 24.e - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"spdy/3", b"spdy/2", b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'spdy/3')
    connection.close()

    print("Test 24.f - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'http/1.1')
    connection.close()

    print("Test 24.g - Next-Protocol Client Negotiation")
    connection = connect()
    connection.handshakeClientCert(nextProtos=[b"spdy/2", b"http/1.1"])
    #print("  Next-Protocol Negotiated: %s" % connection.next_proto)
    assert(connection.next_proto == b'spdy/2')
    connection.close()

    print('Test 25 - good standard XMLRPC https client')
    time.sleep(2) # Hack for lack of ability to set timeout here
    address = address[0], address[1]+1
    server = xmlrpclib.Server('https://%s:%s' % address)
    assert server.add(1,2) == 3
    assert server.pow(2,4) == 16

    print('Test 26 - good tlslite XMLRPC client')
    transport = XMLRPCTransport(ignoreAbruptClose=True)
    server = xmlrpclib.Server('https://%s:%s' % address, transport)
    assert server.add(1,2) == 3
    assert server.pow(2,4) == 16

    print('Test 27 - good XMLRPC ignored protocol')
    server = xmlrpclib.Server('http://%s:%s' % address, transport)
    assert server.add(1,2) == 3
    assert server.pow(2,4) == 16

    print("Test 28 - Internet servers test")
    # Best-effort external connectivity checks; network failure is non-fatal.
    try:
        i = IMAP4_TLS("cyrus.andrew.cmu.edu")
        i.login("anonymous", "anonymous@anonymous.net")
        i.logout()
        print("Test 28: IMAP4 good")
        p = POP3_TLS("pop.gmail.com")
        p.quit()
        print("Test 29: POP3 good")
    except socket.error as e:
        print("Non-critical error: socket error trying to reach internet server: ", e)

    if not badFault:
        print("Test succeeded")
    else:
        print("Test failed")
def testConnServer(connection):
count = 0
while 1:
s = connection.read()
count += len(s)
if len(s) == 0:
break
connection.write(s)
if count == 1111:
break
def serverTestCmd(argv):
    """Run the server half of the TLS test suite.

    argv: [HOST:PORT, DIRECTORY] -- must match a concurrently running
    ``clientTestCmd``; each numbered test accepts one connection and pairs
    with the same-numbered client test.
    """
    address = argv[0]
    dir = argv[1]

    #Split address into hostname/port tuple
    address = address.split(":")
    address = ( address[0], int(address[1]) )

    #Connect to server
    lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    lsock.bind(address)
    lsock.listen(5)

    def connect():
        # Accept one TCP connection and wrap it in a TLSConnection.
        return TLSConnection(lsock.accept()[0])

    # Server certificate/key fixtures.
    x509Cert = X509().parse(open(os.path.join(dir, "serverX509Cert.pem")).read())
    x509Chain = X509CertChain([x509Cert])
    s = open(os.path.join(dir, "serverX509Key.pem")).read()
    x509Key = parsePEMKey(s, private=True)

    print("Test 0 - Anonymous server handshake")
    connection = connect()
    connection.handshakeServer(anon=True)
    testConnServer(connection)
    connection.close()

    print("Test 1 - good X.509")
    connection = connect()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key)
    assert(connection.session.serverName == address[0])
    testConnServer(connection)
    connection.close()

    print("Test 1.a - good X.509, SSL v3")
    connection = connect()
    settings = HandshakeSettings()
    settings.minVersion = (3,0)
    settings.maxVersion = (3,0)
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, settings=settings)
    testConnServer(connection)
    connection.close()

    print("Test 1.b - good X.509, RC4-MD5")
    connection = connect()
    settings = HandshakeSettings()
    settings.macNames = ["sha", "md5"]
    settings.cipherNames = ["rc4"]
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, settings=settings)
    testConnServer(connection)
    connection.close()

    # TACK pinning tests only run when tackpy is installed.
    if tackpyLoaded:
        tack = Tack.createFromPem(open("./TACK1.pem", "rU").read())
        tackUnrelated = Tack.createFromPem(open("./TACKunrelated.pem", "rU").read())

        settings = HandshakeSettings()
        settings.useExperimentalTackExtension = True

        print("Test 2.a - good X.509, TACK")
        connection = connect()
        connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                                   tacks=[tack], activationFlags=1, settings=settings)
        testConnServer(connection)
        connection.close()

        print("Test 2.b - good X.509, TACK unrelated to cert chain")
        connection = connect()
        try:
            connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                                       tacks=[tackUnrelated], settings=settings)
            assert(False)
        except TLSRemoteAlert as alert:
            if alert.description != AlertDescription.illegal_parameter:
                raise

    print("Test 3 - good SRP")
    verifierDB = VerifierDB()
    verifierDB.create()
    entry = VerifierDB.makeVerifier("test", "password", 1536)
    verifierDB["test"] = entry

    connection = connect()
    connection.handshakeServer(verifierDB=verifierDB)
    testConnServer(connection)
    connection.close()

    print("Test 4 - SRP faults")
    # The client injects faults; any handshake outcome here is acceptable,
    # the client side is what scores the fault tests.
    for fault in Fault.clientSrpFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeServer(verifierDB=verifierDB)
            assert()
        except:
            pass
        connection.close()

    print("Test 6 - good SRP: with X.509 cert")
    connection = connect()
    connection.handshakeServer(verifierDB=verifierDB, \
                               certChain=x509Chain, privateKey=x509Key)
    testConnServer(connection)
    connection.close()

    print("Test 7 - X.509 with SRP faults")
    for fault in Fault.clientSrpFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeServer(verifierDB=verifierDB, \
                                       certChain=x509Chain, privateKey=x509Key)
            assert()
        except:
            pass
        connection.close()

    print("Test 11 - X.509 faults")
    for fault in Fault.clientNoAuthFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeServer(certChain=x509Chain, privateKey=x509Key)
            assert()
        except:
            pass
        connection.close()

    print("Test 14 - good mutual X.509")
    connection = connect()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, reqCert=True)
    testConnServer(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    connection.close()

    print("Test 14a - good mutual X.509, SSLv3")
    connection = connect()
    settings = HandshakeSettings()
    settings.minVersion = (3,0)
    settings.maxVersion = (3,0)
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, reqCert=True, settings=settings)
    testConnServer(connection)
    assert(isinstance(connection.session.serverCertChain, X509CertChain))
    connection.close()

    print("Test 15 - mutual X.509 faults")
    for fault in Fault.clientCertFaults + Fault.genericFaults:
        connection = connect()
        connection.fault = fault
        try:
            connection.handshakeServer(certChain=x509Chain, privateKey=x509Key, reqCert=True)
            assert()
        except:
            pass
        connection.close()

    print("Test 18 - good SRP, prepare to resume")
    sessionCache = SessionCache()
    connection = connect()
    connection.handshakeServer(verifierDB=verifierDB, sessionCache=sessionCache)
    assert(connection.session.serverName == address[0])
    testConnServer(connection)
    connection.close()

    print("Test 19 - resumption")
    connection = connect()
    connection.handshakeServer(verifierDB=verifierDB, sessionCache=sessionCache)
    assert(connection.session.serverName == address[0])
    testConnServer(connection)
    #Don't close! -- see next test

    print("Test 20 - invalidated resumption")
    try:
        connection.read(min=1, max=1)
        assert() #Client is going to close the socket without a close_notify
    except TLSAbruptCloseError as e:
        pass
    connection = connect()
    try:
        connection.handshakeServer(verifierDB=verifierDB, sessionCache=sessionCache)
    except TLSLocalAlert as alert:
        if alert.description != AlertDescription.bad_record_mac:
            raise
    connection.close()

    print("Test 21 - HTTPS test X.509")

    #Close the current listening socket
    lsock.close()

    #Create and run an HTTP Server using TLSSocketServerMixIn
    class MyHTTPServer(TLSSocketServerMixIn,
                       HTTPServer):
        def handshake(self, tlsConnection):
            tlsConnection.handshakeServer(certChain=x509Chain, privateKey=x509Key)
            return True

    cd = os.getcwd()
    os.chdir(dir)
    address = address[0], address[1]+1
    httpd = MyHTTPServer(address, SimpleHTTPRequestHandler)
    for x in range(6):
        httpd.handle_request()
    httpd.server_close()
    # NOTE(review): os.chdir() returns None, so this rebinds cd to None after
    # restoring the directory; cd is unused afterwards, so it is harmless.
    cd = os.chdir(cd)

    #Re-connect the listening socket
    lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    address = address[0], address[1]+1
    lsock.bind(address)
    lsock.listen(5)

    implementations = []
    if m2cryptoLoaded:
        implementations.append("openssl")
    if pycryptoLoaded:
        implementations.append("pycrypto")
    implementations.append("python")

    print("Test 22 - different ciphers")
    # NOTE(review): this iterates "python" len(implementations) times, unlike the
    # client side which iterates the implementations themselves -- confirm whether
    # forcing the pure-python implementation on the server is intentional here.
    for implementation in ["python"] * len(implementations):
        for cipher in ["aes128", "aes256", "rc4"]:
            print("Test 22:", end=' ')
            connection = connect()
            settings = HandshakeSettings()
            settings.cipherNames = [cipher]
            settings.cipherImplementations = [implementation, "python"]
            connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                                       settings=settings)
            print(connection.getCipherName(), connection.getCipherImplementation())
            testConnServer(connection)
            connection.close()

    print("Test 23 - throughput test")
    for implementation in implementations:
        for cipher in ["aes128", "aes256", "3des", "rc4"]:
            # 3DES only exists in the native implementations.
            if cipher == "3des" and implementation not in ("openssl", "pycrypto"):
                continue
            print("Test 23:", end=' ')
            connection = connect()
            settings = HandshakeSettings()
            settings.cipherNames = [cipher]
            settings.cipherImplementations = [implementation, "python"]
            connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                                       settings=settings)
            print(connection.getCipherName(), connection.getCipherImplementation())
            h = connection.read(min=50000, max=50000)
            assert(h == b"hello"*10000)
            connection.write(h)
            connection.close()

    # Tests 24.a-g: NPN; the advertised protocol lists mirror the client side.
    print("Test 24.a - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[b"http/1.1"])
    testConnServer(connection)
    connection.close()

    print("Test 24.b - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[b"spdy/2", b"http/1.1"])
    testConnServer(connection)
    connection.close()

    print("Test 24.c - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[b"http/1.1", b"spdy/2"])
    testConnServer(connection)
    connection.close()

    print("Test 24.d - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[b"spdy/2", b"http/1.1"])
    testConnServer(connection)
    connection.close()

    print("Test 24.e - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[b"http/1.1", b"spdy/2", b"spdy/3"])
    testConnServer(connection)
    connection.close()

    print("Test 24.f - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[b"spdy/3", b"spdy/2"])
    testConnServer(connection)
    connection.close()

    print("Test 24.g - Next-Protocol Server Negotiation")
    connection = connect()
    settings = HandshakeSettings()
    connection.handshakeServer(certChain=x509Chain, privateKey=x509Key,
                               settings=settings, nextProtos=[])
    testConnServer(connection)
    connection.close()

    print("Tests 25-27 - XMLRPXC server")
    address = address[0], address[1]+1

    class Server(TLSXMLRPCServer):
        def handshake(self, tlsConnection):
            try:
                tlsConnection.handshakeServer(certChain=x509Chain,
                                              privateKey=x509Key,
                                              sessionCache=sessionCache)
                tlsConnection.ignoreAbruptClose = True
                return True
            except TLSError as error:
                print("Handshake failure:", str(error))
                return False

    class MyFuncs:
        def pow(self, x, y): return pow(x, y)
        def add(self, x, y): return x + y

    server = Server(address)
    server.register_instance(MyFuncs())
    #sa = server.socket.getsockname()
    #print "Serving HTTPS on", sa[0], "port", sa[1]
    for i in range(6):
        server.handle_request()

    print("Test succeeded")
if __name__ == '__main__':
    # Dispatch on the first argument; prefix matching means abbreviations
    # like "c"/"cl" select "client" and "s"/"serv" select "server".
    if len(sys.argv) < 2:
        printUsage("Missing command")
    elif sys.argv[1] == "client"[:len(sys.argv[1])]:
        clientTestCmd(sys.argv[2:])
    elif sys.argv[1] == "server"[:len(sys.argv[1])]:
        serverTestCmd(sys.argv[2:])
    else:
        printUsage("Unknown command: %s" % sys.argv[1])
|
|
from __future__ import absolute_import
import json
import os
import shutil
import tempfile
import unittest
import mock
import nose
import yaml
from cumulusci.core.tests.utils import EnvironmentVarGuard
from cumulusci.core.config import BaseGlobalConfig
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import ScratchOrgConfig
from cumulusci.core.config import ServiceConfig
from cumulusci.core.keychain import BaseProjectKeychain
from cumulusci.core.keychain import BaseEncryptedProjectKeychain
from cumulusci.core.keychain import EncryptedFileProjectKeychain
from cumulusci.core.keychain import EnvironmentProjectKeychain
from cumulusci.core.exceptions import NotInProject
from cumulusci.core.exceptions import ServiceNotConfigured
from cumulusci.core.exceptions import ServiceNotValid
from cumulusci.core.exceptions import OrgNotFound
from cumulusci.core.exceptions import ProjectConfigNotFound
__location__ = os.path.dirname(os.path.realpath(__file__))
class TestBaseProjectKeychain(unittest.TestCase):
keychain_class = BaseProjectKeychain
    def setUp(self):
        """Build the project config, service configs and org configs shared
        by every test in this class (and its keychain subclasses)."""
        self.global_config = BaseGlobalConfig()
        self.project_config = BaseProjectConfig(self.global_config)
        # Service schemas the keychain validates set_service() against;
        # 'not_configured' deliberately gets no config below so lookups fail.
        self.project_config.config['services'] = {
            'connected_app': {'attributes': {'test': {'required': True}}},
            'github': {'attributes': {'name': {'required': True}, 'password': {}}},
            'mrbelvedere': {'attributes': {'mr': {'required': True}}},
            'not_configured': {'attributes': {'foo': {'required': True}}},
        }
        self.project_config.project__name = 'TestProject'
        # Valid configs matching the schemas above.
        self.services = {
            'connected_app': ServiceConfig({'test': 'value'}),
            'github': ServiceConfig({'name': 'hub'}),
            'mrbelvedere': ServiceConfig({'mr': 'belvedere'}),
        }
        self.org_config = OrgConfig({'foo': 'bar'}, 'test')
        self.scratch_org_config = ScratchOrgConfig({'foo': 'bar', 'scratch': True}, 'test_scratch')
        # 16-char key used by the encrypted keychain subclasses.
        self.key = '0123456789123456'
def test_init(self):
self._test_init()
def _test_init(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(keychain.project_config, self.project_config)
self.assertEquals(keychain.key, self.key)
def test_set_non_existant_service(self):
self._test_set_non_existant_service()
def _test_set_non_existant_service(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(ServiceNotValid) as context:
keychain.set_service(
'doesnotexist', ServiceConfig({'name': ''}), project)
def test_set_invalid_service(self):
self._test_set_invalid_service()
def _test_set_invalid_service(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(ServiceNotValid) as context:
keychain.set_service(
'github', ServiceConfig({'name': ''}), project)
def test_get_service_not_configured(self):
self._test_get_service_not_configured()
def _test_get_service_not_configured(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
with self.assertRaises(ServiceNotConfigured) as context:
keychain.get_service('not_configured')
def test_change_key(self):
self._test_change_key()
def _test_change_key(self):
new_key = '9876543210987654'
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config)
keychain.set_service('connected_app', self.services['connected_app'])
keychain.set_service('github', self.services['github'])
keychain.set_service('mrbelvedere', self.services['mrbelvedere'])
keychain.change_key(new_key)
self.assertEquals(keychain.key, new_key)
self.assertEquals(keychain.get_service(
'connected_app').config, self.services['connected_app'].config)
self.assertEquals(keychain.get_service(
'github').config, self.services['github'].config)
self.assertEquals(keychain.get_service(
'mrbelvedere').config, self.services['mrbelvedere'].config)
self.assertEquals(keychain.get_org(
'test').config, self.org_config.config)
def test_set_service_github(self):
self._test_set_service_github()
def _test_set_service_github(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_service('github', self.services['github'], project)
self.assertEquals(keychain.get_service(
'github').config, self.services['github'].config)
def test_set_service_mrbelvedere(self):
self._test_set_service_mrbelvedere()
def _test_set_service_mrbelvedere(self, project=False):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_service('mrbelvedere', self.services[
'mrbelvedere'], project)
self.assertEquals(keychain.get_service(
'mrbelvedere').config, self.services['mrbelvedere'].config)
def test_set_and_get_org(self):
self._test_set_and_get_org()
def _test_set_and_get_org(self, global_org=False):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config, global_org)
self.assertEquals(list(keychain.orgs.keys()), ['test'])
self.assertEquals(keychain.get_org(
'test').config, self.org_config.config)
def test_set_and_get_scratch_org(self):
self._test_set_and_get_org()
def _test_set_and_get_scratch_org(self, global_org=False):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.scratch_org_config, global_org)
self.assertEquals(list(keychain.orgs.keys()), ['test_scratch'])
org = keychain.get_org('test_scratch')
self.assertEquals(
org.config,
self.scratch_org_config.config,
)
self.assertEquals(
org.__class__,
ScratchOrgConfig,
)
def test_load_scratch_orgs_none(self):
self._test_load_scratch_orgs_none()
def _test_load_scratch_orgs_none(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(list(keychain.orgs), [])
def test_load_scratch_orgs_create_one(self):
self._test_load_scratch_orgs_create_one()
def _test_load_scratch_orgs_create_one(self):
self.project_config.config['orgs'] = {}
self.project_config.config['orgs']['scratch'] = {}
self.project_config.config['orgs']['scratch']['test_scratch_auto'] = {}
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(list(keychain.orgs), ['test_scratch_auto'])
def test_load_scratch_orgs_existing_org(self):
self._test_load_scratch_orgs_existing_org()
def _test_load_scratch_orgs_existing_org(self):
self.project_config.config['orgs'] = {}
self.project_config.config['orgs']['scratch'] = {}
self.project_config.config['orgs']['scratch']['test'] = {}
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(OrgConfig({}, 'test'))
self.assertEquals(list(keychain.orgs), ['test'])
org = keychain.get_org('test')
self.assertEquals(org.scratch, None)
def test_get_org_not_found(self):
self._test_get_org_not_found()
@nose.tools.raises(OrgNotFound)
def _test_get_org_not_found(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(keychain.get_org('test'), None)
def test_get_default_org(self):
self._test_get_default_org()
def _test_get_default_org(self):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, 'test')
org_config.config['default'] = True
keychain.set_org(org_config)
self.assertEquals(keychain.get_default_org()[
1].config, org_config.config)
def test_get_default_org_no_default(self):
self._test_get_default_org_no_default()
def _test_get_default_org_no_default(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(keychain.get_default_org()[1], None)
def test_set_default_org(self):
self._test_set_default_org()
def _test_set_default_org(self):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, 'test')
keychain.set_org(org_config)
keychain.set_default_org('test')
expected_org_config = org_config.config.copy()
expected_org_config['default'] = True
self.assertEquals(
expected_org_config,
keychain.get_default_org()[1].config,
)
def test_unset_default_org(self):
self._test_unset_default_org()
def _test_unset_default_org(self):
keychain = self.keychain_class(self.project_config, self.key)
org_config = self.org_config.config.copy()
org_config = OrgConfig(org_config, 'test')
org_config.config['default'] = True
keychain.set_org(org_config)
keychain.unset_default_org()
self.assertEquals(keychain.get_default_org()[1], None)
def test_list_orgs(self):
self._test_list_orgs()
def _test_list_orgs(self):
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_org(self.org_config)
self.assertEquals(keychain.list_orgs(), ['test'])
def test_list_orgs_empty(self):
self._test_list_orgs_empty()
def _test_list_orgs_empty(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(keychain.list_orgs(), [])
class TestEnvironmentProjectKeychain(TestBaseProjectKeychain):
keychain_class = EnvironmentProjectKeychain
    def setUp(self):
        """Seed the process environment with one org variable and three
        service variables so the environment-backed keychain finds them."""
        super(TestEnvironmentProjectKeychain, self).setUp()
        self.env = EnvironmentVarGuard()
        # Drop any pre-existing keychain variables so tests are hermetic.
        self._clean_env(self.env)
        # Org/service configs are stored as JSON under prefixed env vars.
        self.env.set(
            '{}test'.format(self.keychain_class.org_var_prefix),
            json.dumps(self.org_config.config)
        )
        self.env.set(
            '{}connected_app'.format(self.keychain_class.service_var_prefix),
            json.dumps(self.services['connected_app'].config)
        )
        self.env.set(
            '{}github'.format(self.keychain_class.service_var_prefix),
            json.dumps(self.services['github'].config)
        )
        self.env.set(
            '{}mrbelvedere'.format(self.keychain_class.service_var_prefix),
            json.dumps(self.services['mrbelvedere'].config)
        )
def _clean_env(self, env):
for key, value in list(env.items()):
if key.startswith(self.keychain_class.org_var_prefix):
del env[key]
for key, value in list(env.items()):
if key.startswith(self.keychain_class.service_var_prefix):
del env[key]
def test_get_org(self):
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(list(keychain.orgs.keys()), ['test'])
self.assertEquals(keychain.get_org(
'test').config, self.org_config.config)
def _test_list_orgs(self):
with self.env:
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(keychain.list_orgs(), ['test'])
def test_list_orgs_empty(self):
with EnvironmentVarGuard() as env:
self._clean_env(env)
env.set(
'{}connected_app'.format(self.keychain_class.service_var_prefix),
json.dumps(self.services['connected_app'].config)
)
self._test_list_orgs_empty()
def test_load_scratch_org_config(self):
with EnvironmentVarGuard() as env:
self._clean_env(env)
env.set(
'{}test'.format(self.keychain_class.org_var_prefix),
json.dumps(self.scratch_org_config.config)
)
keychain = self.keychain_class(self.project_config, self.key)
self.assertEquals(keychain.list_orgs(), ['test'])
self.assertEquals(keychain.orgs['test'].__class__, ScratchOrgConfig)
def test_load_scratch_orgs_none(self):
with EnvironmentVarGuard() as env:
self._clean_env(env)
self._test_load_scratch_orgs_none()
def test_load_scratch_orgs_create_one(self):
with EnvironmentVarGuard() as env:
self._clean_env(env)
self._test_load_scratch_orgs_create_one()
def test_get_org_not_found(self):
with EnvironmentVarGuard() as env:
self._clean_env(env)
self._test_get_org_not_found()
def test_get_default_org(self):
with EnvironmentVarGuard() as env:
self._clean_env(env)
org_config = self.org_config.config.copy()
org_config['default'] = True
self.env.set(
'{}test'.format(self.keychain_class.org_var_prefix),
json.dumps(org_config)
)
self._test_get_default_org()
def test_set_default_org(self):
""" The EnvironmentProjectKeychain does not persist default org settings """
with EnvironmentVarGuard() as env:
self._clean_env(env)
org_config = self.org_config.config.copy()
self.env.set(
'{}test'.format(self.keychain_class.org_var_prefix),
json.dumps(org_config)
)
keychain = self.keychain_class(self.project_config, self.key)
keychain.set_default_org('test')
expected_org_config = self.org_config.config.copy()
expected_org_config['default'] = True
self.assertEquals(
None,
keychain.get_default_org()[1],
)
class TestBaseEncryptedProjectKeychain(TestBaseProjectKeychain):
    """Run the base keychain suite against BaseEncryptedProjectKeychain."""
    keychain_class = BaseEncryptedProjectKeychain

    def test_decrypt_config_no_config(self):
        # Decrypting a missing (None) payload should yield an empty config
        # object of the requested class rather than raising.
        keychain = self.keychain_class(self.project_config, self.key)
        config = keychain._decrypt_config(OrgConfig, None, extra=['test'])
        self.assertEquals(config.__class__, OrgConfig)
        self.assertEquals(config.config, {})
@mock.patch('os.path.expanduser')
class TestEncryptedFileProjectKeychain(TestBaseProjectKeychain):
    """Run the keychain suite against the file-backed encrypted keychain.

    The class decorator patches ``os.path.expanduser``; unittest injects the
    resulting mock into every test as ``mock_class``.  Each test points the
    mock at a throwaway "home" directory and chdirs into a fake git project
    so the keychain resolves its storage dir and project name from temp
    directories instead of the real filesystem.
    """
    keychain_class = EncryptedFileProjectKeychain

    def setUp(self):
        self.global_config = BaseGlobalConfig()
        self.project_config = BaseProjectConfig(self.global_config)
        # Service schemas the keychain validates against; note 'github'
        # uses the 'git' attribute here (unlike the base class's 'name').
        self.project_config.config['services'] = {
            'connected_app': {'attributes': {'test': {'required': True}}},
            'github': {'attributes': {'git': {'required': True}, 'password': {}}},
            'mrbelvedere': {'attributes': {'mr': {'required': True}}},
            'not_configured': {'attributes': {'foo': {'required': True}}},
        }
        self.project_config.project__name = 'TestProject'
        self.project_name = 'TestProject'
        self.org_config = OrgConfig({'foo': 'bar'}, 'test')
        self.scratch_org_config = ScratchOrgConfig({'foo': 'bar', 'scratch': True}, 'test_scratch')
        self.services = {
            'connected_app': ServiceConfig({'test': 'value'}),
            'github': ServiceConfig({'git': 'hub'}),
            'mrbelvedere': ServiceConfig({'mr': 'belvedere'}),
        }
        self.key = '0123456789123456'

    def tearDown(self):
        # Fix: the temp home/project trees previously leaked on every test
        # and the process was left chdir'd inside a deleted-later dir.
        # Leave the temp project dir (it may be the cwd) before deleting.
        os.chdir(tempfile.gettempdir())
        for attr in ('tempdir_home', 'tempdir_project'):
            path = getattr(self, attr, None)
            if path:
                shutil.rmtree(path, ignore_errors=True)

    def _mk_temp_home(self):
        # Fake $HOME containing an empty ~/.cumulusci directory.
        self.tempdir_home = tempfile.mkdtemp()
        global_local_dir = os.path.join(
            self.tempdir_home,
            '.cumulusci',
        )
        os.makedirs(global_local_dir)

    def _mk_temp_project(self):
        # Fake project checkout: just a .git dir holding a config file.
        self.tempdir_project = tempfile.mkdtemp()
        git_dir = os.path.join(
            self.tempdir_project,
            '.git',
        )
        os.makedirs(git_dir)
        self._create_git_config()

    def _create_git_config(self):
        # Minimal git config pointing origin at a repo named after the
        # project -- how the keychain derives the project name from cwd.
        filename = os.path.join(self.tempdir_project, '.git', 'config')
        content = (
            '[remote "origin"]\n' +
            ' url = git@github.com:TestOwner/{}'.format(self.project_name)
        )
        self._write_file(filename, content)

    def _write_file(self, filename, content):
        with open(filename, 'w') as f:
            f.write(content)

    def _enter_temp_env(self, mock_class):
        # Shared preamble (previously copy-pasted into all 22 tests):
        # build temp home + project, point the patched expanduser at the
        # home, and enter the fake project checkout.
        self._mk_temp_home()
        self._mk_temp_project()
        mock_class.return_value = self.tempdir_home
        os.chdir(self.tempdir_project)

    # Each test below runs the base-class scenario inside the temp env.
    def test_init(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_init()

    def test_change_key(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_change_key()

    def test_set_invalid_service(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_invalid_service()

    def test_set_non_existant_service(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_non_existant_service()

    def test_get_service_not_configured(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_get_service_not_configured()

    def test_set_service_github(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_service_github()

    def test_set_service_github_project(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_service_github(True)

    def test_set_service_mrbelvedere(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_service_mrbelvedere()

    def test_set_service_mrbelvedere_project(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_service_mrbelvedere(True)

    def test_set_and_get_org(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_and_get_org()

    def test_set_and_get_scratch_org(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_and_get_scratch_org()

    def test_load_scratch_orgs_none(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_load_scratch_orgs_none()

    def test_load_scratch_orgs_create_one(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_load_scratch_orgs_create_one()

    def test_load_scratch_orgs_existing_org(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_load_scratch_orgs_existing_org()

    def test_set_and_get_org_global(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_and_get_org(True)

    def test_get_org_not_found(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_get_org_not_found()

    def test_get_default_org(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_get_default_org()

    def test_get_default_org_no_default(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_get_default_org_no_default()

    def test_set_default_org(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_set_default_org()

    def test_unset_default_org(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_unset_default_org()

    def test_list_orgs(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_list_orgs()

    def test_list_orgs_empty(self, mock_class):
        self._enter_temp_env(mock_class)
        self._test_list_orgs_empty()
|
|
import json, urllib, urllib2, zlib
from django.conf import settings
from id_tools import netflix_id_from_input
from webapp.models import Movies
from oauth import OAuthRequest
from oauth.signature_method.hmac_sha1 import OAuthSignatureMethod_HMAC_SHA1
from xml.dom.minidom import parseString
# Return movies from wikipedia given title and year (only sets id currently)
def wikipedia_movies_from_term(search_term, how_many):
    """Search Wikipedia for film articles matching *search_term*.

    Returns {'movies': [Movies, ...]} (only WikipediaId is populated, from
    the article title) or {'error_msg': ...} on any failure.
    """
    movies = []
    if search_term:
        try:
            # Query wikipedia search API -- "%20film" is appended to bias
            # results toward movie articles; non-ASCII chars are dropped.
            req = urllib2.Request('http://en.wikipedia.org/w/api.php?format=xml&action=query&list=search&srlimit=' + str(how_many) + '&srsearch='+urllib.quote(search_term.encode('ascii', 'ignore'))+'%20film')
            res = urllib2.urlopen(req)
            if res.getcode() == 200:
                # Parse xml response
                dom = parseString(res.read())
                # If elements returned -- presumably one <p> element per
                # search hit, article title in its 'title' attribute
                # (TODO confirm against the MediaWiki search XML format).
                if dom.getElementsByTagName('p'):
                    for elem in dom.getElementsByTagName('p'):
                        movie = Movies( WikipediaId = elem.getAttribute('title'))
                        movies.append(movie)
                    if movies:
                        return {'movies' : movies}
                    else:
                        return {'error_msg' : 'Invalid'}
                else:
                    return {'error_msg' : 'Invalid'}
            else:
                return {'error_msg' : 'Invalid'}
        except Exception:
            # Network and parse failures are reported uniformly.
            return {'error_msg' : 'Wikipedia API failed, please try again.'}
    else:
        return {'error_msg' : 'Could not be parsed from input.'}
# Return list of movies from imdb given search term
def imdb_movies_from_term(search_term, page_limit):
    """Search OMDb (an unofficial IMDb API) for movies matching *search_term*.

    Returns {'movies': [Movies, ...]} with ImdbId/Title/Year/Runtime set, or
    {'error_msg': ...} on failure.  At most *page_limit* results are
    examined; episodes, series and games are skipped, as are entries
    missing any of the four fields.
    """
    movies = []
    if search_term and len(search_term) > 0:
        # Query omdbapi (not affiliated with imdb)
        search_term = urllib.quote(search_term.encode('ascii', 'ignore'))
        req = urllib2.Request('http://www.omdbapi.com/?s='+search_term+'&r=JSON')
        res = urllib2.urlopen(req)
        if res.getcode() == 200:
            # Parse json response
            res_dict = json.loads(res.read())
            if res_dict.get('Search'):
                results = res_dict.get('Search')
                min_length = page_limit if page_limit < len(results) else len(results)
                for i in range(min_length):
                    movie = Movies()
                    imdb_dict = results[i]
                    # Only keep feature films.
                    if imdb_dict.get('Type') in ('episode', 'series', 'game'):
                        continue
                    if imdb_dict.get('imdbID'):
                        movie.ImdbId = imdb_dict.get('imdbID')
                    if imdb_dict.get('Title'):
                        movie.Title = imdb_dict.get('Title')
                    if imdb_dict.get('Year'):
                        movie.Year = int(imdb_dict.get('Year'))
                    if imdb_dict.get('Runtime'):
                        # Convert "\d+ min" to a bare minute count.
                        runtime_str = imdb_dict.get('Runtime')
                        try:
                            movie.Runtime = str(int(runtime_str[:runtime_str.find(" ")]))
                        except (ValueError, TypeError):
                            # Bug fix: an unparseable runtime (e.g. "N/A")
                            # previously left `runtime` unbound or stale from
                            # a prior iteration; now Runtime stays unset so
                            # the completeness check below drops the entry.
                            pass
                    # Keep only fully-populated entries (Movies fields are
                    # assumed to default to '' / 0 -- per the check below).
                    if movie.ImdbId != '' and movie.Title != '' and movie.Year != 0 and movie.Runtime != '':
                        movies.append(movie)
            else:
                # Bug fix: this was keyed 'error', but every sibling search
                # function and every caller uses 'error_msg'.
                return {'error_msg' : 'No IMDb results found, please try again.'}
        else:
            return {'error_msg' : 'IMDb API failed, please try again.'}
    else:
        return {'error_msg' : 'No term to search from.'}
    if len(movies) > 0:
        return {'movies' : movies}
    else:
        return {'error_msg' : 'No results found.'}
# Return list of movies from netflix given search term
def netflix_movies_from_term(search_term, page_limit):
    """Search the OAuth-signed Netflix catalog API for *search_term*.

    Returns {'movies': [Movies, ...]} with Title/Year/NetflixId populated,
    or {'error_msg': ...} on failure.
    """
    movies = []
    if search_term and len(search_term) > 0:
        try:
            # Query netflix search API with OAuth
            consumer = {'oauth_token': settings.API_KEYS['NETFLIX'], 'oauth_token_secret': settings.API_KEYS['NETFLIX_SECRET']}
            params = {'term' : search_term, 'start_index' : 0, 'max_results' : page_limit}
            request = OAuthRequest('http://api-public.netflix.com/catalog/titles', 'GET', params)
            request.sign_request(OAuthSignatureMethod_HMAC_SHA1, consumer)
            url = request.to_url(include_oauth=True)
            req = urllib2.Request(url)
            res = urllib2.urlopen(req)
            if res.getcode() == 200:
                dom = parseString(res.read())
                for node in dom.getElementsByTagName('catalog_title'):
                    # NOTE(review): `id` shadows the builtin; left unchanged.
                    id, title, year = '', '', 0
                    if node.getElementsByTagName('id') and node.getElementsByTagName('id')[0]:
                        id = netflix_id_from_input(node.getElementsByTagName('id')[0].childNodes[0].data)
                    if node.getElementsByTagName('title') and node.getElementsByTagName('title')[0]:
                        # Prefer the full title; fall back to the short form.
                        title = node.getElementsByTagName('title')[0].getAttribute('regular')
                        if title == '':
                            title = node.getElementsByTagName('title')[0].getAttribute('short')
                    if node.getElementsByTagName('release_year') and node.getElementsByTagName('release_year')[0]:
                        year = int(node.getElementsByTagName('release_year')[0].childNodes[0].data)
                    if id != '' and title != '' and year != 0:
                        movie = Movies(Title = title, Year = year, NetflixId = id)
                        movies.append(movie)
                    else:
                        # Any incomplete catalog entry aborts the whole search.
                        return {'error_msg' : 'Invalid Netflix Search'}
            else:
                return {'error_msg' : 'Invalid Netflix Search'}
        except Exception:
            return {'error_msg' : 'Netflix API failed, please try again'}
    else:
        return {'error_msg' : 'No term to search from.'}
    if len(movies) > 0:
        return {'movies' : movies}
    else:
        return {'error_msg' : 'No results found.'}
# Return list of movies from rotten tomatoes given search term
def rottentomatoes_movies_from_term(search_term, page_limit):
    """Search the Rotten Tomatoes movie API for *search_term*.

    Returns {'movies': [Movies, ...]} with Title/Year/RottenTomatoesId
    populated, or {'error_msg': ...} on failure.
    """
    movies = []
    if search_term and len(search_term) > 0:
        try:
            # Query rotten tomatoes search API
            encoded_title = urllib.quote(search_term)
            req = urllib2.Request('http://api.rottentomatoes.com/api/public/v1.0/movies.json?q='+encoded_title+'&page_limit=' + str(page_limit) + '&page=1&apikey='+settings.API_KEYS['ROTTEN_TOMATOES'])
            res = urllib2.urlopen(req)
            if res.getcode() == 200:
                data = res.read()
                # Transparently inflate gzip-compressed responses
                # (16+MAX_WBITS tells zlib to expect a gzip header).
                if res.info().get("Content-Encoding") == 'gzip':
                    data = zlib.decompress(data, 16+zlib.MAX_WBITS)
                rottentomatoes_dict = json.loads(data)
                if rottentomatoes_dict.get('total') > 0:
                    # Examine at most page_limit hits.
                    min_length = page_limit if page_limit < rottentomatoes_dict.get('total') else rottentomatoes_dict.get('total')
                    for i in range(min_length):
                        movie_dict = rottentomatoes_dict.get('movies')[i]
                        movie = Movies(Title = movie_dict.get('title'), Year = movie_dict.get('year'), RottenTomatoesId = movie_dict.get('id'))
                        movies.append(movie)
                else:
                    return {'error_msg' : 'Invalid Rotten Tomatoes Search'}
            else:
                return {'error_msg' : 'Rotten Tomatoes API failed, please try again.'}
        except Exception:
            return {'error_msg' : 'Rotten Tomatoes API failed, please try again.'}
    else:
        return {'error_msg' : 'No term to search from.'}
    if len(movies) > 0:
        return {'movies' : movies}
    else:
        return {'error_msg' : 'No results found.'}
# Return dictionary of uncombined lists of movies from all sources given search term and length
def movies_from_apis_term(search_term, how_many):
    """Query every movie source independently and return the raw lists.

    Returns a dict with one list per source ('imdb_movies',
    'rottentomatoes_movies', 'netflix_movies', 'wikipedia_movies') plus an
    'error_list' dict mapping '<Source>Search' to the failure message for
    any source that produced no movies.
    """
    error_list = {}
    results = {}
    # (result key, error key, search function) for each backend; replaces
    # four copy-pasted if/else stanzas.
    sources = (
        ('imdb_movies', 'ImdbSearch', imdb_movies_from_term),
        ('rottentomatoes_movies', 'RottenTomatoesSearch',
         rottentomatoes_movies_from_term),
        ('netflix_movies', 'NetflixSearch', netflix_movies_from_term),
        ('wikipedia_movies', 'WikipediaSearch', wikipedia_movies_from_term),
    )
    for result_key, error_key, search in sources:
        source_result = search(search_term, how_many)
        if source_result.get('movies'):
            results[result_key] = source_result.get('movies')
        else:
            results[result_key] = []
            error_list[error_key] = source_result.get('error_msg')
    # Bug fix: error_list was previously computed and then silently dropped
    # from the return value; expose it (new key, backward compatible).
    results['error_list'] = error_list
    return results
# Return list of movies given search term and length
def movies_from_term(search_term, how_many):
    """Search IMDb, Rotten Tomatoes and Netflix and merge the results.

    IMDb results are the anchor; each one is matched to a Rotten Tomatoes
    and a Netflix entry (title+year match first, then year-only fallback).
    Returns {'success': bool, 'movies': [...], 'error_list': {...}} where
    only fully cross-matched movies are included.
    """
    error_list = {}
    movies = []
    success = True
    imdb_movies, rottentomatoes_movies, netflix_movies = [], [], []
    if search_term and len(search_term) > 0:
        # Search imdb, rotten tomatoes and netflix
        imdb_dict = imdb_movies_from_term(search_term, how_many)
        rt_dict = rottentomatoes_movies_from_term(search_term, how_many)
        net_dict = netflix_movies_from_term(search_term, how_many)
        if imdb_dict.get('movies'):
            imdb_movies = imdb_dict.get('movies')
        else:
            error_list['ImdbSearch'] = imdb_dict.get('error_msg')
        if rt_dict.get('movies'):
            rottentomatoes_movies = rt_dict.get('movies')
        else:
            error_list['RottenTomatoesSearch'] = rt_dict.get('error_msg')
        if net_dict.get('movies'):
            netflix_movies = net_dict.get('movies')
        else:
            error_list['NetflixSearch'] = net_dict.get('error_msg')
        if not imdb_dict.get('movies') and not rt_dict.get('movies') and not net_dict.get('movies'):
            success = False
        # Connect all of the results
        for movie in imdb_movies:
            if not movie.Year:
                break
            # First pass: title substring + year-within-1 match. Matched
            # entries are removed so they cannot pair twice (the immediate
            # break makes removing during iteration safe).
            for rt_movie in rottentomatoes_movies:
                if movie.Title.lower() in rt_movie.Title.lower() and rt_movie.Year and abs(movie.Year - rt_movie.Year) < 2:
                    movie.RottenTomatoesId = rt_movie.RottenTomatoesId
                    rottentomatoes_movies.remove(rt_movie)
                    break
            for net_movie in netflix_movies:
                if movie.Title.lower() in net_movie.Title.lower() and net_movie.Year and abs(movie.Year - net_movie.Year) < 2:
                    movie.NetflixId = net_movie.NetflixId
                    netflix_movies.remove(net_movie)
                    break
            # Fallback pass: year-only match.
            if movie.RottenTomatoesId == '':
                for rt_movie in rottentomatoes_movies:
                    if rt_movie.Year and abs(movie.Year - rt_movie.Year) < 2:
                        movie.RottenTomatoesId = rt_movie.RottenTomatoesId
                        # Bug fix: was `rt_movies.remove(...)` -- a NameError
                        # at runtime (no such variable).
                        rottentomatoes_movies.remove(rt_movie)
                        break
            if movie.NetflixId == '':
                # Bug fix: this fallback was missing its loop entirely and
                # referenced the stale `net_movie` left over from the first
                # pass, with a dangling break.
                for net_movie in netflix_movies:
                    if net_movie.Year and abs(movie.Year - net_movie.Year) < 2:
                        movie.NetflixId = net_movie.NetflixId
                        netflix_movies.remove(net_movie)
                        break
            # Only keep movies matched in all three sources.
            if movie.RottenTomatoesId != '' and movie.NetflixId != '':
                movies.append(movie)
    else:
        success = False
    return {'success': success, 'movies' : movies, 'error_list' : error_list}
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from keystone import auth
from keystone import exception
from keystone import tests
# for testing purposes only
METHOD_NAME = 'simple_challenge_response'
# The "correct" answer the challenge-response plugin expects, and the user
# id it authenticates as; freshly randomized on every test run.
EXPECTED_RESPONSE = uuid.uuid4().hex
DEMO_USER_ID = uuid.uuid4().hex
class SimpleChallengeResponse(auth.AuthMethodHandler):
    """Two-step challenge/response auth plugin used only by these tests."""
    method = METHOD_NAME

    def authenticate(self, context, auth_payload, user_context):
        # Round 1: no 'response' yet -> hand back the challenge question.
        # Round 2: verify the response; on success record the user id and
        # fall through to an implicit None, which signals completion.
        if 'response' in auth_payload:
            if auth_payload['response'] != EXPECTED_RESPONSE:
                raise exception.Unauthorized('Wrong answer')
            user_context['user_id'] = DEMO_USER_ID
        else:
            return {"challenge": "What's the name of your high school?"}
# Inherits SimpleChallengeResponse's ``method`` name unchanged, so
# registering both plugins is a duplicate the loader must reject.
class DuplicateAuthPlugin(SimpleChallengeResponse):
    """Duplicate simple challenge response auth plugin."""
class MismatchedAuthPlugin(SimpleChallengeResponse):
    """Plugin whose ``method`` attribute is a random, non-matching name."""
    method = uuid.uuid4().hex
class NoMethodAuthPlugin(auth.AuthMethodHandler):
    """An auth plugin that does not supply a method attribute."""

    def authenticate(self, context, auth_payload, auth_context):
        # No-op: only the (missing) ``method`` attribute matters to tests.
        pass
class TestAuthPlugin(tests.SQLDriverOverrides, tests.TestCase):
    """Exercise pluggable auth methods through the v3 auth controller."""

    def setUp(self):
        super(TestAuthPlugin, self).setUp()
        self.load_backends()
        self.api = auth.controllers.Auth()

    def config_files(self):
        config_files = super(TestAuthPlugin, self).config_files()
        config_files.append(tests.dirs.tests_conf('test_auth_plugin.conf'))
        return config_files

    def config_overrides(self):
        super(TestAuthPlugin, self).config_overrides()
        # Register the stock plugins plus the test-only challenge-response
        # method defined at the top of this module.
        method_opts = dict(
            [
                ('external', 'keystone.auth.plugins.external.DefaultDomain'),
                ('password', 'keystone.auth.plugins.password.Password'),
                ('token', 'keystone.auth.plugins.token.Token'),
                (METHOD_NAME,
                 'keystone.tests.test_auth_plugin.SimpleChallengeResponse'),
            ])
        self.auth_plugin_config_override(
            methods=['external', 'password', 'token', METHOD_NAME],
            **method_opts)

    def test_unsupported_auth_method(self):
        # A random, unregistered method name must be rejected up front.
        method_name = uuid.uuid4().hex
        auth_data = {'methods': [method_name]}
        auth_data[method_name] = {'test': 'test'}
        auth_data = {'identity': auth_data}
        self.assertRaises(exception.AuthMethodNotSupported,
                          auth.controllers.AuthInfo.create,
                          None,
                          auth_data)

    def test_addition_auth_steps(self):
        # Round 1: no response supplied, so the plugin should demand more
        # auth via AdditionalAuthRequired carrying its challenge question.
        auth_data = {'methods': [METHOD_NAME]}
        auth_data[METHOD_NAME] = {
            'test': 'test'}
        auth_data = {'identity': auth_data}
        auth_info = auth.controllers.AuthInfo.create(None, auth_data)
        auth_context = {'extras': {}, 'method_names': []}
        try:
            self.api.authenticate({'environment': {}}, auth_info, auth_context)
        except exception.AdditionalAuthRequired as e:
            self.assertIn('methods', e.authentication)
            self.assertIn(METHOD_NAME, e.authentication['methods'])
            self.assertIn(METHOD_NAME, e.authentication)
            self.assertIn('challenge', e.authentication[METHOD_NAME])
        # test correct response
        auth_data = {'methods': [METHOD_NAME]}
        auth_data[METHOD_NAME] = {
            'response': EXPECTED_RESPONSE}
        auth_data = {'identity': auth_data}
        auth_info = auth.controllers.AuthInfo.create(None, auth_data)
        auth_context = {'extras': {}, 'method_names': []}
        self.api.authenticate({'environment': {}}, auth_info, auth_context)
        self.assertEqual(DEMO_USER_ID, auth_context['user_id'])
        # test incorrect response
        auth_data = {'methods': [METHOD_NAME]}
        auth_data[METHOD_NAME] = {
            'response': uuid.uuid4().hex}
        auth_data = {'identity': auth_data}
        auth_info = auth.controllers.AuthInfo.create(None, auth_data)
        auth_context = {'extras': {}, 'method_names': []}
        self.assertRaises(exception.Unauthorized,
                          self.api.authenticate,
                          {'environment': {}},
                          auth_info,
                          auth_context)
class TestAuthPluginDynamicOptions(TestAuthPlugin):
    """Re-run TestAuthPlugin with plugin methods read from the conf file."""

    def config_overrides(self):
        super(TestAuthPluginDynamicOptions, self).config_overrides()
        # Clear the override for the [auth] ``methods`` option so it is
        # possible to load the options from the config file.
        self.config_fixture.conf.clear_override('methods', group='auth')

    def config_files(self):
        config_files = super(TestAuthPluginDynamicOptions, self).config_files()
        config_files.append(tests.dirs.tests_conf('test_auth_plugin.conf'))
        return config_files
class TestInvalidAuthMethodRegistration(tests.TestCase):
    """Plugin loading must fail fast on malformed plugin definitions."""

    def test_duplicate_auth_method_registration(self):
        # Two plugins claiming the same ``method`` name -> ValueError.
        self.config_fixture.config(
            group='auth',
            methods=[
                'keystone.tests.test_auth_plugin.SimpleChallengeResponse',
                'keystone.tests.test_auth_plugin.DuplicateAuthPlugin'])
        self.clear_auth_plugin_registry()
        self.assertRaises(ValueError, auth.controllers.load_auth_methods)

    def test_no_method_attribute_auth_method_by_class_name_registration(self):
        # A plugin without a ``method`` attribute -> ValueError.
        self.config_fixture.config(
            group='auth',
            methods=['keystone.tests.test_auth_plugin.NoMethodAuthPlugin'])
        self.clear_auth_plugin_registry()
        self.assertRaises(ValueError, auth.controllers.load_auth_methods)
class TestMapped(tests.TestCase):
    """Check that federated method names are routed to the Mapped plugin."""

    def setUp(self):
        super(TestMapped, self).setUp()
        self.load_backends()
        self.api = auth.controllers.Auth()

    def config_files(self):
        files = super(TestMapped, self).config_files()
        files.append(tests.dirs.tests_conf('test_auth_plugin.conf'))
        return files

    def config_overrides(self):
        # don't override configs so we can use test_auth_plugin.conf only
        pass

    def _test_mapped_invocation_with_method_name(self, method_name):
        patcher = mock.patch.object(auth.plugins.mapped.Mapped,
                                    'authenticate',
                                    return_value=None)
        with patcher as authenticate:
            context = {'environment': {}}
            # put the method name in the payload so its easier to correlate
            # method name with payload
            identity = {'methods': [method_name],
                        method_name: {'protocol': method_name}}
            auth_info = auth.controllers.AuthInfo.create(
                context, {'identity': identity})
            auth_context = {'extras': {},
                            'method_names': [],
                            'user_id': uuid.uuid4().hex}
            self.api.authenticate(context, auth_info, auth_context)
            # make sure Mapped plugin got invoked with the correct payload
            positional, _kwargs = authenticate.call_args
            payload = positional[1]
            self.assertEqual(method_name, payload['protocol'])

    def test_mapped_with_remote_user(self):
        patcher = mock.patch.object(auth.plugins.mapped.Mapped,
                                    'authenticate',
                                    return_value=None)
        with patcher as authenticate:
            # external plugin should fail and pass to mapped plugin
            method_name = 'saml2'
            identity = {'methods': [method_name],
                        method_name: {'protocol': method_name}}
            auth_info = auth.controllers.AuthInfo.create(
                None, {'identity': identity})
            auth_context = {'extras': {},
                            'method_names': [],
                            'user_id': uuid.uuid4().hex}
            environment = {'environment': {'REMOTE_USER': 'foo@idp.com'}}
            self.api.authenticate(environment, auth_info, auth_context)
            # make sure Mapped plugin got invoked with the correct payload
            positional, _kwargs = authenticate.call_args
            payload = positional[1]
            self.assertEqual(payload['protocol'], method_name)

    def test_supporting_multiple_methods(self):
        for method_name in ['saml2', 'openid', 'x509']:
            self._test_mapped_invocation_with_method_name(method_name)
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_company_summary_info
except ImportError:
bt_company_summary_info = sys.modules[
"onshape_client.oas.models.bt_company_summary_info"
]
try:
from onshape_client.oas.models import global_permission_info
except ImportError:
global_permission_info = sys.modules[
"onshape_client.oas.models.global_permission_info"
]
class BTUserOAuth2SummaryInfo(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Summary information about a user as seen through the OAuth2 API.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum constraint: entries of the "roles" list attribute are restricted
    # to these values.
    allowed_values = {
        ("roles",): {
            "ANONYMOUS": "ANONYMOUS",
            "TOTPPENDINGUSER": "TOTPPENDINGUSER",
            "USER": "USER",
            "DEVELOPER": "DEVELOPER",
            "PARTNER": "PARTNER",
            "ONSHAPECOMPANYUSER": "ONSHAPECOMPANYUSER",
            "ADMIN": "ADMIN",
        },
    }

    # No length/range/regex validations are defined for this model.
    validations = {}

    # Unknown (additional) properties are not accepted on this model.
    additional_properties_type = None

    @staticmethod
    def openapi_types():
        """
        This must be a class method so a model may have properties that are
        of type self, this ensures that we don't create a cyclic import
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            "client_id": (str,),  # noqa: E501
            "company": (bt_company_summary_info.BTCompanySummaryInfo,),  # noqa: E501
            "company_plan": (bool,),  # noqa: E501
            "email": (str,),  # noqa: E501
            "first_name": (str,),  # noqa: E501
            "global_permissions": (
                global_permission_info.GlobalPermissionInfo,
            ),  # noqa: E501
            "href": (str,),  # noqa: E501
            "id": (str,),  # noqa: E501
            "image": (str,),  # noqa: E501
            "is_guest": (bool,),  # noqa: E501
            "is_light": (bool,),  # noqa: E501
            "last_login_time": (datetime,),  # noqa: E501
            "last_name": (str,),  # noqa: E501
            "name": (str,),  # noqa: E501
            "oauth2_scopes": (int,),  # noqa: E501
            "plan_group": (str,),  # noqa: E501
            "role": (int,),  # noqa: E501
            "roles": ([str],),  # noqa: E501
            "source": (int,),  # noqa: E501
            "state": (int,),  # noqa: E501
            "view_ref": (str,),  # noqa: E501
        }

    @staticmethod
    def discriminator():
        # This model has no discriminator (it is not polymorphic).
        return None

    # Maps the pythonic snake_case attribute name to the JSON key used on
    # the wire.
    attribute_map = {
        "client_id": "clientId",  # noqa: E501
        "company": "company",  # noqa: E501
        "company_plan": "companyPlan",  # noqa: E501
        "email": "email",  # noqa: E501
        "first_name": "firstName",  # noqa: E501
        "global_permissions": "globalPermissions",  # noqa: E501
        "href": "href",  # noqa: E501
        "id": "id",  # noqa: E501
        "image": "image",  # noqa: E501
        "is_guest": "isGuest",  # noqa: E501
        "is_light": "isLight",  # noqa: E501
        "last_login_time": "lastLoginTime",  # noqa: E501
        "last_name": "lastName",  # noqa: E501
        "name": "name",  # noqa: E501
        "oauth2_scopes": "oauth2Scopes",  # noqa: E501
        "plan_group": "planGroup",  # noqa: E501
        "role": "role",  # noqa: E501
        "roles": "roles",  # noqa: E501
        "source": "source",  # noqa: E501
        "state": "state",  # noqa: E501
        "view_ref": "viewRef",  # noqa: E501
    }

    @staticmethod
    def _composed_schemas():
        # Not a composed (allOf/oneOf/anyOf) schema.
        return None

    # Internal bookkeeping attributes that are always present on an instance
    # and must never be treated as model properties.
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_from_server",
            "_path_to_item",
            "_configuration",
        ]
    )

    def __init__(
        self,
        _check_type=True,
        _from_server=False,
        _path_to_item=(),
        _configuration=None,
        **kwargs
    ):  # noqa: E501
        """bt_user_o_auth2_summary_info.BTUserOAuth2SummaryInfo - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _from_server (bool): True if the data is from the server
                False if the data is from the client (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            client_id (str): [optional] # noqa: E501
            company (bt_company_summary_info.BTCompanySummaryInfo): [optional] # noqa: E501
            company_plan (bool): [optional] # noqa: E501
            email (str): [optional] # noqa: E501
            first_name (str): [optional] # noqa: E501
            global_permissions (global_permission_info.GlobalPermissionInfo): [optional] # noqa: E501
            href (str): [optional] # noqa: E501
            id (str): [optional] # noqa: E501
            image (str): [optional] # noqa: E501
            is_guest (bool): [optional] # noqa: E501
            is_light (bool): [optional] # noqa: E501
            last_login_time (datetime): [optional] # noqa: E501
            last_name (str): [optional] # noqa: E501
            name (str): [optional] # noqa: E501
            oauth2_scopes (int): [optional] # noqa: E501
            plan_group (str): [optional] # noqa: E501
            role (int): [optional] # noqa: E501
            roles ([str]): [optional] # noqa: E501
            source (int): [optional] # noqa: E501
            state (int): [optional] # noqa: E501
            view_ref (str): [optional] # noqa: E501
        """
        self._data_store = {}
        self._check_type = _check_type
        self._from_server = _from_server
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Each remaining keyword is a model property; setattr routes through
        # ModelNormal's type checking against openapi_types().
        for var_name, var_value in six.iteritems(kwargs):
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.keypoint_ops."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.utils import test_case
class KeypointOpsTest(test_case.TestCase):
  """Tests for common keypoint operations.

  Keypoints are represented as [num_instances, num_keypoints, 2] tensors of
  (y, x) coordinates throughout; each graph_fn builds the op under test and
  is run through the test_case.TestCase.execute harness.
  """

  def test_scale(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.0, 0.0], [100.0, 200.0]],
          [[50.0, 120.0], [100.0, 140.0]]
      ])
      # Scale absolute coordinates down to [0, 1] by image size 100x200.
      y_scale = tf.constant(1.0 / 100)
      x_scale = tf.constant(1.0 / 200)
      expected_keypoints = tf.constant([
          [[0., 0.], [1.0, 1.0]],
          [[0.5, 0.6], [1.0, 0.7]]
      ])
      output = keypoint_ops.scale(keypoints, y_scale, x_scale)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_clip_to_window(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.0], [1.0, 1.0]]
      ])
      # Window is [y_min, x_min, y_max, x_max]; out-of-window keypoints are
      # clipped onto the window boundary.
      window = tf.constant([0.25, 0.25, 0.75, 0.75])
      expected_keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.25], [0.75, 0.75]]
      ])
      output = keypoint_ops.clip_to_window(keypoints, window)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_prune_outside_window(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.0], [1.0, 1.0]]
      ])
      window = tf.constant([0.25, 0.25, 0.75, 0.75])
      # Pruned keypoints are replaced with NaN rather than removed.
      expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]],
                                        [[np.nan, np.nan], [np.nan, np.nan]]])
      output = keypoint_ops.prune_outside_window(keypoints, window)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_change_coordinate_frame(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.0], [1.0, 1.0]]
      ])
      window = tf.constant([0.25, 0.25, 0.75, 0.75])
      # Coordinates re-expressed relative to the window; values may fall
      # outside [0, 1] for keypoints outside the window.
      expected_keypoints = tf.constant([
          [[0, 0.5], [1.0, 1.0]],
          [[0.5, -0.5], [1.5, 1.5]]
      ])
      output = keypoint_ops.change_coordinate_frame(keypoints, window)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_keypoints_to_enclosing_bounding_boxes(self):
    def graph_fn():
      keypoints = tf.constant(
          [
              [  # Instance 0.
                  [5., 10.],
                  [3., 20.],
                  [8., 4.],
              ],
              [  # Instance 1.
                  [2., 12.],
                  [0., 3.],
                  [5., 19.],
              ],
          ], dtype=tf.float32)
      bboxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(keypoints)
      return bboxes
    output = self.execute(graph_fn, [])
    # Each box is [y_min, x_min, y_max, x_max] of that instance's keypoints.
    expected_bboxes = np.array(
        [
            [3., 4., 8., 20.],
            [0., 3., 5., 19.]
        ])
    self.assertAllClose(expected_bboxes, output)

  def test_to_normalized_coordinates(self):
    def graph_fn():
      keypoints = tf.constant([
          [[10., 30.], [30., 45.]],
          [[20., 0.], [40., 60.]]
      ])
      # Image height 40, width 60.
      output = keypoint_ops.to_normalized_coordinates(
          keypoints, 40, 60)
      expected_keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.0], [1.0, 1.0]]
      ])
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_to_normalized_coordinates_already_normalized(self):
    # The range-check assertion only fires on CPU execution.
    if self.has_tpu(): return
    def graph_fn():
      keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.0], [1.0, 1.0]]
      ])
      output = keypoint_ops.to_normalized_coordinates(
          keypoints, 40, 60)
      return output
    with self.assertRaisesOpError('assertion failed'):
      self.execute_cpu(graph_fn, [])

  def test_to_absolute_coordinates(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.25, 0.5], [0.75, 0.75]],
          [[0.5, 0.0], [1.0, 1.0]]
      ])
      output = keypoint_ops.to_absolute_coordinates(
          keypoints, 40, 60)
      expected_keypoints = tf.constant([
          [[10., 30.], [30., 45.]],
          [[20., 0.], [40., 60.]]
      ])
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_to_absolute_coordinates_already_absolute(self):
    # The range-check assertion only fires on CPU execution.
    if self.has_tpu(): return
    def graph_fn():
      keypoints = tf.constant([
          [[10., 30.], [30., 45.]],
          [[20., 0.], [40., 60.]]
      ])
      output = keypoint_ops.to_absolute_coordinates(
          keypoints, 40, 60)
      return output
    with self.assertRaisesOpError('assertion failed'):
      self.execute_cpu(graph_fn, [])

  def test_flip_horizontal(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
          [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]
      ])
      # Mirror x around the axis x = 0.5; y is unchanged.
      expected_keypoints = tf.constant([
          [[0.1, 0.9], [0.2, 0.8], [0.3, 0.7]],
          [[0.4, 0.6], [0.5, 0.5], [0.6, 0.4]],
      ])
      output = keypoint_ops.flip_horizontal(keypoints, 0.5)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_flip_horizontal_permutation(self):
    def graph_fn():
      keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
                               [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]])
      # Keypoints 1 and 2 swap identity after the flip (e.g. left/right).
      flip_permutation = [0, 2, 1]
      expected_keypoints = tf.constant([
          [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]],
          [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]],
      ])
      output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_flip_vertical(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
          [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]
      ])
      # Mirror y around the axis y = 0.5; x is unchanged.
      expected_keypoints = tf.constant([
          [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],
          [[0.6, 0.4], [0.5, 0.5], [0.4, 0.6]],
      ])
      output = keypoint_ops.flip_vertical(keypoints, 0.5)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_flip_vertical_permutation(self):
    def graph_fn():
      keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
                               [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]])
      flip_permutation = [0, 2, 1]
      expected_keypoints = tf.constant([
          [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],
          [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]],
      ])
      output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_rot90(self):
    def graph_fn():
      keypoints = tf.constant([
          [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
          [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]
      ])
      expected_keypoints = tf.constant([
          [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],
          [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]],
      ])
      output = keypoint_ops.rot90(keypoints)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_rot90_permutation(self):
    def graph_fn():
      keypoints = tf.constant([[[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],
                               [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]])
      rot_permutation = [0, 2, 1]
      expected_keypoints = tf.constant([
          [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],
          [[0.4, 0.4], [0.3, 0.6], [0.4, 0.5]],
      ])
      output = keypoint_ops.rot90(keypoints,
                                  rotation_permutation=rot_permutation)
      return output, expected_keypoints
    output, expected_keypoints = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoints)

  def test_keypoint_weights_from_visibilities(self):
    def graph_fn():
      keypoint_visibilities = tf.constant([
          [True, True, False],
          [False, True, False]
      ])
      # Invisible keypoints get weight 0; visible ones keep their
      # per-keypoint weight.
      per_keypoint_weights = [1.0, 2.0, 3.0]
      keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities(
          keypoint_visibilities, per_keypoint_weights)
      return keypoint_weights
    expected_keypoint_weights = [
        [1.0, 2.0, 0.0],
        [0.0, 2.0, 0.0]
    ]
    output = self.execute(graph_fn, [])
    self.assertAllClose(output, expected_keypoint_weights)

  def test_keypoint_weights_from_visibilities_no_per_kpt_weights(self):
    def graph_fn():
      keypoint_visibilities = tf.constant([
          [True, True, False],
          [False, True, False]
      ])
      # Without per-keypoint weights, visible keypoints default to 1.0.
      keypoint_weights = keypoint_ops.keypoint_weights_from_visibilities(
          keypoint_visibilities)
      return keypoint_weights
    expected_keypoint_weights = [
        [1.0, 1.0, 0.0],
        [0.0, 1.0, 0.0]
    ]
    output = self.execute(graph_fn, [])
    self.assertAllClose(expected_keypoint_weights, output)

  def test_set_keypoint_visibilities_no_initial_kpt_vis(self):
    # Keypoints with any NaN coordinate are marked invisible.
    keypoints_np = np.array(
        [
            [[np.nan, 0.2],
             [np.nan, np.nan],
             [-3., 7.]],
            [[0.5, 0.2],
             [4., 1.0],
             [-3., np.nan]],
        ], dtype=np.float32)
    def graph_fn():
      keypoints = tf.constant(keypoints_np, dtype=tf.float32)
      keypoint_visibilities = keypoint_ops.set_keypoint_visibilities(
          keypoints)
      return keypoint_visibilities
    expected_kpt_vis = [
        [False, False, True],
        [True, True, False]
    ]
    output = self.execute(graph_fn, [])
    self.assertAllEqual(expected_kpt_vis, output)

  def test_set_keypoint_visibilities(self):
    keypoints_np = np.array(
        [
            [[np.nan, 0.2],
             [np.nan, np.nan],
             [-3., 7.]],
            [[0.5, 0.2],
             [4., 1.0],
             [-3., np.nan]],
        ], dtype=np.float32)
    # Provided visibilities are only overridden (to False) for NaN coords;
    # an initial False is never flipped back to True.
    initial_keypoint_visibilities_np = np.array(
        [
            [False,
             True,  # Will be overriden by NaN coords.
             False],  # Will be maintained, even though non-NaN coords.
            [True,
             False,  # Will be maintained, even though non-NaN coords.
             False]
        ])
    def graph_fn():
      keypoints = tf.constant(keypoints_np, dtype=tf.float32)
      initial_keypoint_visibilities = tf.constant(
          initial_keypoint_visibilities_np, dtype=tf.bool)
      keypoint_visibilities = keypoint_ops.set_keypoint_visibilities(
          keypoints, initial_keypoint_visibilities)
      return keypoint_visibilities
    expected_kpt_vis = [
        [False, False, False],
        [True, False, False]
    ]
    output = self.execute(graph_fn, [])
    self.assertAllEqual(expected_kpt_vis, output)
# Run the keypoint-op tests when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
|
|
"""Class to purely handle everything that concerns the board"""
from typing import Tuple, List
import numpy as np
from src import Utils
if not Utils.in_pyinstaller_mode():
from scipy import ndimage
"""Just to adjust the internal representation of color at a single location,
instead of all over the code ;) Just in case. Maybe something else as -1 and 1
could be interesting, see the tick tack toe example"""
# Internal stone/intersection encodings used throughout the Board class.
WHITE = -1  # white stone
BLACK = 1   # black stone
EMPTY = 0   # empty intersection
class Board(np.matrix):
    """Class that purely handles the board, as well as board-related functions.

    The motivation for this was also that we can make a copy of the real
    board, and evaluate all the `get_chain`, `check_dead` etc. on the copy.
    """

    def get_chain(self, loc: Tuple[int, int]) -> List[Tuple[int, int]]:
        """Return the coordinates of the chain (connected group) at `loc`.

        Uses scipy's connected-component labelling when available
        (run.py start); falls back to a hand-rolled flood fill in
        pyinstaller mode, where scipy cannot be bundled.
        """
        # if run.py was started, we can use scipy and thereby improve performance
        if not Utils.in_pyinstaller_mode():
            # Label connected components of same-valued cells
            # (morphological operations) and keep the component at `loc`.
            # https://en.wikipedia.org/wiki/Mathematical_morphology
            test_matrix = self == self[loc]
            label_im, nb_labels = ndimage.label(test_matrix)
            label_im = label_im == label_im[loc]
            locations = np.where(label_im)
            return list(zip(locations[0], locations[1]))
        # if GTPengine.py was started, via pyinstaller for instance, we can't
        # use scipy because pyinstaller doesn't seem to be able to handle it.
        player = self[loc]
        # Flood fill over orthogonal neighbors of the same color.
        to_check = [loc]
        group = []
        while len(to_check) > 0:
            current = to_check.pop()
            neighbors = self.get_adjacent_coords(current)
            for n in neighbors:
                if self[n] == player and n not in group and n not in to_check:
                    to_check.append(n)
            group.append(current)
        return group

    def check_dead(self, group: List[Tuple[int, int]]) -> bool:
        """Check if a group is dead (has no liberties).

        A group is alive iff at least one stone in it has an EMPTY
        adjacent intersection.
        """
        # NOTE: a morphological (dilation/erosion) variant existed here as
        # commented-out code; it was removed as dead code. See VCS history.
        for loc in group:
            for n in self.get_adjacent_coords(loc):
                if self[n] == EMPTY:
                    return False
        return True

    # TODO
    # call this just place_stone_and_capture_if_applicable() and rename the
    # one below .._custom_values; that would be better style. didn't want to
    # do that now because it would break usages of this method
    def place_stone_and_capture_if_applicable_default_values(self, loc, player_val):
        """Place a stone using the module's default color encodings."""
        opponent_val = WHITE if player_val == BLACK else BLACK
        self.place_stone_and_capture_if_applicable(loc, player_val, opponent_val, EMPTY)

    def place_stone_and_capture_if_applicable(self, loc, player_val, opponent_val, empty_val):
        """Place `player_val` at `loc` and remove captured opponent chains."""
        self[loc] = player_val
        # remove stones if this move captured them
        neighbors = self.get_adjacent_coords(loc)
        groups = []
        for n in neighbors:
            if self[n] == opponent_val:
                groups.append(self.get_chain(n))
        for g in groups:
            if self.check_dead(g):
                for c in g:
                    self[c] = empty_val

    def is_on_board(self, col, row):
        """Return True if (col, row) is a valid board coordinate."""
        return 0 <= col < self.shape[0] and 0 <= row < self.shape[1]

    def get_all_neighbor_coords(self, loc: Tuple[int, int]):
        """Return all on-board neighbors of `loc`, including diagonals."""
        neighbors = []
        deltas = [
            (1, 1), (1, 0), (1, -1), (0, -1),
            (-1, -1), (-1, 0), (-1, 1), (0, 1)
        ]
        for delta in deltas:
            col = loc[0] + delta[0]
            row = loc[1] + delta[1]
            if self.is_on_board(col, row):
                neighbors.append((col, row))
        return neighbors

    def get_adjacent_coords(self, loc: Tuple[int, int]):
        """Return the orthogonal (up/down/left/right) on-board neighbors."""
        neighbors = []
        if loc[0] > 0:
            neighbors.append((loc[0]-1, loc[1]))
        if loc[0] < self.shape[0]-1:
            neighbors.append((loc[0]+1, loc[1]))
        if loc[1] > 0:
            neighbors.append((loc[0], loc[1]-1))
        if loc[1] < self.shape[1]-1:
            neighbors.append((loc[0], loc[1]+1))
        return neighbors

    def to_number(self):
        """Create a unique representation for a board.

        Does this by creating an integer, with each decimal digit encoding
        one board location (1=white, 2=black, 3=empty). Used because
        performance gets bad once the board history is large.
        """
        number = 0
        i = 0
        for entry in np.nditer(self):
            if entry == WHITE:
                number += 1 * 10**i
            elif entry == BLACK:
                number += 2 * 10**i
            else:
                number += 3 * 10**i
            i += 1
        return number

    def __str__(self):
        """String representation of the board!

        Just a simple ascii output: column letters on top (GTP style,
        skipping 'i'), row numbers counting down on the left."""
        b = self.copy()
        rows = list(range(b.shape[0]))
        cols = list(range(b.shape[1]))
        rows = [str(self.shape[0] - i) for i in rows]
        cols = [chr(i + ord('a')) if i < 8 else chr(i + 1 + ord('a')) for i in cols]
        # was previously not GTP conform:
        # rows = [chr(i + ord('a')) for i in rows]
        # cols = [chr(i + ord('a')) for i in cols]
        # Remap to single-digit positive values so that numpy formats the
        # str representation using a single space per cell.
        b[b == BLACK] = 2
        b[b == WHITE] = 3
        matrix_repr = super(Board, b).__str__()
        matrix_repr = matrix_repr.replace('2', 'X')
        matrix_repr = matrix_repr.replace('3', 'O')
        matrix_repr = matrix_repr.replace('0', '.')
        matrix_repr = matrix_repr.replace('[[', ' [')
        matrix_repr = matrix_repr.replace(']]', ']')
        col_index = ' '.join(cols)
        board_repr = ''
        for i in zip(rows, matrix_repr.splitlines()):
            board_repr += i[0]+i[1]+'\n'
        board_repr = ' '*3 + col_index+'\n'+board_repr
        return board_repr

    ###########################################################################
    # Not used yet, but more relevant to `Board` than to `Game`
    def _matrix2csv(self, matrix):
        """Transform a matrix to a string, using ';' as the separator."""
        ls = matrix.tolist()
        ls = [str(entry) for row in ls for entry in row]
        return ';'.join(ls)

    def board2file(self, file, mode='a'):
        """Store board to a file.

        The idea is also to create csv files that contain all boards that
        were part of a game, so that we can use those to train a network on.
        """
        # BUG FIX: previously serialized `self.board`, but Board (an
        # np.matrix subclass) has no `.board` attribute -> AttributeError.
        string = self._matrix2csv(self)
        with open(file, mode) as f:
            f.write(string)
            f.write('\n')
# Run any doctests embedded in this module when executed directly.
if __name__ == '__main__':
    import doctest
    # doctest.testmod(extraglobs={'g': Game()})
    doctest.testmod()
|
|
"""Tests -- run with nosetests, or from command line python -mtatl.test [-u]
(The -u will update artifacts)
"""
from tatl import ExprParser, ExprSemantics, Compiler, front_matter
import os, sys, json, glob
from contextlib import contextmanager
import traceback, cStringIO
import tatlrt
G_TESTS = 'grammar/test.txt'              # one grammar test expression per line
G_EXPECT = 'grammar/test.expect.json'     # expected parse output per line
RULE = 'attrs'                            # grammar rule used to parse each line
TESTDIRS = ['tests/*.html', 'docs/examples/*.html']  # template test sources
OUT = 'tests/out/'                        # generated output directory
EXPECT = 'tests/expect/'                  # golden-file directory
EXCLUDE = 'tests/skip.txt'                # paths to skip, one per line
def test_grammar(update=0):
parser = ExprParser.ExprParser(
parseinfo=True,
semantics=ExprSemantics.ExprSemantics()
)
if os.path.exists(G_EXPECT):
expect = json.load(open(G_EXPECT))
else:
assert update, "Missing %s -- run in update mode (-u)" % EXPECT
expect = {}
updated = {}
fail = 0
for line in open(G_TESTS):
line = line.rstrip()
try:
ast = parser.parse(line, rule_name=RULE)
if hasattr(ast, 'out'):
out = ast.out()
else:
out = repr(ast)
except:
print 'FAILURE:', line
traceback.print_exc()
import pdb
pdb.post_mortem()
fail += 1
else:
if line not in expect or expect[line] != out:
print 'MISMATCH:',line
print '----- got -----'
print out
print '---- expect ---'
print expect.get(line, '*missing*')
print '---------------'
fail += 1
updated[line] = out
else:
updated[line] = expect[line]
if update and updated != expect:
with open(G_EXPECT, 'w') as f:
json.dump(updated, f, indent=4, sort_keys=True)
print "Wrote", G_EXPECT
else:
assert not fail, "%d failures" % fail
class Case:
    """One template test case: an input file plus derived output/golden paths."""

    def __init__(self, path):
        self.path = path
        self.file = os.path.split(path)[1]
        base = os.path.splitext(self.file)[0]
        # Base paths (no extension) for generated and expected artifacts.
        self.outbase = os.path.join(OUT, base)
        self.expectbase = os.path.join(EXPECT, base)

    def read(self):
        # Return the raw test-template source.
        return read(self.path)

    def out(self, suffix, output, compare=None, update=False):
        """Write `output` to <outbase><suffix>; optionally compare/update.

        Returns the output path, or whatever `compare` returns on mismatch.
        """
        if isinstance(output, str):
            # wtf
            try:
                output = output.decode('ascii')
            except:
                # wtf*2
                print self.path, suffix, 'Bogus data!'
                raise
        outf = self.outbase+suffix
        with open(outf, 'w') as f:
            f.write(output.encode('utf8'))
        if compare:
            expectf = self.expectbase+suffix
            expect = read(expectf).decode('utf8')
            if output == expect:
                pass
            elif update:
                # Update mode: overwrite the golden file instead of failing.
                with open(expectf, 'w') as f:
                    f.write(output.encode('utf8'))
                print "Wrote", expectf
            else:
                return compare(outf, expectf)
        return outf

    def front_matter(self):
        # Parse the template's front matter; best-effort (empty on failure).
        try:
            return front_matter(self.path)
        except:
            print "WARNING: front matter failed", self.path
            return {}
def read(filename, default=''):
    """Return the contents of `filename`, or `default` if it cannot be read."""
    try:
        f = open(filename)
        data = f.read()
        f.close()
        return data
    except Exception, e:
        return default
def test_tatl():
    """Generator of (runtest, Case) pairs for every template under TESTDIRS."""
    # yield our test cases so that nosetests sees them as individual cases
    if not os.path.exists(OUT): os.makedirs(OUT)
    if not os.path.exists(EXPECT): os.makedirs(EXPECT)
    # Optional skip list; absence is fine.
    try:
        with open(EXCLUDE) as f:
            exclude = set(f.read().splitlines())
    except:
        exclude = set()
    tests = []
    for pattern in TESTDIRS:
        tests += [Case(f) for f in glob.glob(pattern) if f not in exclude]
    runtest = Runner(False).runtest
    for test in tests:
        yield runtest, test
class Runner:
    """Compiles a test template to Python and JS, runs both, and compares
    outputs against golden files. Silent by default (see VerboseRunner)."""

    def __init__(self, update):
        # When truthy, golden files are rewritten instead of failing.
        self.update = update

    def log(self, *args):
        # Base runner is silent; subclasses override to print/collect.
        pass

    def skipped(self, test):
        self.log('Skipped:', test.file)
        return True

    def start(self, test):
        self.log('----', test.path)

    def warn(self, text):
        """Compiler warning callback; checks warnings against front matter."""
        if text in self.expected_warnings:
            if self.expected_warnings.index(text) > 0:
                self.log('Warning out of order:', text)
            self.expected_warnings.remove(text)
        else:
            self.log('Unexpected warning:', text)

    def runpy_failed(self, test, py):
        # Re-raise so the failure propagates; VerboseRunner records instead.
        print "Error running", test.path
        raise

    def runtest(self, test):
        """Run one Case end to end; returns truthy on success."""
        self.start(test)
        fm = test.front_matter()
        if fm.get('test') == 'skip':
            return self.skipped(test)
        # Warnings the template declares it expects, consumed by warn().
        self.expected_warnings = fm.get('expect', {}).get('warn', [])
        inp = test.read()
        self.log(inp)
        pyrun = jsrun = None
        py = None
        try:
            py = Compiler.compile(inp, test.file, out='py', warn=self.warn)
            pyc = compile(py, test.file, 'exec')
        except:
            if py:
                print py
            self.compile_fail(inp, test, 'py')
        else:
            self.log('--py:\n', py)
            c = test.out('.py', py, False)
            # Run once with the slow runtime, then again with the fast one;
            # the two renderings must agree.
            tatlrt.use_fast(False)
            try:
                pyrun = runpy(py).rstrip() + '\n'
            except:
                pyrun = self.runpy_failed(test, py)
            pyout = test.out('.py.html', pyrun, self.compare, self.update)
            self.log('->', c)
            if tatlrt.use_fast(True):
                self.log("checking fast")
                try:
                    pyfrun = runpy(py).rstrip() + '\n'
                except:
                    pyfrun = self.runpy_failed(test, py)
                pyfout = test.out('.fast.py.html', pyfrun, self.compare, self.update)
                if pyfrun != pyrun:
                    self.run_mismatch(pyfout, pyout)
            else:
                self.log('Could not use fast module')
                import pdb
                pdb.set_trace()
                print 'fast->', tatlrt.use_fast(True)
        try:
            js = Compiler.compile(inp, test.file, out='js', warn=self.warn)
        except:
            self.compile_fail(inp, test, 'js')
        else:
            self.log('--js:\n', js)
            c = test.out('.js', js, False)
            self.log('->', c)
            jsrun = runjsfile(c).rstrip() + '\n'
            jsout = test.out('.js.html', jsrun, self.compare, self.update)
        # Python and JS renderings of the same template should be identical.
        if pyrun and jsrun and pyrun != jsrun:
            self.run_mismatch(pyout, jsout)
        return self.done(test)

    def compile_fail(self, inp, test, target):
        print 'Compile failed:', test.path, '->', target
        raise

    def compare(self, outf, expectf):
        # files do not match - return outf to keep processing
        raise AssertionError("%s != %s" % (outf, expectf))

    def done(self, test):
        # Any expected warnings left over were never emitted.
        for w in self.expected_warnings:
            self.log('Expected warning:', w)
        return True

    def run_mismatch(self, pyout, jsout):
        self.log("WARNING: %s and %s output should match" % (pyout, jsout))
        self.log("diff", pyout, jsout)
        #os.system("diff %s %s" % (pyout, jsout))
class VerboseRunner(Runner):
    """Runner that prints its log and records failures instead of raising."""

    # Per-test failure flags, reset in start().
    fail = mismatch = 0

    def start(self, test):
        self.log('----', test.path)
        self.fail = self.mismatch = 0

    def log(self, *args):
        print ' '.join(map(unicode, args))

    def compare(self, outf, expectf):
        # files do not match - return outf to keep processing
        self.log("Failed! %s != %s" % (outf, expectf))
        self.fail = 1
        return outf

    def done(self, test):
        self.log("head %s.*" % test.outbase)
        return not (self.fail or self.mismatch)

    def run_mismatch(self, pyout, jsout):
        self.mismatch = 1
        self.log("WARNING: %s and %s output should match" % (pyout, jsout))
        self.log("diff", pyout, jsout)
        # Show the actual diff between the two renderings.
        self.log(os.popen("diff %s %s" % (pyout, jsout)).read().decode('utf8'))

    def compile_fail(self, inp, test, target):
        # Record the failure and dump the offending input instead of raising.
        print 'Compile failed:', test.path, '->', target
        traceback.print_exc()
        print '-----'
        print inp
        print '-----'
        self.fail = True

    def runpy_failed(self, test, py):
        # Record the failure and dump the generated code instead of raising.
        print 'Runpy failed:', test.path,
        traceback.print_exc()
        print '-----'
        print py
        print '-----'
        self.fail = True
class VerboseOnFailRunner(VerboseRunner):
    """Buffers the log per test and only prints it when the test fails."""

    # Buffered log lines for the current test; lazily initialized.
    logs = None

    def log(self, *args):
        if self.logs is None:
            self.logs = []
        self.logs.append(args)

    def start(self, test):
        VerboseRunner.start(self, test)
        print '----', test.path
        self.logs = []

    def done(self, test):
        r = VerboseRunner.done(self, test)
        if not r:
            # Failure: flush the buffered log, tolerating unprintable args.
            def uni(s):
                try: return unicode(s)
                except: return repr(s)
            for args in self.logs:
                print ' '.join(map(uni, args))
        else:
            print '---> Test ok', test.file
        return r
def runpy(pycode):
    # Execute the compiled template module source in a fresh namespace and
    # call its html() entry point with a fixed set of sample arguments
    # (mirrored by the JS object literal in runjsfile()).
    d = {}
    exec pycode in d, d
    return d['html'](a='a', b=[1, 2], c=1, d={'a':'AA', 'b': [1,2,3]})
def runjsfile(jsfile):
    # Run the compiled JS template under node and capture its output.
    rel_mod = os.path.splitext(jsfile)[0]
    # The module path is spliced into a shell command below; these asserts
    # guard against quoting/escaping breakage (they are a safety net, not
    # full sanitisation of untrusted input).
    assert "\\" not in rel_mod
    assert "'" not in rel_mod
    # Same sample arguments as runpy(), expressed as a JS object literal.
    data = "{a: 'a', b: [1, 2], c:1, d: {a:'AA', b: [1,2,3]}}"
    js = "require('./%s').html.call(%s)+'';" % (rel_mod, data)
    assert '"' not in js
    cmd = 'node -p -e "%s" 2>&1' % js
    print>>sys.stderr, cmd
    return os.popen(cmd).read().decode('utf8')
if __name__ == '__main__':
    print "Running tests... (pass -u to update)"
    # NOTE(review): `io` appears unused in this block — confirm before removing.
    io = cStringIO.StringIO()
    import sys
    sys.path.append('.') # include tatlrt.py
    sys.path.append('tests/out') # so that tests can import each other
    ExprSemantics.DEBUG = True
    # Crude flag parsing: -u updates expected outputs, -v selects the
    # verbose-on-fail runner, -c keeps going after the first failure.
    args = sys.argv[1:]
    update = '-u' in args
    if update: args.remove('-u')
    verbose = '-v' in args
    if verbose:
        args.remove('-v')
        runner = VerboseOnFailRunner(update)
    else:
        runner = Runner(update)
    keepgoing = '-c' in args
    if keepgoing: args.remove('-c')
    try:
        test_grammar(update)
        fails = []
        for fn, test in test_tatl():
            # Remaining positional args act as a test-path filter.
            if not args or test.path in args:
                #import pdb
                #pdb.set_trace()
                ok = runner.runtest(test)
                if ok: continue
                fails.append(test.path)
                if not keepgoing:
                    break
        # Summarise the failing test paths at the end of the run.
        for f in fails:
            print f
    except Exception, e:
        traceback.print_exc()
        sys.exit(1)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
import wrapt
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import test
from tensorflow.python.util.compat import collections_abc
# NOTE(mrry): Arguments of parameterized tests are lifted into lambdas to make
# sure they are not executed before the (eager- or graph-mode) test environment
# has been set up.
#
# TODO(jsimsa): Add tests for OptionalStructure and DatasetStructure.
class StructureTest(test_base.DatasetTestBase, parameterized.TestCase,
test_util.TensorFlowTestCase):
# pylint: disable=g-long-lambda,protected-access
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0), tensor_spec.TensorSpec,
[dtypes.float32], [[]]),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
tensor_array_ops.TensorArraySpec, [dtypes.variant], [[]]),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
sparse_tensor.SparseTensorSpec, [dtypes.variant], [None]),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [4]]),
ragged_tensor.RaggedTensorSpec, [dtypes.variant], [None]),
("Nested_0",
lambda: (constant_op.constant(37.0), constant_op.constant([1, 2, 3])),
tuple, [dtypes.float32, dtypes.int32], [[], [3]]),
("Nested_1", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, dict, [dtypes.float32, dtypes.int32], [[], [3]]),
("Nested_2", lambda: {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
}, dict, [dtypes.float32, dtypes.variant, dtypes.variant], [[], None, None
]),
)
  def testFlatStructure(self, value_fn, expected_structure, expected_types,
                        expected_shapes):
    """Checks type_spec_from_value() and the flat types/shapes it yields."""
    value = value_fn()
    s = structure.type_spec_from_value(value)
    self.assertIsInstance(s, expected_structure)
    flat_types = structure.get_flat_tensor_types(s)
    self.assertEqual(expected_types, flat_types)
    flat_shapes = structure.get_flat_tensor_shapes(s)
    self.assertLen(flat_shapes, len(expected_shapes))
    for expected, actual in zip(expected_shapes, flat_shapes):
      if expected is None:
        # None in expected_shapes means "rank is completely unknown".
        self.assertEqual(actual.ndims, None)
      else:
        self.assertEqual(actual.as_list(), expected)
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0), lambda: [
constant_op.constant(38.0),
array_ops.placeholder(dtypes.float32),
variables.Variable(100.0), 42.0,
np.array(42.0, dtype=np.float32)
], lambda: [constant_op.constant([1.0, 2.0]),
constant_op.constant(37)]),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0), lambda: [
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=10)
], lambda: [
tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(3,), size=0),
tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(), size=0)
]),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: [
sparse_tensor.SparseTensor(
indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
sparse_tensor.SparseTensorValue(
indices=[[1, 1], [3, 4]], values=[10, -1], dense_shape=[4, 5]),
array_ops.sparse_placeholder(dtype=dtypes.int32),
array_ops.sparse_placeholder(dtype=dtypes.int32, shape=[None, None])
], lambda: [
constant_op.constant(37, shape=[4, 5]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[5, 6]),
array_ops.sparse_placeholder(
dtype=dtypes.int32, shape=[None, None, None]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1.0], dense_shape=[4, 5])
]),
("RaggedTensor", lambda: ragged_factory_ops.constant([[1, 2], [], [3]]),
lambda: [
ragged_factory_ops.constant([[1, 2], [3, 4], []]),
ragged_factory_ops.constant([[1], [2, 3, 4], [5]]),
], lambda: [
ragged_factory_ops.constant(1),
ragged_factory_ops.constant([1, 2]),
ragged_factory_ops.constant([[1], [2]]),
ragged_factory_ops.constant([["a", "b"]]),
]),
("Nested", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, lambda: [{
"a": constant_op.constant(15.0),
"b": constant_op.constant([4, 5, 6])
}], lambda: [{
"a": constant_op.constant(15.0),
"b": constant_op.constant([4, 5, 6, 7])
}, {
"a": constant_op.constant(15),
"b": constant_op.constant([4, 5, 6])
}, {
"a":
constant_op.constant(15),
"b":
sparse_tensor.SparseTensor(
indices=[[0], [1], [2]], values=[4, 5, 6], dense_shape=[3])
}, (constant_op.constant(15.0), constant_op.constant([4, 5, 6]))]),
)
@test_util.run_deprecated_v1
  def testIsCompatibleWithStructure(self, original_value_fn,
                                    compatible_values_fn,
                                    incompatible_values_fn):
    """Verifies are_compatible() against known compatible/incompatible values."""
    original_value = original_value_fn()
    compatible_values = compatible_values_fn()
    incompatible_values = incompatible_values_fn()
    s = structure.type_spec_from_value(original_value)
    for compatible_value in compatible_values:
      self.assertTrue(
          structure.are_compatible(
              s, structure.type_spec_from_value(compatible_value)))
    for incompatible_value in incompatible_values:
      self.assertFalse(
          structure.are_compatible(
              s, structure.type_spec_from_value(incompatible_value)))
@parameterized.named_parameters(
("Tensor",
lambda: constant_op.constant(37.0),
lambda: constant_op.constant(42.0),
lambda: constant_op.constant([5])),
("TensorArray",
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=0)),
("SparseTensor",
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[1, 2]], values=[42], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[3]], values=[-1], dense_shape=[5]),
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[1.0], dense_shape=[4, 5])),
("RaggedTensor",
lambda: ragged_factory_ops.constant([[[1, 2]], [[3]]]),
lambda: ragged_factory_ops.constant([[[5]], [[8], [3, 2]]]),
lambda: ragged_factory_ops.constant([[[1]], [[2], [3]]],
ragged_rank=1),
lambda: ragged_factory_ops.constant([[[1.0, 2.0]], [[3.0]]]),
lambda: ragged_factory_ops.constant([[[1]], [[2]], [[3]]])),
("Nested",
lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])},
lambda: {
"a": constant_op.constant(42.0),
"b": constant_op.constant([4, 5, 6])},
lambda: {
"a": constant_op.constant([1, 2, 3]),
"b": constant_op.constant(37.0)
}),
) # pyformat: disable
  def testStructureFromValueEquality(self, value1_fn, value2_fn,
                                     *not_equal_value_fns):
    """Checks __eq__/__ne__/hash consistency of specs built from values."""
    # pylint: disable=g-generic-assert
    s1 = structure.type_spec_from_value(value1_fn())
    s2 = structure.type_spec_from_value(value2_fn())
    self.assertEqual(s1, s1)  # check __eq__ operator.
    self.assertEqual(s1, s2)  # check __eq__ operator.
    self.assertFalse(s1 != s1)  # check __ne__ operator.
    self.assertFalse(s1 != s2)  # check __ne__ operator.
    for c1, c2 in zip(nest.flatten(s1), nest.flatten(s2)):
      self.assertEqual(hash(c1), hash(c1))  # hash must be stable across calls.
      self.assertEqual(hash(c1), hash(c2))  # equal specs must hash equal.
    for value_fn in not_equal_value_fns:
      s3 = structure.type_spec_from_value(value_fn())
      self.assertNotEqual(s1, s3)  # check __ne__ operator.
      self.assertNotEqual(s2, s3)  # check __ne__ operator.
      self.assertFalse(s1 == s3)  # check __eq_ operator.
      self.assertFalse(s2 == s3)  # check __eq_ operator.
@parameterized.named_parameters(
("RaggedTensor_RaggedRank",
ragged_tensor.RaggedTensorSpec(None, dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec(None, dtypes.int32, 2)),
("RaggedTensor_Shape",
ragged_tensor.RaggedTensorSpec([3, None], dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec([5, None], dtypes.int32, 1)),
("RaggedTensor_DType",
ragged_tensor.RaggedTensorSpec(None, dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec(None, dtypes.float32, 1)),
)
  def testRaggedStructureInequality(self, s1, s2):
    """RaggedTensorSpecs differing in rank, shape, or dtype compare unequal."""
    # pylint: disable=g-generic-assert
    self.assertNotEqual(s1, s2)  # check __ne__ operator.
    self.assertFalse(s1 == s2)  # check __eq__ operator.
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
lambda: constant_op.constant(42.0), lambda: constant_op.constant([5])),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(3,), size=0),
lambda: tensor_array_ops.TensorArray(
dtype=dtypes.int32, element_shape=(), size=0)),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
lambda: sparse_tensor.SparseTensor(
indices=[[1, 2]], values=[42], dense_shape=[4, 5]), lambda:
sparse_tensor.SparseTensor(indices=[[3]], values=[-1], dense_shape=[5])),
("Nested", lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
}, lambda: {
"a": constant_op.constant(42.0),
"b": constant_op.constant([4, 5, 6])
}, lambda: {
"a": constant_op.constant([1, 2, 3]),
"b": constant_op.constant(37.0)
}),
)
  def testHash(self, value1_fn, value2_fn, value3_fn):
    """Equal specs hash equal; these unequal fixtures should hash unequal."""
    s1 = structure.type_spec_from_value(value1_fn())
    s2 = structure.type_spec_from_value(value2_fn())
    s3 = structure.type_spec_from_value(value3_fn())
    for c1, c2, c3 in zip(nest.flatten(s1), nest.flatten(s2), nest.flatten(s3)):
      self.assertEqual(hash(c1), hash(c1))  # hash must be stable across calls.
      self.assertEqual(hash(c1), hash(c2))  # equal specs must hash equal.
      # Distinct hashes are not guaranteed in general, but are expected for
      # these particular fixtures.
      self.assertNotEqual(hash(c1), hash(c3))
      self.assertNotEqual(hash(c2), hash(c3))
@parameterized.named_parameters(
(
"Tensor",
lambda: constant_op.constant(37.0),
),
(
"SparseTensor",
lambda: sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]),
),
("TensorArray", lambda: tensor_array_ops.TensorArray(
dtype=dtypes.float32, element_shape=(), size=1).write(0, 7)),
(
"RaggedTensor",
lambda: ragged_factory_ops.constant([[1, 2], [], [3]]),
),
(
"Nested_0",
lambda: {
"a": constant_op.constant(37.0),
"b": constant_op.constant([1, 2, 3])
},
),
(
"Nested_1",
lambda: {
"a":
constant_op.constant(37.0),
"b": (sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
},
),
)
def testRoundTripConversion(self, value_fn):
value = value_fn()
s = structure.type_spec_from_value(value)
def maybe_stack_ta(v):
if isinstance(v, tensor_array_ops.TensorArray):
return v.stack()
else:
return v
before = self.evaluate(maybe_stack_ta(value))
after = self.evaluate(
maybe_stack_ta(
structure.from_tensor_list(s, structure.to_tensor_list(s, value))))
flat_before = nest.flatten(before)
flat_after = nest.flatten(after)
for b, a in zip(flat_before, flat_after):
if isinstance(b, sparse_tensor.SparseTensorValue):
self.assertAllEqual(b.indices, a.indices)
self.assertAllEqual(b.values, a.values)
self.assertAllEqual(b.dense_shape, a.dense_shape)
elif isinstance(
b,
(ragged_tensor.RaggedTensor, ragged_tensor_value.RaggedTensorValue)):
self.assertAllEqual(b, a)
else:
self.assertAllEqual(b, a)
# pylint: enable=g-long-lambda
def preserveStaticShape(self):
rt = ragged_factory_ops.constant([[1, 2], [], [3]])
rt_s = structure.type_spec_from_value(rt)
rt_after = structure.from_tensor_list(rt_s,
structure.to_tensor_list(rt_s, rt))
self.assertEqual(rt_after.row_splits.shape.as_list(),
rt.row_splits.shape.as_list())
self.assertEqual(rt_after.values.shape.as_list(), [None])
st = sparse_tensor.SparseTensor(
indices=[[3, 4]], values=[-1], dense_shape=[4, 5])
st_s = structure.type_spec_from_value(st)
st_after = structure.from_tensor_list(st_s,
structure.to_tensor_list(st_s, st))
self.assertEqual(st_after.indices.shape.as_list(), [None, 2])
self.assertEqual(st_after.values.shape.as_list(), [None])
self.assertEqual(st_after.dense_shape.shape.as_list(),
st.dense_shape.shape.as_list())
  def testPreserveTensorArrayShape(self):
    """An explicit element_shape survives the tensor-list round trip."""
    ta = tensor_array_ops.TensorArray(
        dtype=dtypes.int32, size=1, element_shape=(3,))
    ta_s = structure.type_spec_from_value(ta)
    ta_after = structure.from_tensor_list(ta_s,
                                          structure.to_tensor_list(ta_s, ta))
    self.assertEqual(ta_after.element_shape.as_list(), [3])
  def testPreserveInferredTensorArrayShape(self):
    """An element_shape inferred from a write also survives the round trip."""
    ta = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=1)
    # Shape is inferred from the write.
    ta = ta.write(0, [1, 2, 3])
    ta_s = structure.type_spec_from_value(ta)
    ta_after = structure.from_tensor_list(ta_s,
                                          structure.to_tensor_list(ta_s, ta))
    self.assertEqual(ta_after.element_shape.as_list(), [3])
  def testIncompatibleStructure(self):
    """Flattening/restructuring with a mismatched spec must raise."""
    # Define three mutually incompatible values/structures, and assert that:
    # 1. Using one structure to flatten a value with an incompatible structure
    #    fails.
    # 2. Using one structure to restructure a flattened value with an
    #    incompatible structure fails.
    value_tensor = constant_op.constant(42.0)
    s_tensor = structure.type_spec_from_value(value_tensor)
    flat_tensor = structure.to_tensor_list(s_tensor, value_tensor)

    value_sparse_tensor = sparse_tensor.SparseTensor(
        indices=[[0, 0]], values=[1], dense_shape=[1, 1])
    s_sparse_tensor = structure.type_spec_from_value(value_sparse_tensor)
    flat_sparse_tensor = structure.to_tensor_list(s_sparse_tensor,
                                                  value_sparse_tensor)

    value_nest = {
        "a": constant_op.constant(37.0),
        "b": constant_op.constant([1, 2, 3])
    }
    s_nest = structure.type_spec_from_value(value_nest)
    flat_nest = structure.to_tensor_list(s_nest, value_nest)

    # to_tensor_list() with a value of the wrong structure must raise.
    with self.assertRaisesRegex(
        ValueError, r"SparseTensor.* is not convertible to a tensor with "
        r"dtype.*float32.* and shape \(\)"):
      structure.to_tensor_list(s_tensor, value_sparse_tensor)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_tensor, value_nest)
    with self.assertRaisesRegex(TypeError,
                                "Neither a SparseTensor nor SparseTensorValue"):
      structure.to_tensor_list(s_sparse_tensor, value_tensor)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_sparse_tensor, value_nest)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_nest, value_tensor)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_nest, value_sparse_tensor)

    # from_tensor_list() with a mismatched flat list must raise.
    with self.assertRaisesRegex(ValueError, r"Incompatible input:"):
      structure.from_tensor_list(s_tensor, flat_sparse_tensor)
    with self.assertRaisesRegex(ValueError, "Expected 1 tensors but got 2."):
      structure.from_tensor_list(s_tensor, flat_nest)
    with self.assertRaisesRegex(ValueError, "Incompatible input: "):
      structure.from_tensor_list(s_sparse_tensor, flat_tensor)
    with self.assertRaisesRegex(ValueError, "Expected 1 tensors but got 2."):
      structure.from_tensor_list(s_sparse_tensor, flat_nest)
    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 1."):
      structure.from_tensor_list(s_nest, flat_tensor)
    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 1."):
      structure.from_tensor_list(s_nest, flat_sparse_tensor)
  def testIncompatibleNestedStructure(self):
    """Nested flatten/restructure with a mismatched spec must raise."""
    # Define three mutually incompatible nested values/structures, and assert
    # that:
    # 1. Using one structure to flatten a value with an incompatible structure
    #    fails.
    # 2. Using one structure to restructure a flattened value with an
    #    incompatible structure fails.
    value_0 = {
        "a": constant_op.constant(37.0),
        "b": constant_op.constant([1, 2, 3])
    }
    s_0 = structure.type_spec_from_value(value_0)
    flat_s_0 = structure.to_tensor_list(s_0, value_0)

    # `value_1` has compatible nested structure with `value_0`, but different
    # classes.
    value_1 = {
        "a":
            constant_op.constant(37.0),
        "b":
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[1], dense_shape=[1, 1])
    }
    s_1 = structure.type_spec_from_value(value_1)
    flat_s_1 = structure.to_tensor_list(s_1, value_1)

    # `value_2` has incompatible nested structure with `value_0` and `value_1`.
    value_2 = {
        "a":
            constant_op.constant(37.0),
        "b": (sparse_tensor.SparseTensor(
            indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
              sparse_tensor.SparseTensor(
                  indices=[[3, 4]], values=[-1], dense_shape=[4, 5]))
    }
    s_2 = structure.type_spec_from_value(value_2)
    flat_s_2 = structure.to_tensor_list(s_2, value_2)

    with self.assertRaisesRegex(
        ValueError, r"SparseTensor.* is not convertible to a tensor with "
        r"dtype.*int32.* and shape \(3,\)"):
      structure.to_tensor_list(s_0, value_1)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_0, value_2)
    with self.assertRaisesRegex(TypeError,
                                "Neither a SparseTensor nor SparseTensorValue"):
      structure.to_tensor_list(s_1, value_0)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_1, value_2)

    # NOTE(mrry): The repr of the dictionaries is not sorted, so the regexp
    # needs to account for "a" coming before or after "b". It might be worth
    # adding a deterministic repr for these error messages (among other
    # improvements).
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_2, value_0)
    with self.assertRaisesRegex(
        ValueError, "The two structures don't have the same nested structure."):
      structure.to_tensor_list(s_2, value_1)

    with self.assertRaisesRegex(ValueError, r"Incompatible input:"):
      structure.from_tensor_list(s_0, flat_s_1)
    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 3."):
      structure.from_tensor_list(s_0, flat_s_2)
    with self.assertRaisesRegex(ValueError, "Incompatible input: "):
      structure.from_tensor_list(s_1, flat_s_0)
    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 3."):
      structure.from_tensor_list(s_1, flat_s_2)
    with self.assertRaisesRegex(ValueError, "Expected 3 tensors but got 2."):
      structure.from_tensor_list(s_2, flat_s_0)
    with self.assertRaisesRegex(ValueError, "Expected 3 tensors but got 2."):
      structure.from_tensor_list(s_2, flat_s_1)
@parameterized.named_parameters(
("Tensor", dtypes.float32, tensor_shape.TensorShape(
[]), ops.Tensor, tensor_spec.TensorSpec([], dtypes.float32)),
("SparseTensor", dtypes.int32, tensor_shape.TensorShape(
[2, 2]), sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorSpec([2, 2], dtypes.int32)),
("TensorArray_0", dtypes.int32,
tensor_shape.TensorShape([None, True, 2, 2
]), tensor_array_ops.TensorArray,
tensor_array_ops.TensorArraySpec(
[2, 2], dtypes.int32, dynamic_size=None, infer_shape=True)),
("TensorArray_1", dtypes.int32,
tensor_shape.TensorShape([True, None, 2, 2
]), tensor_array_ops.TensorArray,
tensor_array_ops.TensorArraySpec(
[2, 2], dtypes.int32, dynamic_size=True, infer_shape=None)),
("TensorArray_2", dtypes.int32,
tensor_shape.TensorShape([True, False, 2, 2
]), tensor_array_ops.TensorArray,
tensor_array_ops.TensorArraySpec(
[2, 2], dtypes.int32, dynamic_size=True, infer_shape=False)),
("RaggedTensor", dtypes.int32, tensor_shape.TensorShape([2, None]),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32, 1),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32, 1)),
("Nested", {
"a": dtypes.float32,
"b": (dtypes.int32, dtypes.string)
}, {
"a": tensor_shape.TensorShape([]),
"b": (tensor_shape.TensorShape([2, 2]), tensor_shape.TensorShape([]))
}, {
"a": ops.Tensor,
"b": (sparse_tensor.SparseTensor, ops.Tensor)
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec(
[2, 2], dtypes.int32), tensor_spec.TensorSpec([], dtypes.string))
}),
)
  def testConvertLegacyStructure(self, output_types, output_shapes,
                                 output_classes, expected_structure):
    """Legacy (types, shapes, classes) triples convert to the expected spec."""
    actual_structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    self.assertEqual(actual_structure, expected_structure)
  def testNestedNestedStructure(self):
    """to/from_tensor_list preserve identity of leaf tensors in nested tuples."""
    s = (tensor_spec.TensorSpec([], dtypes.int64),
         (tensor_spec.TensorSpec([], dtypes.float32),
          tensor_spec.TensorSpec([], dtypes.string)))

    int64_t = constant_op.constant(37, dtype=dtypes.int64)
    float32_t = constant_op.constant(42.0)
    string_t = constant_op.constant("Foo")

    nested_tensors = (int64_t, (float32_t, string_t))

    # Flattening must yield the very same tensor objects, in order.
    tensor_list = structure.to_tensor_list(s, nested_tensors)
    for expected, actual in zip([int64_t, float32_t, string_t], tensor_list):
      self.assertIs(expected, actual)

    (actual_int64_t,
     (actual_float32_t,
      actual_string_t)) = structure.from_tensor_list(s, tensor_list)
    self.assertIs(int64_t, actual_int64_t)
    self.assertIs(float32_t, actual_float32_t)
    self.assertIs(string_t, actual_string_t)

    # from_compatible_tensor_list() (the no-check fast path) must behave the
    # same way.
    (actual_int64_t, (actual_float32_t, actual_string_t)) = (
        structure.from_compatible_tensor_list(s, tensor_list))
    self.assertIs(int64_t, actual_int64_t)
    self.assertIs(float32_t, actual_float32_t)
    self.assertIs(string_t, actual_string_t)
@parameterized.named_parameters(
("Tensor", tensor_spec.TensorSpec([], dtypes.float32), 32,
tensor_spec.TensorSpec([32], dtypes.float32)),
("TensorUnknown", tensor_spec.TensorSpec([], dtypes.float32), None,
tensor_spec.TensorSpec([None], dtypes.float32)),
("SparseTensor", sparse_tensor.SparseTensorSpec([None], dtypes.float32),
32, sparse_tensor.SparseTensorSpec([32, None], dtypes.float32)),
("SparseTensorUnknown",
sparse_tensor.SparseTensorSpec([4], dtypes.float32), None,
sparse_tensor.SparseTensorSpec([None, 4], dtypes.float32)),
("RaggedTensor",
ragged_tensor.RaggedTensorSpec([2, None], dtypes.float32, 1), 32,
ragged_tensor.RaggedTensorSpec([32, 2, None], dtypes.float32, 2)),
("RaggedTensorUnknown",
ragged_tensor.RaggedTensorSpec([4, None], dtypes.float32, 1), None,
ragged_tensor.RaggedTensorSpec([None, 4, None], dtypes.float32, 2)),
("Nested", {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec([2, 2], dtypes.int32),
tensor_spec.TensorSpec([], dtypes.string))
}, 128, {
"a":
tensor_spec.TensorSpec([128], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec([128, 2, 2], dtypes.int32),
tensor_spec.TensorSpec([128], dtypes.string))
}),
)
  def testBatch(self, element_structure, batch_size,
                expected_batched_structure):
    """_batch() prepends the batch dimension to every component spec."""
    batched_structure = nest.map_structure(
        lambda component_spec: component_spec._batch(batch_size),
        element_structure)
    self.assertEqual(batched_structure, expected_batched_structure)
@parameterized.named_parameters(
("Tensor", tensor_spec.TensorSpec(
[32], dtypes.float32), tensor_spec.TensorSpec([], dtypes.float32)),
("TensorUnknown", tensor_spec.TensorSpec(
[None], dtypes.float32), tensor_spec.TensorSpec([], dtypes.float32)),
("SparseTensor", sparse_tensor.SparseTensorSpec([32, None],
dtypes.float32),
sparse_tensor.SparseTensorSpec([None], dtypes.float32)),
("SparseTensorUnknown",
sparse_tensor.SparseTensorSpec([None, 4], dtypes.float32),
sparse_tensor.SparseTensorSpec([4], dtypes.float32)),
("RaggedTensor",
ragged_tensor.RaggedTensorSpec([32, None, None], dtypes.float32, 2),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32, 1)),
("RaggedTensorUnknown",
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.float32, 2),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32, 1)),
("Nested", {
"a":
tensor_spec.TensorSpec([128], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec([128, 2, 2], dtypes.int32),
tensor_spec.TensorSpec([None], dtypes.string))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (sparse_tensor.SparseTensorSpec(
[2, 2], dtypes.int32), tensor_spec.TensorSpec([], dtypes.string))
}),
)
  def testUnbatch(self, element_structure, expected_unbatched_structure):
    """_unbatch() strips the leading batch dimension from every component."""
    unbatched_structure = nest.map_structure(
        lambda component_spec: component_spec._unbatch(), element_structure)
    self.assertEqual(unbatched_structure, expected_unbatched_structure)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
lambda: constant_op.constant([1.0, 2.0])),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=[13], dense_shape=[2])),
("RaggedTensor", lambda: ragged_factory_ops.constant([[[1]], [[2]]]),
lambda: ragged_factory_ops.constant([[1]])),
("Nest", lambda:
(constant_op.constant([[1.0, 2.0], [3.0, 4.0]]),
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[13, 27], dense_shape=[2, 2])),
lambda: (constant_op.constant([1.0, 2.0]),
sparse_tensor.SparseTensor(
indices=[[0]], values=[13], dense_shape=[2]))),
)
  def testToBatchedTensorList(self, value_fn, element_0_fn):
    """to_batched_tensor_list() splits on the batch dim; element 0 matches."""
    batched_value = value_fn()
    s = structure.type_spec_from_value(batched_value)
    batched_tensor_list = structure.to_batched_tensor_list(s, batched_value)

    # The batch dimension is 2 for all of the test cases.
    # NOTE(mrry): `tf.shape()` does not currently work for the DT_VARIANT
    # tensors in which we store sparse tensors.
    for t in batched_tensor_list:
      if t.dtype != dtypes.variant:
        self.assertEqual(2, self.evaluate(array_ops.shape(t)[0]))

    # Test that the 0th element from the unbatched tensor is equal to the
    # expected value.
    expected_element_0 = self.evaluate(element_0_fn())
    unbatched_s = nest.map_structure(
        lambda component_spec: component_spec._unbatch(), s)
    actual_element_0 = structure.from_tensor_list(
        unbatched_s, [t[0] for t in batched_tensor_list])

    for expected, actual in zip(
        nest.flatten(expected_element_0), nest.flatten(actual_element_0)):
      self.assertValuesEqual(expected, actual)
# pylint: enable=g-long-lambda
  def testDatasetSpecConstructor(self):
    """DatasetSpec stores the element spec and normalizes the dataset shape."""
    rt_spec = ragged_tensor.RaggedTensorSpec([10, None], dtypes.int32)
    st_spec = sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32)
    t_spec = tensor_spec.TensorSpec([10, 8], dtypes.string)
    element_spec = {"rt": rt_spec, "st": st_spec, "t": t_spec}
    ds_struct = dataset_ops.DatasetSpec(element_spec, [5])
    self.assertEqual(ds_struct._element_spec, element_spec)
    # Note: shape was automatically converted from a list to a TensorShape.
    self.assertEqual(ds_struct._dataset_shape, tensor_shape.TensorShape([5]))
  def testCustomMapping(self):
    """A user-defined Mapping subclass round-trips through spec inference."""
    elem = CustomMap(foo=constant_op.constant(37.))
    spec = structure.type_spec_from_value(elem)
    # The spec container preserves the custom mapping type, not a plain dict.
    self.assertIsInstance(spec, CustomMap)
    self.assertEqual(spec["foo"], tensor_spec.TensorSpec([], dtypes.float32))
  def testObjectProxy(self):
    """A wrapt.ObjectProxy around a namedtuple yields the same spec."""
    nt_type = collections.namedtuple("A", ["x", "y"])
    proxied = wrapt.ObjectProxy(nt_type(1, 2))
    proxied_spec = structure.type_spec_from_value(proxied)
    self.assertEqual(structure.type_spec_from_value(nt_type(1, 2)),
                     proxied_spec)
class CustomMap(collections_abc.Mapping):
  """Custom, immutable map.

  Entries live directly in the instance ``__dict__`` and are exposed
  through the read-only ``Mapping`` protocol (``in``, ``.keys()``,
  ``.items()`` etc. come from the mixin).
  """

  def __init__(self, *args, **kwargs):
    # Accept anything dict() accepts and store the entries as attributes.
    entries = dict(*args, **kwargs)
    self.__dict__.update(entries)

  def __getitem__(self, key):
    return self.__dict__[key]

  def __iter__(self):
    return iter(self.__dict__)

  def __len__(self):
    return len(self.__dict__)
if __name__ == "__main__":
  test.main()  # Delegate to the TensorFlow test runner.
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.servicedirectory_v1.types import lookup_service
from .base import LookupServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import LookupServiceGrpcTransport
class LookupServiceGrpcAsyncIOTransport(LookupServiceTransport):
    """gRPC AsyncIO backend transport for LookupService.
    Service Directory API for looking up service data at runtime.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # The asyncio gRPC channel every RPC on this transport is sent over.
    _grpc_channel: aio.Channel
    # Cache of stub callables created by the properties below, keyed by
    # RPC method name (e.g. "resolve_service").
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "servicedirectory.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "servicedirectory.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[aio.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel provided: work out the mTLS configuration before
            # deferring to the base transport for credential resolution.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    # -1 disables gRPC's default message-size limits.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def resolve_service(
        self,
    ) -> Callable[
        [lookup_service.ResolveServiceRequest],
        Awaitable[lookup_service.ResolveServiceResponse],
    ]:
        r"""Return a callable for the resolve service method over gRPC.
        Returns a [service][google.cloud.servicedirectory.v1.Service]
        and its associated endpoints. Resolving a service is not
        considered an active developer method.
        Returns:
            Callable[[~.ResolveServiceRequest],
                    Awaitable[~.ResolveServiceResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "resolve_service" not in self._stubs:
            self._stubs["resolve_service"] = self.grpc_channel.unary_unary(
                "/google.cloud.servicedirectory.v1.LookupService/ResolveService",
                request_serializer=lookup_service.ResolveServiceRequest.serialize,
                response_deserializer=lookup_service.ResolveServiceResponse.deserialize,
            )
        return self._stubs["resolve_service"]
    def close(self):
        # Note: ``aio.Channel.close()`` is a coroutine; the returned
        # awaitable must be awaited by the caller.
        return self.grpc_channel.close()
# Explicit public API of this module.
__all__ = ("LookupServiceGrpcAsyncIOTransport",)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.servicedirectory_v1beta1.types import endpoint
from google.cloud.servicedirectory_v1beta1.types import endpoint as gcs_endpoint
from google.cloud.servicedirectory_v1beta1.types import namespace
from google.cloud.servicedirectory_v1beta1.types import namespace as gcs_namespace
from google.cloud.servicedirectory_v1beta1.types import registration_service
from google.cloud.servicedirectory_v1beta1.types import service
from google.cloud.servicedirectory_v1beta1.types import service as gcs_service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import RegistrationServiceTransport, DEFAULT_CLIENT_INFO
class RegistrationServiceGrpcTransport(RegistrationServiceTransport):
"""gRPC backend transport for RegistrationService.
Service Directory API for registering services. It defines the
following resource model:
- The API has a collection of
[Namespace][google.cloud.servicedirectory.v1beta1.Namespace]
resources, named ``projects/*/locations/*/namespaces/*``.
- Each Namespace has a collection of
[Service][google.cloud.servicedirectory.v1beta1.Service]
resources, named
``projects/*/locations/*/namespaces/*/services/*``.
- Each Service has a collection of
[Endpoint][google.cloud.servicedirectory.v1beta1.Endpoint]
resources, named
``projects/*/locations/*/namespaces/*/services/*/endpoints/*``.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def create_namespace(
self,
) -> Callable[
[registration_service.CreateNamespaceRequest], gcs_namespace.Namespace
]:
r"""Return a callable for the create namespace method over gRPC.
Creates a namespace, and returns the new namespace.
Returns:
Callable[[~.CreateNamespaceRequest],
~.Namespace]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_namespace" not in self._stubs:
self._stubs["create_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/CreateNamespace",
request_serializer=registration_service.CreateNamespaceRequest.serialize,
response_deserializer=gcs_namespace.Namespace.deserialize,
)
return self._stubs["create_namespace"]
@property
def list_namespaces(
self,
) -> Callable[
[registration_service.ListNamespacesRequest],
registration_service.ListNamespacesResponse,
]:
r"""Return a callable for the list namespaces method over gRPC.
Lists all namespaces.
Returns:
Callable[[~.ListNamespacesRequest],
~.ListNamespacesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_namespaces" not in self._stubs:
self._stubs["list_namespaces"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/ListNamespaces",
request_serializer=registration_service.ListNamespacesRequest.serialize,
response_deserializer=registration_service.ListNamespacesResponse.deserialize,
)
return self._stubs["list_namespaces"]
@property
def get_namespace(
self,
) -> Callable[[registration_service.GetNamespaceRequest], namespace.Namespace]:
r"""Return a callable for the get namespace method over gRPC.
Gets a namespace.
Returns:
Callable[[~.GetNamespaceRequest],
~.Namespace]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_namespace" not in self._stubs:
self._stubs["get_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetNamespace",
request_serializer=registration_service.GetNamespaceRequest.serialize,
response_deserializer=namespace.Namespace.deserialize,
)
return self._stubs["get_namespace"]
@property
def update_namespace(
self,
) -> Callable[
[registration_service.UpdateNamespaceRequest], gcs_namespace.Namespace
]:
r"""Return a callable for the update namespace method over gRPC.
Updates a namespace.
Returns:
Callable[[~.UpdateNamespaceRequest],
~.Namespace]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_namespace" not in self._stubs:
self._stubs["update_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/UpdateNamespace",
request_serializer=registration_service.UpdateNamespaceRequest.serialize,
response_deserializer=gcs_namespace.Namespace.deserialize,
)
return self._stubs["update_namespace"]
@property
def delete_namespace(
self,
) -> Callable[[registration_service.DeleteNamespaceRequest], empty_pb2.Empty]:
r"""Return a callable for the delete namespace method over gRPC.
Deletes a namespace. This also deletes all services
and endpoints in the namespace.
Returns:
Callable[[~.DeleteNamespaceRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_namespace" not in self._stubs:
self._stubs["delete_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/DeleteNamespace",
request_serializer=registration_service.DeleteNamespaceRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_namespace"]
@property
def create_service(
self,
) -> Callable[[registration_service.CreateServiceRequest], gcs_service.Service]:
r"""Return a callable for the create service method over gRPC.
Creates a service, and returns the new service.
Returns:
Callable[[~.CreateServiceRequest],
~.Service]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_service" not in self._stubs:
self._stubs["create_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/CreateService",
request_serializer=registration_service.CreateServiceRequest.serialize,
response_deserializer=gcs_service.Service.deserialize,
)
return self._stubs["create_service"]
@property
def list_services(
self,
) -> Callable[
[registration_service.ListServicesRequest],
registration_service.ListServicesResponse,
]:
r"""Return a callable for the list services method over gRPC.
Lists all services belonging to a namespace.
Returns:
Callable[[~.ListServicesRequest],
~.ListServicesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_services" not in self._stubs:
self._stubs["list_services"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/ListServices",
request_serializer=registration_service.ListServicesRequest.serialize,
response_deserializer=registration_service.ListServicesResponse.deserialize,
)
return self._stubs["list_services"]
@property
def get_service(
self,
) -> Callable[[registration_service.GetServiceRequest], service.Service]:
r"""Return a callable for the get service method over gRPC.
Gets a service.
Returns:
Callable[[~.GetServiceRequest],
~.Service]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_service" not in self._stubs:
self._stubs["get_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetService",
request_serializer=registration_service.GetServiceRequest.serialize,
response_deserializer=service.Service.deserialize,
)
return self._stubs["get_service"]
@property
def update_service(
self,
) -> Callable[[registration_service.UpdateServiceRequest], gcs_service.Service]:
r"""Return a callable for the update service method over gRPC.
Updates a service.
Returns:
Callable[[~.UpdateServiceRequest],
~.Service]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_service" not in self._stubs:
self._stubs["update_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/UpdateService",
request_serializer=registration_service.UpdateServiceRequest.serialize,
response_deserializer=gcs_service.Service.deserialize,
)
return self._stubs["update_service"]
@property
def delete_service(
self,
) -> Callable[[registration_service.DeleteServiceRequest], empty_pb2.Empty]:
r"""Return a callable for the delete service method over gRPC.
Deletes a service. This also deletes all endpoints
associated with the service.
Returns:
Callable[[~.DeleteServiceRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_service" not in self._stubs:
self._stubs["delete_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/DeleteService",
request_serializer=registration_service.DeleteServiceRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_service"]
@property
def create_endpoint(
self,
) -> Callable[[registration_service.CreateEndpointRequest], gcs_endpoint.Endpoint]:
r"""Return a callable for the create endpoint method over gRPC.
Creates an endpoint, and returns the new endpoint.
Returns:
Callable[[~.CreateEndpointRequest],
~.Endpoint]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_endpoint" not in self._stubs:
self._stubs["create_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/CreateEndpoint",
request_serializer=registration_service.CreateEndpointRequest.serialize,
response_deserializer=gcs_endpoint.Endpoint.deserialize,
)
return self._stubs["create_endpoint"]
@property
def list_endpoints(
self,
) -> Callable[
[registration_service.ListEndpointsRequest],
registration_service.ListEndpointsResponse,
]:
r"""Return a callable for the list endpoints method over gRPC.
Lists all endpoints.
Returns:
Callable[[~.ListEndpointsRequest],
~.ListEndpointsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_endpoints" not in self._stubs:
self._stubs["list_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/ListEndpoints",
request_serializer=registration_service.ListEndpointsRequest.serialize,
response_deserializer=registration_service.ListEndpointsResponse.deserialize,
)
return self._stubs["list_endpoints"]
@property
def get_endpoint(
self,
) -> Callable[[registration_service.GetEndpointRequest], endpoint.Endpoint]:
r"""Return a callable for the get endpoint method over gRPC.
Gets an endpoint.
Returns:
Callable[[~.GetEndpointRequest],
~.Endpoint]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_endpoint" not in self._stubs:
self._stubs["get_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetEndpoint",
request_serializer=registration_service.GetEndpointRequest.serialize,
response_deserializer=endpoint.Endpoint.deserialize,
)
return self._stubs["get_endpoint"]
@property
def update_endpoint(
self,
) -> Callable[[registration_service.UpdateEndpointRequest], gcs_endpoint.Endpoint]:
r"""Return a callable for the update endpoint method over gRPC.
Updates an endpoint.
Returns:
Callable[[~.UpdateEndpointRequest],
~.Endpoint]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_endpoint" not in self._stubs:
self._stubs["update_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/UpdateEndpoint",
request_serializer=registration_service.UpdateEndpointRequest.serialize,
response_deserializer=gcs_endpoint.Endpoint.deserialize,
)
return self._stubs["update_endpoint"]
@property
def delete_endpoint(
self,
) -> Callable[[registration_service.DeleteEndpointRequest], empty_pb2.Empty]:
r"""Return a callable for the delete endpoint method over gRPC.
Deletes an endpoint.
Returns:
Callable[[~.DeleteEndpointRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_endpoint" not in self._stubs:
self._stubs["delete_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/DeleteEndpoint",
request_serializer=registration_service.DeleteEndpointRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_endpoint"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM Policy for a resource (namespace or
service only).
Returns:
Callable[[~.GetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM Policy for a resource (namespace or
service only).
Returns:
Callable[[~.SetIamPolicyRequest],
~.Policy]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
iam_policy_pb2.TestIamPermissionsResponse,
]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests IAM permissions for a resource (namespace or
service only).
Returns:
Callable[[~.TestIamPermissionsRequest],
~.TestIamPermissionsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
    def close(self):
        """Close the underlying gRPC channel, releasing its connections."""
        self.grpc_channel.close()
__all__ = ("RegistrationServiceGrpcTransport",)
|
|
import time
from requests import request, ConnectionError
from ..utils import SSLHttpAdapter, module_member, parse_qs, user_agent
from ..exceptions import AuthFailed
class BaseAuth(object):
    """An authentication backend that authenticates the user based on
    the provider response."""
    name = ''  # provider name, it's stored in database
    supports_inactive_user = False  # Django auth
    ID_KEY = None  # key in the provider response holding the unique user id
    EXTRA_DATA = None  # default extra-data spec, merged with EXTRA_DATA setting
    REQUIRES_EMAIL_VALIDATION = False
    SEND_USER_AGENT = False
    SSL_PROTOCOL = None  # if set, requests use an SSLHttpAdapter session

    def __init__(self, strategy, redirect_uri=None):
        self.strategy = strategy
        self.redirect_uri = redirect_uri
        self.data = self.strategy.request_data()
        # Normalize the redirect URI into an absolute URI for this request.
        self.redirect_uri = self.strategy.absolute_uri(
            self.redirect_uri
        )

    def setting(self, name, default=None):
        """Return setting value from strategy"""
        return self.strategy.setting(name, default=default, backend=self)

    def start(self):
        """Begin the auth flow: redirect to the provider, or render the
        provider's login HTML for non-redirect backends."""
        if self.uses_redirect():
            return self.strategy.redirect(self.auth_url())
        else:
            return self.strategy.html(self.auth_html())

    def complete(self, *args, **kwargs):
        return self.auth_complete(*args, **kwargs)

    def auth_url(self):
        """Must return redirect URL to auth provider"""
        raise NotImplementedError('Implement in subclass')

    def auth_html(self):
        """Must return login HTML content returned by provider"""
        raise NotImplementedError('Implement in subclass')

    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        raise NotImplementedError('Implement in subclass')

    def process_error(self, data):
        """Process data for errors, raise exception if needed.
        Call this method on any override of auth_complete."""
        pass

    def authenticate(self, *args, **kwargs):
        """Authenticate user using social credentials
        Authentication is made if this is the correct backend, backend
        verification is made by kwargs inspection for current backend
        name presence.
        """
        # Validate backend and arguments. Require that the Social Auth
        # response be passed in as a keyword argument, to make sure we
        # don't match the username/password calling conventions of
        # authenticate.
        if 'backend' not in kwargs or kwargs['backend'].name != self.name or \
           'strategy' not in kwargs or 'response' not in kwargs:
            return None
        self.strategy = self.strategy or kwargs.get('strategy')
        self.redirect_uri = self.redirect_uri or kwargs.get('redirect_uri')
        self.data = self.strategy.request_data()
        kwargs.setdefault('is_new', False)
        pipeline = self.strategy.get_pipeline(self)
        args, kwargs = self.strategy.clean_authenticate_args(*args, **kwargs)
        return self.pipeline(pipeline, *args, **kwargs)

    def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
        """Run the pipeline and, when it yields a dict, decorate and return
        the resulting user (or pass through non-dict results unchanged)."""
        out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
        if not isinstance(out, dict):
            return out
        user = out.get('user')
        if user:
            user.social_user = out.get('social')
            user.is_new = out.get('is_new')
        return user

    def disconnect(self, *args, **kwargs):
        """Run the disconnect pipeline to unlink this backend from the user."""
        pipeline = self.strategy.get_disconnect_pipeline(self)
        kwargs['name'] = self.name
        kwargs['user_storage'] = self.strategy.storage.user
        return self.run_pipeline(pipeline, *args, **kwargs)

    def run_pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
        """Execute pipeline entries starting at pipeline_index, threading the
        accumulated keyword arguments through each step.

        A step returning a non-dict short-circuits the pipeline and its value
        is returned as-is (e.g. an HTTP response for partial pipelines).
        """
        out = kwargs.copy()
        out.setdefault('strategy', self.strategy)
        out.setdefault('backend', out.pop(self.name, None) or self)
        out.setdefault('request', self.strategy.request_data())
        out.setdefault('details', {})
        for idx, name in enumerate(pipeline[pipeline_index:]):
            # Record the absolute index so halted pipelines can resume here.
            out['pipeline_index'] = pipeline_index + idx
            func = module_member(name)
            result = func(*args, **out) or {}
            if not isinstance(result, dict):
                return result
            out.update(result)
        return out

    def extra_data(self, user, uid, response, details=None, *args, **kwargs):
        """Return default extra data to store in extra_data field"""
        data = {
            # store the last time authentication took place
            'auth_time': int(time.time())
        }
        # BUGFIX: `details` defaults to None; guard it so the `details.get`
        # lookup below cannot raise AttributeError.
        details = details or {}
        for entry in (self.EXTRA_DATA or []) + self.setting('EXTRA_DATA', []):
            if not isinstance(entry, (list, tuple)):
                entry = (entry,)
            size = len(entry)
            if size >= 1 and size <= 3:
                # Entries may be (name,), (name, alias) or
                # (name, alias, discard-if-falsy).
                if size == 3:
                    name, alias, discard = entry
                elif size == 2:
                    (name, alias), discard = entry, False
                elif size == 1:
                    name = alias = entry[0]
                    discard = False
                value = response.get(name) or details.get(name)
                if discard and not value:
                    continue
                data[alias] = value
        return data

    def auth_allowed(self, response, details):
        """Return True if the user should be allowed to authenticate, by
        default check if email is whitelisted (if there's a whitelist)"""
        emails = self.setting('WHITELISTED_EMAILS', [])
        domains = self.setting('WHITELISTED_DOMAINS', [])
        email = details.get('email')
        allowed = True
        if email and (emails or domains):
            domain = email.split('@', 1)[1]
            allowed = email in emails or domain in domains
        return allowed

    def get_user_id(self, details, response):
        """Return a unique ID for the current user, by default from server
        response."""
        return response.get(self.ID_KEY)

    def get_user_details(self, response):
        """Must return user details in a known internal struct:
            {'username': <username if any>,
             'email': <user email if any>,
             'fullname': <user full name if any>,
             'first_name': <user first name if any>,
             'last_name': <user last name if any>}
        """
        raise NotImplementedError('Implement in subclass')

    def get_user_names(self, fullname='', first_name='', last_name=''):
        """Derive (fullname, first_name, last_name) from whichever of the
        three the provider supplied, splitting/joining as needed."""
        # Avoid None values
        fullname = fullname or ''
        first_name = first_name or ''
        last_name = last_name or ''
        if fullname and not (first_name or last_name):
            try:
                first_name, last_name = fullname.split(' ', 1)
            except ValueError:
                # Single-word full name: treat it as the first name.
                first_name = first_name or fullname or ''
                last_name = last_name or ''
        fullname = fullname or ' '.join((first_name, last_name))
        return fullname.strip(), first_name.strip(), last_name.strip()

    def get_user(self, user_id):
        """
        Return user with given ID from the User model used by this backend.
        This is called by django.contrib.auth.middleware.
        """
        return self.strategy.get_user(user_id)

    def continue_pipeline(self, partial):
        """Continue previous halted pipeline"""
        return self.strategy.authenticate(self,
                                          pipeline_index=partial.next_step,
                                          *partial.args,
                                          **partial.kwargs)

    def auth_extra_arguments(self):
        """Return extra arguments needed on auth process. The defaults can be
        overridden by GET parameters."""
        extra_arguments = self.setting('AUTH_EXTRA_ARGUMENTS', {}).copy()
        extra_arguments.update((key, self.data[key]) for key in extra_arguments
                               if key in self.data)
        return extra_arguments

    def uses_redirect(self):
        """Return True if this provider uses redirect url method,
        otherwise return false."""
        return True

    def request(self, url, method='GET', *args, **kwargs):
        """Perform an HTTP request with backend-wide defaults applied
        (SSL verification, timeout, user agent, SSL protocol adapter).

        Raises AuthFailed on connection errors and on non-2xx responses.
        """
        kwargs.setdefault('headers', {})
        if self.setting('VERIFY_SSL') is not None:
            kwargs.setdefault('verify', self.setting('VERIFY_SSL'))
        kwargs.setdefault('timeout', self.setting('REQUESTS_TIMEOUT') or
                                     self.setting('URLOPEN_TIMEOUT'))
        if self.SEND_USER_AGENT and 'User-Agent' not in kwargs['headers']:
            kwargs['headers']['User-Agent'] = self.setting('USER_AGENT') or \
                                              user_agent()
        try:
            if self.SSL_PROTOCOL:
                session = SSLHttpAdapter.ssl_adapter_session(self.SSL_PROTOCOL)
                response = session.request(method, url, *args, **kwargs)
            else:
                response = request(method, url, *args, **kwargs)
        except ConnectionError as err:
            raise AuthFailed(self, str(err))
        response.raise_for_status()
        return response

    def get_json(self, url, *args, **kwargs):
        return self.request(url, *args, **kwargs).json()

    def get_querystring(self, url, *args, **kwargs):
        return parse_qs(self.request(url, *args, **kwargs).text)

    def get_key_and_secret(self):
        """Return tuple with Consumer Key and Consumer Secret for current
        service provider. Must return (key, secret), order *must* be respected.
        """
        return self.setting('KEY'), self.setting('SECRET')
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the robustness and resiliency of vtworkers."""
from collections import namedtuple
import urllib
import urllib2
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
KEYSPACE_ID_TYPE = keyrange_constants.KIT_UINT64
class ShardTablets(namedtuple('ShardTablets', 'master replicas rdonlys')):
  """Container for all the tablet.Tablets of a single shard.

  `master` is a single Tablet; `replicas` and `rdonlys` are lists of
  Tablets of the corresponding tablet types.
  """

  @property
  def all_tablets(self):
    """Return every tablet of the shard (no ordering guarantee)."""
    tablets = [self.master]
    tablets.extend(self.replicas)
    tablets.extend(self.rdonlys)
    return tablets

  @property
  def replica(self):
    """Return the first replica Tablet of the shard, or None."""
    return self.replicas[0] if self.replicas else None

  @property
  def rdonly(self):
    """Return the first rdonly Tablet of the shard, or None."""
    return self.rdonlys[0] if self.rdonlys else None

  def __str__(self):
    template = """master %s
replicas:
%s
rdonlys:
%s
"""
    replica_lines = '\n'.join(' %s' % r for r in self.replicas)
    rdonly_lines = '\n'.join(' %s' % r for r in self.rdonlys)
    return template % (self.master, replica_lines, rdonly_lines)
# Module-level fixture tablets, grouped below into ShardTablets containers.
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
# Convenience groupings used throughout the tests below.
all_shard_tablets = ShardTablets(shard_master, [shard_replica], [shard_rdonly1])
shard_0_tablets = ShardTablets(
    shard_0_master, [shard_0_replica], [shard_0_rdonly1])
shard_1_tablets = ShardTablets(
    shard_1_master, [shard_1_replica], [shard_1_rdonly1])
def init_keyspace():
  """Creates a `test_keyspace` keyspace with a sharding key."""
  create_args = [
      'CreateKeyspace',
      '-sharding_column_name', 'keyspace_id',
      '-sharding_column_type', KEYSPACE_ID_TYPE,
      'test_keyspace',
  ]
  utils.run_vtctl(create_args)
def setUpModule():
  """Start the topo server and all tablet MySQL instances, then create the
  test keyspace. Tears everything down again on failure."""
  try:
    environment.topo_server().setup()
    # Start MySQL for every tablet of every shard in parallel.
    # Order matches the ShardTablets groupings: source shard first, then
    # the two destination shards.
    setup_procs = []
    for shard in (all_shard_tablets, shard_0_tablets, shard_1_tablets):
      for t in shard.all_tablets:
        setup_procs.append(t.init_mysql())
    utils.wait_procs(setup_procs)
    init_keyspace()
    logging.debug('environment set up with the following shards and tablets:')
    logging.debug('=========================================================')
    logging.debug('TABLETS: test_keyspace/0:\n%s', all_shard_tablets)
    logging.debug('TABLETS: test_keyspace/-80:\n%s', shard_0_tablets)
    logging.debug('TABLETS: test_keyspace/80-:\n%s', shard_1_tablets)
  except:
    tearDownModule()
    raise
def tearDownModule():
  """Shut down MySQL on all tablets, stop the topo server and remove all
  on-disk state, unless --skip-teardown was requested."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  # Same tablet ordering as setUpModule: source shard, then destinations.
  tablets = []
  for shard in (all_shard_tablets, shard_0_tablets, shard_1_tablets):
    tablets.extend(shard.all_tablets)
  utils.wait_procs([t.teardown_mysql() for t in tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in tablets:
    t.remove_tree()
class TestBaseSplitClone(unittest.TestCase, base_sharding.BaseShardingTest):
  """Abstract test base class for testing the SplitClone worker."""

  def __init__(self, *args, **kwargs):
    super(TestBaseSplitClone, self).__init__(*args, **kwargs)
    # Rows seeded per shard; overridable via the --num_insert_rows flag.
    self.num_insert_rows = utils.options.num_insert_rows

  def run_shard_tablets(
      self, shard_name, shard_tablets, create_table=True):
    """Handles all the necessary work for initially running a shard's tablets.

    This encompasses the following steps:
      1. (optional) Create db
      2. Starting vttablets and let themselves init them
      3. Waiting for the appropriate vttablet state
      4. Force reparent to the master tablet
      5. RebuildKeyspaceGraph
      6. (optional) Running initial schema setup

    Args:
      shard_name: the name of the shard to start tablets in
      shard_tablets: an instance of ShardTablets for the given shard
      create_table: boolean, True iff we should create a table on the tablets
    """
    # Start tablets.
    #
    # NOTE: The future master has to be started with type 'replica'.
    shard_tablets.master.start_vttablet(
        wait_for_state=None, init_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard=shard_name,
        binlog_use_v3_resharding_mode=False)
    for t in shard_tablets.replicas:
      t.start_vttablet(
          wait_for_state=None, init_tablet_type='replica',
          init_keyspace='test_keyspace', init_shard=shard_name,
          binlog_use_v3_resharding_mode=False)
    for t in shard_tablets.rdonlys:
      t.start_vttablet(
          wait_for_state=None, init_tablet_type='rdonly',
          init_keyspace='test_keyspace', init_shard=shard_name,
          binlog_use_v3_resharding_mode=False)
    # Block until tablets are up and we can enable replication.
    # All tables should be NOT_SERVING until we run InitShardMaster.
    for t in shard_tablets.all_tablets:
      t.wait_for_vttablet_state('NOT_SERVING')
    # Reparent to choose an initial master and enable replication.
    utils.run_vtctl(
        ['InitShardMaster', '-force', 'test_keyspace/%s' % shard_name,
         shard_tablets.master.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # Enforce a health check instead of waiting for the next periodic one.
    # (saves up to 1 second execution time on average)
    for t in shard_tablets.replicas:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    for t in shard_tablets.rdonlys:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    # Wait for tablet state to change after starting all tablets. This allows
    # us to start all tablets at once, instead of sequentially waiting.
    # NOTE: Replication has to be enabled first or the health check will
    # set a replica or rdonly tablet back to NOT_SERVING.
    for t in shard_tablets.all_tablets:
      t.wait_for_vttablet_state('SERVING')
    create_table_sql = (
        'create table worker_test('
        'id bigint unsigned,'
        'msg varchar(64),'
        'keyspace_id bigint(20) unsigned not null,'
        'primary key (id),'
        'index by_msg (msg)'
        ') Engine=InnoDB'
    )
    if create_table:
      utils.run_vtctl(['ApplySchema',
                       '-sql=' + create_table_sql,
                       'test_keyspace'],
                      auto_log=True)

  def copy_schema_to_destination_shards(self):
    """Copy the worker_test schema from a source rdonly tablet to both
    destination shards (which were started without the table)."""
    for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
      utils.run_vtctl(['CopySchemaShard',
                       '--exclude_tables', 'unrelated',
                       shard_rdonly1.tablet_alias,
                       keyspace_shard],
                      auto_log=True)

  def _insert_values(self, vttablet, id_offset, msg, keyspace_id, num_values):
    """Inserts values into MySQL along with the required routing comments.

    Args:
      vttablet: the Tablet instance to modify.
      id_offset: offset for the value of `id` column.
      msg: the value of `msg` column.
      keyspace_id: the value of `keyspace_id` column.
      num_values: number of rows to be inserted.
    """
    # For maximum performance, multiple values are inserted in one statement.
    # However, when the statements are too long, queries will timeout and
    # vttablet will kill them. Therefore, we chunk it into multiple statements.
    def chunks(full_list, n):
      """Yield successive n-sized chunks from full_list."""
      for i in xrange(0, len(full_list), n):
        yield full_list[i:i+n]
    max_chunk_size = 100*1000
    k = utils.uint64_to_hex(keyspace_id)
    for chunk in chunks(range(1, num_values+1), max_chunk_size):
      logging.debug('Inserting values for range [%d, %d].', chunk[0], chunk[-1])
      values_str = ''
      for i in chunk:
        if i != chunk[0]:
          values_str += ','
        values_str += "(%d, '%s', 0x%x)" % (id_offset + i, msg, keyspace_id)
      # The /* vtgate:: */ comment carries the routing keyspace_id for
      # filtered replication.
      vttablet.mquery(
          'vt_test_keyspace', [
              'begin',
              'insert into worker_test(id, msg, keyspace_id) values%s '
              '/* vtgate:: keyspace_id:%s */' % (values_str, k),
              'commit'],
          write=True)

  def insert_values(self, vttablet, num_values, num_shards, offset=0,
                    keyspace_id_range=2**64):
    """Inserts simple values, one for each potential shard.

    Each row is given a message that contains the shard number, so we can
    easily verify that the source and destination shards have the same data.

    Args:
      vttablet: the Tablet instance to modify.
      num_values: The number of values to insert.
      num_shards: the number of shards that we expect to have.
      offset: amount that we should offset the `id`s by. This is useful for
        inserting values multiple times.
      keyspace_id_range: the number of distinct values that the keyspace id
        can have.
    """
    shard_width = keyspace_id_range / num_shards
    shard_offsets = [i * shard_width for i in xrange(num_shards)]
    # TODO(mberlin): Change the "id" column values from the keyspace id to a
    #                counter starting at 1. The incrementing ids must
    #                alternate between the two shards. Without this, the
    #                vtworker chunking won't be well balanced across shards.
    for shard_num in xrange(num_shards):
      self._insert_values(
          vttablet,
          shard_offsets[shard_num] + offset,
          'msg-shard-%d' % shard_num,
          shard_offsets[shard_num],
          num_values)

  def assert_shard_data_equal(
      self, shard_num, source_tablet, destination_tablet):
    """Asserts source and destination tablets have identical shard data.

    Args:
      shard_num: The shard number of the shard that we want to verify.
      source_tablet: Tablet instance of the source shard.
      destination_tablet: Tablet instance of the destination shard.
    """
    select_query = (
        'select * from worker_test where msg="msg-shard-%s" order by id asc' %
        shard_num)
    # Make sure all the right rows made it from the source to the destination
    source_rows = source_tablet.mquery('vt_test_keyspace', select_query)
    destination_rows = destination_tablet.mquery(
        'vt_test_keyspace', select_query)
    self.assertEqual(source_rows, destination_rows)
    # Make sure that there are no extra rows on the destination
    count_query = 'select count(*) from worker_test'
    destination_count = destination_tablet.mquery(
        'vt_test_keyspace', count_query)[0][0]
    self.assertEqual(destination_count, len(destination_rows))

  def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets):
    """Runs a vtworker SplitDiff on the given keyspace/shard.

    Sets all former rdonly slaves back to rdonly.

    Args:
      keyspace_shard: keyspace/shard to run SplitDiff on (string)
      source_tablets: ShardTablets instance for the source shard
      destination_tablets: ShardTablets instance for the destination shard
    """
    # The tablet arguments are accepted for symmetry but unused here.
    _ = source_tablets, destination_tablets
    logging.debug('Running vtworker SplitDiff for %s', keyspace_shard)
    _, _ = utils.run_vtworker(
        ['-cell', 'test_nj',
         '--use_v3_resharding_mode=false',
         'SplitDiff',
         '--min_healthy_rdonly_tablets', '1',
         keyspace_shard], auto_log=True)

  def setUp(self):
    """Creates shards, starts the tablets, and inserts some data."""
    try:
      self.run_shard_tablets('0', all_shard_tablets)
      # create the split shards
      self.run_shard_tablets(
          '-80', shard_0_tablets, create_table=False)
      self.run_shard_tablets(
          '80-', shard_1_tablets, create_table=False)
      logging.debug('Start inserting initial data: %s rows',
                    self.num_insert_rows)
      self.insert_values(shard_master, self.num_insert_rows, 2)
      logging.debug(
          'Done inserting initial data, waiting for replication to catch up')
      utils.wait_for_replication_pos(shard_master, shard_rdonly1)
      logging.debug('Replication on source rdonly tablet is caught up')
    except:
      # Clean up any partially-created state so later tests start fresh.
      self.tearDown()
      raise

  def tearDown(self):
    """Does the minimum to reset topology and tablets to their initial states.

    When benchmarked, this seemed to take around 30% of the time of
    (setupModule + tearDownModule).
    FIXME(aaijazi): doing this in parallel greatly reduces the time it takes.
    See the kill_tablets method in tablet.py.
    """
    for shard_tablet in [all_shard_tablets, shard_0_tablets, shard_1_tablets]:
      for t in shard_tablet.all_tablets:
        t.reset_replication()
        t.set_semi_sync_enabled(master=False)
        t.clean_dbs()
        # _vt.vreplication should be dropped to avoid interference between
        # test cases
        t.mquery('', 'drop table if exists _vt.vreplication')
        t.kill_vttablet()
        # we allow failures here as some tablets will be gone sometimes
        # (the master tablets after an emergency reparent)
        utils.run_vtctl(['DeleteTablet', '-allow_master', t.tablet_alias],
                        auto_log=True, raise_on_error=False)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    for shard in ['0', '-80', '80-']:
      utils.run_vtctl(
          ['DeleteShard', '-even_if_serving', 'test_keyspace/%s' % shard],
          auto_log=True)
class TestBaseSplitCloneResiliency(TestBaseSplitClone):
  """Tests that the SplitClone worker is resilient to particular failures."""

  def setUp(self):
    try:
      super(TestBaseSplitCloneResiliency, self).setUp()
      # Destination shards start without schema; copy it before cloning.
      self.copy_schema_to_destination_shards()
    except:
      self.tearDown()
      raise

  def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
    """Verifies that vtworker can successfully copy data for a SplitClone.

    Order of operations:
      1. Run a background vtworker
      2. Wait until the worker successfully resolves the destination masters.
      3. Reparent the destination tablets
      4. Wait until the vtworker copy is finished
      5. Verify that the worker was forced to reresolve topology and retry
         writes due to the reparent.
      6. Verify that the data was copied successfully to both new shards

    Args:
      mysql_down: boolean. If True, we take down the MySQL instances on the
        destination masters at first, then bring them back and reparent away.

    Raises:
      AssertionError if things didn't go as expected.
    """
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--use_v3_resharding_mode=false'],
        auto_log=True)
    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    # --chunk_count is 2 because rows are currently ordered by primary key such
    # that all rows of the first shard come first and then the second shard.
    # TODO(mberlin): Remove --offline=false once vtworker ensures that the
    #                destination shards are not behind the master's replication
    #                position.
    args = ['SplitClone',
            '--offline=false',
            '--destination_writer_count', '1',
            '--min_healthy_rdonly_tablets', '1',
            '--max_tps', '9999']
    # Make the clone as slow as necessary such that there is enough time to
    # run PlannedReparent in the meantime.
    # TODO(mberlin): Once insert_values is fixed to uniformly distribute the
    #                rows across shards when sorted by primary key, remove
    #                --chunk_count 2, --min_rows_per_chunk 1 and set
    #                --source_reader_count back to 1.
    args.extend(['--source_reader_count', '2',
                 '--chunk_count', '2',
                 '--min_rows_per_chunk', '1',
                 '--write_query_max_rows', '1'])
    args.append('test_keyspace/0')
    workerclient_proc = utils.run_vtworker_client_bg(args, worker_rpc_port)
    if mysql_down:
      # vtworker is blocked at this point. This is a good time to test that its
      # throttler server is reacting to RPCs.
      self.check_throttler_service('localhost:%d' % worker_rpc_port,
                                   ['test_keyspace/-80', 'test_keyspace/80-'],
                                   9999)
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerState == cloning the data (online)',
          condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
          ' data (online)')
      logging.debug('Worker is in copy state, Shutting down mysqld on destination masters.')
      utils.wait_procs(
          [shard_0_master.shutdown_mysql(),
           shard_1_master.shutdown_mysql()])
      # If MySQL is down, we wait until vtworker retried at least once to make
      # sure it reached the point where a write failed due to MySQL being down.
      # There should be two retries at least, one for each destination shard.
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerRetryCount >= 2',
          condition_fn=lambda v: v.get('WorkerRetryCount') >= 2)
      logging.debug('Worker has retried at least once per shard, starting reparent now')
      # Bring back masters. Since we test with semi-sync now, we need at least
      # one replica for the new master. This test is already quite expensive,
      # so we bring back the old master as a replica rather than having a third
      # replica up the whole time.
      logging.debug('Restarting mysqld on destination masters')
      utils.wait_procs(
          [shard_0_master.start_mysql(),
           shard_1_master.start_mysql()])
      # Reparent away from the old masters.
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
           '-new_master', shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
           '-new_master', shard_1_replica.tablet_alias], auto_log=True)
    else:
      # NOTE: There is a race condition around this:
      #   It's possible that the SplitClone vtworker command finishes before the
      #   PlannedReparentShard vtctl command, which we start below, succeeds.
      #   Then the test would fail because vtworker did not have to retry.
      #
      # To workaround this, the test takes a parameter to increase the number of
      # rows that the worker has to copy (with the idea being to slow the worker
      # down).
      # You should choose a value for num_insert_rows, such that this test
      # passes for your environment (trial-and-error...)
      # Make sure that vtworker got past the point where it picked a master
      # for each destination shard ("finding targets" state).
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerState == cloning the data (online)',
          condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
          ' data (online)')
      logging.debug('Worker is in copy state, starting reparent now')
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
           '-new_master', shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
           '-new_master', shard_1_replica.tablet_alias], auto_log=True)
    utils.wait_procs([workerclient_proc])
    # Verify that we were forced to re-resolve and retry.
    worker_vars = utils.get_vars(worker_port)
    self.assertGreater(worker_vars['WorkerRetryCount'], 1,
                       "expected vtworker to retry each of the two reparented"
                       " destination masters at least once, but it didn't")
    self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
                        "expected vtworker to retry, but it didn't")
    utils.kill_sub_process(worker_proc, soft=True)
    # Wait for the destination RDONLYs to catch up or the following offline
    # clone will try to insert rows which already exist.
    # TODO(mberlin): Remove this once SplitClone supports it natively.
    utils.wait_for_replication_pos(shard_0_replica, shard_0_rdonly1)
    utils.wait_for_replication_pos(shard_1_replica, shard_1_rdonly1)
    # Run final offline clone to enable filtered replication.
    _, _ = utils.run_vtworker(['-cell', 'test_nj',
                               '--use_v3_resharding_mode=false',
                               'SplitClone',
                               '--online=false',
                               '--min_healthy_rdonly_tablets', '1',
                               'test_keyspace/0'], auto_log=True)
    # Make sure that everything is caught up to the same replication point
    self.run_split_diff('test_keyspace/-80', all_shard_tablets, shard_0_tablets)
    self.run_split_diff('test_keyspace/80-', all_shard_tablets, shard_1_tablets)
    self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica)
    self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica)
class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency):
  """Exercises SplitClone while the destination masters are reparented."""

  def __init__(self, *args, **kwargs):
    super(TestReparentDuringWorkerCopy, self).__init__(*args, **kwargs)
    # Use a larger data set than the default so the copy runs long enough
    # for the reparent to land while the worker is still copying.
    self.num_insert_rows = utils.options.num_insert_rows_before_reparent_test

  def test_reparent_during_worker_copy(self):
    """Simulates a destination reparent during a worker SplitClone copy.

    The SplitClone command should be able to gracefully handle the reparent
    and end up with the correct data on the destination.

    Note: this test has a small possibility of flaking, due to the timing
    issues involved. It's possible for the worker to finish the copy step
    before the reparent succeeds, in which case there are assertions that
    will fail. This seems better than having the test silently pass.
    """
    self.verify_successful_worker_copy_with_reparent()
class TestMysqlDownDuringWorkerCopy(TestBaseSplitCloneResiliency):
  # Uses the default (small) row count: the MySQL outage itself blocks the
  # worker long enough to exercise its retry logic.

  def test_mysql_down_during_worker_copy(self):
    """This test simulates MySQL being down on the destination masters."""
    self.verify_successful_worker_copy_with_reparent(mysql_down=True)
class TestVtworkerWebinterface(unittest.TestCase):
  """Checks vtworker's interactive HTTP interface (/status, commands, /reset)."""

  def setUp(self):
    # Run vtworker without any optional arguments to start in interactive mode.
    self.worker_proc, self.worker_port, _ = utils.run_vtworker_bg([])

  def tearDown(self):
    utils.kill_sub_process(self.worker_proc)

  def test_webinterface(self):
    worker_base_url = 'http://localhost:%d' % int(self.worker_port)
    # Wait for /status to become available.
    timeout = 10
    while True:
      done = False
      try:
        urllib2.urlopen(worker_base_url + '/status').read()
        done = True
      except urllib2.URLError:
        # Server not up yet; wait_step sleeps and decrements the timeout.
        pass
      if done:
        break
      timeout = utils.wait_step(
          'worker /status webpage must be available', timeout)
    # Run the command twice to make sure it's idempotent.
    for _ in range(2):
      # Run Ping command.
      try:
        urllib2.urlopen(
            worker_base_url + '/Debugging/Ping',
            data=urllib.urlencode({'message': 'pong'})).read()
        raise Exception('Should have thrown an HTTPError for the redirect.')
      except urllib2.HTTPError as e:
        # vtworker answers the POST with a 307 redirect; urllib2 surfaces
        # that as an HTTPError, which is the expected success signal here.
        self.assertEqual(e.code, 307)
      # Wait for the Ping command to finish.
      utils.poll_for_vars(
          'vtworker', self.worker_port,
          'WorkerState == done',
          condition_fn=lambda v: v.get('WorkerState') == 'done')
      # Verify that the command logged something and it's available at /status.
      status = urllib2.urlopen(worker_base_url + '/status').read()
      self.assertIn(
          "Ping command was called with message: 'pong'", status,
          'Command did not log output to /status: %s' % status)
      # Reset the job.
      urllib2.urlopen(worker_base_url + '/reset').read()
      status_after_reset = urllib2.urlopen(worker_base_url + '/status').read()
      self.assertIn(
          'This worker is idle.', status_after_reset,
          '/status does not indicate that the reset was successful')
class TestMinHealthyRdonlyTablets(TestBaseSplitCloneResiliency):
  """Checks vtworker's enforcement of --min_healthy_rdonly_tablets."""

  def test_split_clone_fails_not_enough_healthy_rdonly_tablets(self):
    """Verify vtworker errors if there aren't enough healthy RDONLY tablets.

    BUGFIX: the original method name lacked the `test_` prefix, so unittest
    discovery silently skipped it and the check never ran. Renamed so it is
    actually executed (old misspelled name kept below as an alias for any
    direct callers).
    """
    _, stderr = utils.run_vtworker(
        ['-cell', 'test_nj',
         '--wait_for_healthy_rdonly_tablets_timeout', '1s',
         '--use_v3_resharding_mode=false',
         'SplitClone',
         '--min_healthy_rdonly_tablets', '2',
         'test_keyspace/0'],
        auto_log=True,
        expect_fail=True)
    self.assertIn('findTargets() failed: FindWorkerTablet() failed for'
                  ' test_nj/test_keyspace/0: not enough healthy RDONLY'
                  ' tablets to choose from in (test_nj,test_keyspace/0),'
                  ' have 1 healthy ones, need at least 2', stderr)

  # Backward-compatible alias for the old (never-discovered) method name.
  split_clone_fails_not_enough_health_rdonly_tablets = (
      test_split_clone_fails_not_enough_healthy_rdonly_tablets)
def add_test_options(parser):
  """Registers this test's extra command line options on `parser`.

  Args:
    parser: an optparse.OptionParser-style object exposing add_option().
  """
  parser.add_option(
      '--num_insert_rows', type='int', default=100,
      help=('The number of rows, per shard, that we should insert before '
            'resharding for this test.'))
  parser.add_option(
      '--num_insert_rows_before_reparent_test', type='int', default=4500,
      help=('The number of rows, per shard, that we should insert before '
            'running TestReparentDuringWorkerCopy (supersedes --num_insert_rows in '
            'that test). There must be enough rows such that SplitClone takes '
            'several seconds to run while we run a planned reparent.'))
# Entry point: delegate option parsing and test running to the shared vitess
# test harness; add_test_options() registers this module's extra flags.
if __name__ == '__main__':
  utils.main(test_options=add_test_options)
|
|
from test import support
import array
import io
import marshal
import sys
import unittest
import os
import types
try:
import _testcapi
except ImportError:
_testcapi = None
class HelperMixin:
def helper(self, sample, *extra):
new = marshal.loads(marshal.dumps(sample, *extra))
self.assertEqual(sample, new)
try:
with open(support.TESTFN, "wb") as f:
marshal.dump(sample, f, *extra)
with open(support.TESTFN, "rb") as f:
new = marshal.load(f)
self.assertEqual(sample, new)
finally:
support.unlink(support.TESTFN)
class IntTestCase(unittest.TestCase, HelperMixin):
    """Round-trip tests for integers and booleans."""

    def test_ints(self):
        # Walk a range of Python ints wider than the machine word size,
        # halving the magnitude each step, checking both signs.
        value = sys.maxsize ** 2
        while value:
            self.helper(-value)
            self.helper(value)
            value >>= 1

    def test_bool(self):
        self.helper(True)
        self.helper(False)
class FloatTestCase(unittest.TestCase, HelperMixin):
    """Round-trip tests for floats across magnitudes and marshal versions."""

    def test_floats(self):
        tiny = 1e-25

        # Large magnitudes, shrinking geometrically, both signs.
        value = sys.maxsize * 3.7e250
        while value > tiny:
            self.helper(float(-value))
            self.helper(float(value))
            value /= 123.4567

        # Zero must survive both the new (v2) and the old (v<=1) float
        # encodings (floats were marshalled differently before version 2).
        for version in (2, 1):
            self.assertEqual(0.0, marshal.loads(marshal.dumps(0.0, version)))

        # Small magnitudes, growing geometrically; also check version 1.
        value = sys.maxsize * 3.7e-250
        while value < tiny:
            for signed in (-value, value):
                as_float = float(signed)
                self.helper(as_float)
                self.helper(as_float, 1)
            value *= 123.4567
class StringTestCase(unittest.TestCase, HelperMixin):
    """Round-trip tests for str and bytes values."""

    _TEXT_SAMPLES = ["", "Andr\xe8 Previn", "abc", " " * 10000]

    def test_unicode(self):
        # Mirrors the historical test: the value fed to helper() is itself
        # the result of a marshal round trip.
        for sample in self._TEXT_SAMPLES:
            self.helper(marshal.loads(marshal.dumps(sample)))

    def test_string(self):
        for sample in self._TEXT_SAMPLES:
            self.helper(sample)

    def test_bytes(self):
        for sample in [b"", b"Andr\xe8 Previn", b"abc", b" " * 10000]:
            self.helper(sample)
class ExceptionTestCase(unittest.TestCase):
    """Marshalling of the StopIteration exception type."""

    def test_exceptions(self):
        round_tripped = marshal.loads(marshal.dumps(StopIteration))
        self.assertEqual(StopIteration, round_tripped)
class CodeTestCase(unittest.TestCase):
def test_code(self):
co = ExceptionTestCase.test_exceptions.__code__
new = marshal.loads(marshal.dumps(co))
self.assertEqual(co, new)
def test_many_codeobjects(self):
# Issue2957: bad recursion count on code objects
count = 5000 # more than MAX_MARSHAL_STACK_DEPTH
codes = (ExceptionTestCase.test_exceptions.__code__,) * count
marshal.loads(marshal.dumps(codes))
def test_different_filenames(self):
co1 = compile("x", "f1", "exec")
co2 = compile("y", "f2", "exec")
co1, co2 = marshal.loads(marshal.dumps((co1, co2)))
self.assertEqual(co1.co_filename, "f1")
self.assertEqual(co2.co_filename, "f2")
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
co = compile(s, "myfile", "exec")
co = marshal.loads(marshal.dumps(co))
for obj in co.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(co.co_filename, obj.co_filename)
class ContainerTestCase(unittest.TestCase, HelperMixin):
    """Round-trip tests for container types built from one sample dict."""

    d = {'astring': 'foo@bar.baz.spam',
         'afloat': 7283.43,
         'anint': 2**20,
         'ashortlong': 2,
         'alist': ['.zyx.41'],
         'atuple': ('.zyx.41',)*10,
         'aboolean': False,
         'aunicode': "Andr\xe8 Previn"
         }

    def test_dict(self):
        self.helper(self.d)

    def test_list(self):
        self.helper(list(self.d.items()))

    def test_tuple(self):
        self.helper(tuple(self.d.keys()))

    def test_sets(self):
        for make in (set, frozenset):
            self.helper(make(self.d.keys()))

    @support.cpython_only
    def test_empty_frozenset_singleton(self):
        # marshal.loads() must reuse the empty frozenset singleton
        singleton = frozenset()
        self.assertIs(marshal.loads(marshal.dumps(singleton)), singleton)
class BufferTestCase(unittest.TestCase, HelperMixin):
    """Buffer-API objects marshal to (and reload as) plain bytes."""

    def _assert_loads_as_bytes(self, obj):
        # Round-tripping a buffer object yields the bytes type.
        self.assertEqual(type(marshal.loads(marshal.dumps(obj))), bytes)

    def test_bytearray(self):
        value = bytearray(b"abc")
        self.helper(value)
        self._assert_loads_as_bytes(value)

    def test_memoryview(self):
        value = memoryview(b"abc")
        self.helper(value)
        self._assert_loads_as_bytes(value)

    def test_array(self):
        arr = array.array('B', b"abc")
        self.assertEqual(marshal.loads(marshal.dumps(arr)), b"abc")
class BugsTestCase(unittest.TestCase):
    """Regression tests for historical marshal crashes and malformed input."""

    def test_bug_5888452(self):
        # Simple-minded check for SF 588452: Debug build crashes
        marshal.dumps([128] * 1000)

    def test_patch_873224(self):
        # Malformed or truncated input must raise, never crash.
        self.assertRaises(Exception, marshal.loads, '0')
        self.assertRaises(Exception, marshal.loads, 'f')
        self.assertRaises(Exception, marshal.loads, marshal.dumps(2**65)[:-1])

    def test_version_argument(self):
        # Python 2.4.0 crashes for any call to marshal.dumps(x, y)
        self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5)
        self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5)

    def test_fuzz(self):
        # simple test that it's at least not *totally* trivial to
        # crash from bad marshal data
        for c in [chr(i) for i in range(256)]:
            try:
                marshal.loads(c)
            except Exception:
                pass

    def test_loads_2x_code(self):
        # b'c' starts a (Python 2.x style) code object; the payload is junk.
        s = b'c' + (b'X' * 4*4) + b'{' * 2**20
        self.assertRaises(ValueError, marshal.loads, s)

    def test_loads_recursion(self):
        # Deeply nested junk dicts must hit the depth check, not the C stack.
        s = b'c' + (b'X' * 4*5) + b'{' * 2**20
        self.assertRaises(ValueError, marshal.loads, s)

    @support.impl_detail('specific recursion check')
    def test_recursion_limit(self):
        # Create a deeply nested structure.
        head = last = []
        # The max stack depth should match the value in Python/marshal.c.
        if os.name == 'nt' and hasattr(sys, 'gettotalrefcount'):
            MAX_MARSHAL_STACK_DEPTH = 1000
        else:
            MAX_MARSHAL_STACK_DEPTH = 2000
        for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
            last.append([0])
            last = last[-1]
        # Verify we don't blow out the stack with dumps/load.
        data = marshal.dumps(head)
        new_head = marshal.loads(data)
        # Don't use == to compare objects, it can exceed the recursion limit.
        self.assertEqual(len(new_head), len(head))
        self.assertEqual(len(new_head[0]), len(head[0]))
        self.assertEqual(len(new_head[-1]), len(head[-1]))
        # One level deeper must now exceed the limit and raise.
        last.append([0])
        self.assertRaises(ValueError, marshal.dumps, head)

    def test_exact_type_match(self):
        # Former bug:
        #   >>> class Int(int): pass
        #   >>> type(loads(dumps(Int())))
        #   <type 'int'>
        for typ in (int, float, complex, tuple, list, dict, set, frozenset):
            # Note: str subclasses are not tested because they get handled
            # by marshal's routines for objects supporting the buffer API.
            subtyp = type('subtyp', (typ,), {})
            self.assertRaises(ValueError, marshal.dumps, subtyp())

    # Issue #1792 introduced a change in how marshal increases the size of its
    # internal buffer; this test ensures that the new code is exercised.
    def test_large_marshal(self):
        size = int(1e6)
        testString = 'abc' * size
        marshal.dumps(testString)

    def test_invalid_longs(self):
        # Issue #7019: marshal.loads shouldn't produce unnormalized PyLongs
        invalid_string = b'l\x02\x00\x00\x00\x00\x00\x00\x00'
        self.assertRaises(ValueError, marshal.loads, invalid_string)

    def test_multiple_dumps_and_loads(self):
        # Issue 12291: marshal.load() should be callable multiple times
        # with interleaved data written by non-marshal code
        # Adapted from a patch by Engelbert Gruber.
        data = (1, 'abc', b'def', 1.0, (2, 'a', ['b', b'c']))
        for interleaved in (b'', b'0123'):
            ilen = len(interleaved)
            positions = []
            try:
                with open(support.TESTFN, 'wb') as f:
                    for d in data:
                        marshal.dump(d, f)
                        if ilen:
                            f.write(interleaved)
                        positions.append(f.tell())
                with open(support.TESTFN, 'rb') as f:
                    for i, d in enumerate(data):
                        self.assertEqual(d, marshal.load(f))
                        if ilen:
                            f.read(ilen)
                        self.assertEqual(positions[i], f.tell())
            finally:
                support.unlink(support.TESTFN)

    def test_loads_reject_unicode_strings(self):
        # Issue #14177: marshal.loads() should not accept unicode strings
        unicode_string = 'T'
        self.assertRaises(TypeError, marshal.loads, unicode_string)

    def test_bad_reader(self):
        # A reader that lies about how many bytes it produced must be caught.
        class BadReader(io.BytesIO):
            def readinto(self, buf):
                n = super().readinto(buf)
                if n is not None and n > 4:
                    n += 10**6
                return n
            def read(self, n):  # PyPy calls read(), not readinto()
                result = super().read(n)
                if len(result) > 4:
                    result += b'\x00' * (10**6)
                return result
        for value in (1.0, 1j, b'0123456789', '0123456789'):
            self.assertRaises(ValueError, marshal.load,
                              BadReader(marshal.dumps(value)))

    def _test_eof(self):
        # NOTE(review): leading underscore disables this test — presumably
        # deliberate (loads raises on truncation inconsistently); confirm.
        data = marshal.dumps(("hello", "dolly", None))
        for i in range(len(data)):
            self.assertRaises(EOFError, marshal.loads, data[0: i])
# Threshold used by LargeValuesTestCase: objects with this many elements
# must be rejected by marshal (and the tests skip on 32-bit builds).
LARGE_SIZE = 2**31
# Per-pointer size estimate, used in the bigmemtest memuse figures below.
pointer_size = 8 if sys.maxsize > 0xFFFFFFFF else 4
if support.check_impl_detail(pypy=False):
    sizeof_large_size = sys.getsizeof(LARGE_SIZE-1)
else:
    sizeof_large_size = 32 # Some value for PyPy
class NullWriter:
    """Minimal file-like sink that silently discards all writes."""

    def write(self, s):
        pass  # intentionally a no-op
@unittest.skipIf(LARGE_SIZE > sys.maxsize, "test cannot run on 32-bit systems")
class LargeValuesTestCase(unittest.TestCase):
    """Objects of >= 2**31 elements must raise ValueError when dumped."""

    def check_unmarshallable(self, data):
        # Dumping must fail cleanly; output goes to a discarding writer.
        self.assertRaises(ValueError, marshal.dump, data, NullWriter())

    @support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
    def test_bytes(self, size):
        self.check_unmarshallable(b'x' * size)

    @support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
    def test_str(self, size):
        self.check_unmarshallable('x' * size)

    @support.bigmemtest(size=LARGE_SIZE, memuse=pointer_size + 1, dry_run=False)
    def test_tuple(self, size):
        self.check_unmarshallable((None,) * size)

    @support.bigmemtest(size=LARGE_SIZE, memuse=pointer_size + 1, dry_run=False)
    def test_list(self, size):
        self.check_unmarshallable([None] * size)

    @support.bigmemtest(size=LARGE_SIZE,
            memuse=pointer_size*12 + sizeof_large_size,
            dry_run=False)
    def test_set(self, size):
        self.check_unmarshallable(set(range(size)))

    @support.bigmemtest(size=LARGE_SIZE,
            memuse=pointer_size*12 + sizeof_large_size,
            dry_run=False)
    def test_frozenset(self, size):
        self.check_unmarshallable(frozenset(range(size)))

    @support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
    def test_bytearray(self, size):
        self.check_unmarshallable(bytearray(size))
def CollectObjectIDs(ids, obj):
    """Collect object ids seen in a structure.

    Recursively walks ``obj`` — descending into list/tuple/set/frozenset
    elements and into dict keys and values — adding the ``id()`` of every
    distinct object encountered to ``ids``.

    Args:
        ids: a set of integer object ids, updated in place.
        obj: the structure to walk.

    Returns:
        The running count of distinct objects seen, i.e. ``len(ids)``.
    """
    if id(obj) not in ids:
        ids.add(id(obj))
        if isinstance(obj, (list, tuple, set, frozenset)):
            for element in obj:
                CollectObjectIDs(ids, element)
        elif isinstance(obj, dict):
            for key, value in obj.items():
                CollectObjectIDs(ids, key)
                CollectObjectIDs(ids, value)
    # Bug fix: the original returned None implicitly when `obj` had already
    # been seen; always report the running count for a consistent contract.
    return len(ids)
class InstancingTestCase(unittest.TestCase, HelperMixin):
    """Checks object sharing ("instancing") behavior of marshal version 3."""

    # Sample objects; floatobj appears three times in dictobj so sharing
    # is observable after a round trip.
    intobj = 123321
    floatobj = 1.2345
    strobj = "abcde"*3
    dictobj = {"hello":floatobj, "goodbye":floatobj, floatobj:"hello"}

    def helper3(self, rsample, recursive=False, simple=False,
                check_sharing=True, check_non_sharing=True):
        """Dump (rsample, rsample) with versions 3 and 2 and compare the
        number of distinct objects reconstructed, and the encoded sizes."""
        #we have two instances
        sample = (rsample, rsample)
        n0 = CollectObjectIDs(set(), sample)
        s3 = marshal.dumps(sample, 3)
        n3 = CollectObjectIDs(set(), marshal.loads(s3))
        #same number of instances generated
        # except in one corner case on top of pypy, for code objects
        if check_sharing:
            self.assertEqual(n3, n0)
        if not recursive:
            #can compare with version 2
            s2 = marshal.dumps(sample, 2)
            n2 = CollectObjectIDs(set(), marshal.loads(s2))
            #old format generated more instances
            # except on pypy where equal ints or floats always have
            # the same id anyway
            if check_non_sharing:
                self.assertGreater(n2, n0)
            #if complex objects are in there, old format is larger
            if check_non_sharing and not simple:
                self.assertGreater(len(s2), len(s3))
            else:
                self.assertGreaterEqual(len(s2), len(s3))

    def testInt(self):
        self.helper(self.intobj)
        self.helper3(self.intobj, simple=True,
                     check_non_sharing=support.check_impl_detail())

    def testFloat(self):
        self.helper(self.floatobj)
        self.helper3(self.floatobj,
                     check_non_sharing=support.check_impl_detail())

    def testStr(self):
        self.helper(self.strobj)
        self.helper3(self.strobj)

    def testDict(self):
        self.helper(self.dictobj)
        self.helper3(self.dictobj)

    def testModule(self):
        # Use this very module's code object as a large, realistic sample.
        with open(__file__, "rb") as f:
            code = f.read()
        if __file__.endswith(".py"):
            code = compile(code, __file__, "exec")
        self.helper(code)
        self.helper3(code, check_sharing=support.check_impl_detail())

    def testRecursion(self):
        # Self-referential containers: only version 3 can represent these.
        d = dict(self.dictobj)
        d["self"] = d
        self.helper3(d, recursive=True)
        l = [self.dictobj]
        l.append(l)
        self.helper3(l, recursive=True)
class CompatibilityTestCase(unittest.TestCase):
    """This module's own code object must be dumpable at every version."""

    def _test(self, version):
        """Dump this file's code with `version` and reload it."""
        with open(__file__, "rb") as f:
            code = f.read()
        if __file__.endswith(".py"):
            code = compile(code, __file__, "exec")
        marshal.loads(marshal.dumps(code, version))

    def test0To3(self):
        self._test(0)

    def test1To3(self):
        self._test(1)

    def test2To3(self):
        self._test(2)

    def test3To3(self):
        self._test(3)
class InterningTestCase(unittest.TestCase, HelperMixin):
    """String interning behavior across marshal versions."""

    strobj = sys.intern("this is an interned string")

    def testIntern(self):
        # The default version preserves interning: the identical object
        # comes back, and re-interning is a no-op.
        result = marshal.loads(marshal.dumps(self.strobj))
        self.assertEqual(result, self.strobj)
        self.assertEqual(id(result), id(self.strobj))
        reinterned = sys.intern(result)
        self.assertEqual(id(reinterned), id(result))

    def testNoIntern(self):
        # Version 2 does not intern: an equal but distinct object comes back.
        result = marshal.loads(marshal.dumps(self.strobj, 2))
        self.assertEqual(result, self.strobj)
        self.assertNotEqual(id(result), id(self.strobj))
        reinterned = sys.intern(result)
        self.assertNotEqual(id(reinterned), id(result))
@support.cpython_only
@unittest.skipUnless(_testcapi, 'requires _testcapi')
class CAPI_TestCase(unittest.TestCase, HelperMixin):
    """Exercises the PyMarshal_* C API through the _testcapi module."""

    def test_write_long_to_file(self):
        # Longs are written little-endian at every marshal version.
        for v in range(marshal.version + 1):
            _testcapi.pymarshal_write_long_to_file(0x12345678, support.TESTFN, v)
            with open(support.TESTFN, 'rb') as f:
                data = f.read()
            support.unlink(support.TESTFN)
            self.assertEqual(data, b'\x78\x56\x34\x12')

    def test_write_object_to_file(self):
        obj = ('\u20ac', b'abc', 123, 45.6, 7+8j, 'long line '*1000)
        for v in range(marshal.version + 1):
            _testcapi.pymarshal_write_object_to_file(obj, support.TESTFN, v)
            with open(support.TESTFN, 'rb') as f:
                data = f.read()
            support.unlink(support.TESTFN)
            # The file contents must reload to an equal object.
            self.assertEqual(marshal.loads(data), obj)

    def test_read_short_from_file(self):
        with open(support.TESTFN, 'wb') as f:
            f.write(b'\x34\x12xxxx')
        # Returns (value, file position); trailing bytes are untouched.
        r, p = _testcapi.pymarshal_read_short_from_file(support.TESTFN)
        support.unlink(support.TESTFN)
        self.assertEqual(r, 0x1234)
        self.assertEqual(p, 2)
        # A truncated short must raise EOFError.
        with open(support.TESTFN, 'wb') as f:
            f.write(b'\x12')
        with self.assertRaises(EOFError):
            _testcapi.pymarshal_read_short_from_file(support.TESTFN)
        support.unlink(support.TESTFN)

    def test_read_long_from_file(self):
        with open(support.TESTFN, 'wb') as f:
            f.write(b'\x78\x56\x34\x12xxxx')
        r, p = _testcapi.pymarshal_read_long_from_file(support.TESTFN)
        support.unlink(support.TESTFN)
        self.assertEqual(r, 0x12345678)
        self.assertEqual(p, 4)
        # A truncated long must raise EOFError.
        with open(support.TESTFN, 'wb') as f:
            f.write(b'\x56\x34\x12')
        with self.assertRaises(EOFError):
            _testcapi.pymarshal_read_long_from_file(support.TESTFN)
        support.unlink(support.TESTFN)

    def test_read_last_object_from_file(self):
        # PyMarshal_ReadLastObjectFromFile ignores bytes after the object.
        obj = ('\u20ac', b'abc', 123, 45.6, 7+8j)
        for v in range(marshal.version + 1):
            data = marshal.dumps(obj, v)
            with open(support.TESTFN, 'wb') as f:
                f.write(data + b'xxxx')
            r, p = _testcapi.pymarshal_read_last_object_from_file(support.TESTFN)
            support.unlink(support.TESTFN)
            self.assertEqual(r, obj)
            # A truncated object must raise EOFError.
            with open(support.TESTFN, 'wb') as f:
                f.write(data[:1])
            with self.assertRaises(EOFError):
                _testcapi.pymarshal_read_last_object_from_file(support.TESTFN)
            support.unlink(support.TESTFN)

    def test_read_object_from_file(self):
        # PyMarshal_ReadObjectFromFile also reports the end position.
        obj = ('\u20ac', b'abc', 123, 45.6, 7+8j)
        for v in range(marshal.version + 1):
            data = marshal.dumps(obj, v)
            with open(support.TESTFN, 'wb') as f:
                f.write(data + b'xxxx')
            r, p = _testcapi.pymarshal_read_object_from_file(support.TESTFN)
            support.unlink(support.TESTFN)
            self.assertEqual(r, obj)
            self.assertEqual(p, len(data))
            # A truncated object must raise EOFError.
            with open(support.TESTFN, 'wb') as f:
                f.write(data[:1])
            with self.assertRaises(EOFError):
                _testcapi.pymarshal_read_object_from_file(support.TESTFN)
            support.unlink(support.TESTFN)
if __name__ == "__main__":
    # Run every TestCase defined in this module.
    unittest.main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTObject
class NUPGExpressionTemplate(NURESTObject):
    """ Represents a PGExpressionTemplate in the VSD

        Notes:
            Policy Group Expression Template is an expression consisting of policy groups defined at Domain Template or L2 Domain Template
    """

    # REST names used by bambou to build the resource URLs for this entity.
    __rest_name__ = "pgexpressiontemplate"
    __resource_name__ = "pgexpressiontemplates"

    ## Constants
    # Allowed values of the `entity_scope` attribute.
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"

    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """ Initializes a PGExpressionTemplate instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> pgexpressiontemplate = NUPGExpressionTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'PGExpressionTemplate')
                >>> pgexpressiontemplate = NUPGExpressionTemplate(data=my_dict)
        """

        super(NUPGExpressionTemplate, self).__init__()

        # Read/Write Attributes
        # Backing fields for the properties declared below; bambou maps each
        # local_name to its remote (VSD API) name via expose_attribute().
        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._description = None
        self._entity_scope = None
        self._creation_date = None
        self._owner = None
        self._expression = None
        self._external_id = None

        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=True)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="expression", remote_name="expression", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Populate attributes from kwargs (including the special `data` dict).
        self._compute_args(**kwargs)

    # Properties

    @property
    def name(self):
        """ Get name value.

            Notes:
                Name of the Policy Group Expression Template
        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.

            Notes:
                Name of the Policy Group Expression Template
        """
        self._name = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.

                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def last_updated_date(self):
        """ Get last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                This attribute is named `lastUpdatedDate` in VSD API.
        """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.

            Notes:
                Time stamp when this object was last updated.

                This attribute is named `lastUpdatedDate` in VSD API.
        """
        self._last_updated_date = value

    @property
    def description(self):
        """ Get description value.

            Notes:
                Description of the Policy Group Expression Template
        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                Description of the Policy Group Expression Template
        """
        self._description = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level

                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.
        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.

                This attribute is named `creationDate` in VSD API.
        """
        self._creation_date = value

    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.
        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.
        """
        self._owner = value

    @property
    def expression(self):
        """ Get expression value.

            Notes:
                Actual Policy Group Expression like (PG1 || PG2) && !PG3. Allowed operators are && (AND), ! (NOT), II (OR) and ( )
        """
        return self._expression

    @expression.setter
    def expression(self, value):
        """ Set expression value.

            Notes:
                Actual Policy Group Expression like (PG1 || PG2) && !PG3. Allowed operators are && (AND), ! (NOT), II (OR) and ( )
        """
        self._expression = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems

                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
|
|
# coding=utf-8
# Copyright 2022 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tokenization ops for BERT preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow_text.python.ops import regex_split_ops
from tensorflow_text.python.ops.normalize_ops import case_fold_utf8
from tensorflow_text.python.ops.normalize_ops import normalize_utf8
from tensorflow_text.python.ops.tokenization import Detokenizer
from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets
from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer
# Monitoring counter incremented once per BertTokenizer constructed, so
# usage can be tracked through TF's monitoring API.
_tf_text_bert_tokenizer_op_create_counter = monitoring.Counter(
    "/nlx/api/python/bert_tokenizer_create_counter",
    "Counter for number of BertTokenizers created in Python.")

# Regex alternatives used to split text for BERT basic tokenization:
#   1. runs of whitespace;
#   2. ASCII punctuation ranges plus the Unicode punctuation category \p{P};
#   3. CJK ideograph code-point ranges (so each ideograph splits apart).
_DELIM_REGEX = [
    r"\s+",
    r"|".join([
        r"[!-/]",
        r"[:-@]",
        r"[\[-`]",
        r"[{-~]",
        r"[\p{P}]",
    ]),
    r"|".join([
        r"[\x{4E00}-\x{9FFF}]",
        r"[\x{3400}-\x{4DBF}]",
        r"[\x{20000}-\x{2A6DF}]",
        r"[\x{2A700}-\x{2B73F}]",
        r"[\x{2B740}-\x{2B81F}]",
        r"[\x{2B820}-\x{2CEAF}]",
        r"[\x{F900}-\x{FAFF}]",
        r"[\x{2F800}-\x{2FA1F}]",
    ]),
]

_DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX)
# Same delimiters minus whitespace: used as the "keep" pattern so whitespace
# is consumed while punctuation/CJK delimiters are emitted as tokens.
_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)
_KEEP_DELIM_NO_WHITESPACE.remove(r"\s+")

# Matches BERT's reserved "[unusedN]" vocabulary placeholder tokens.
_UNUSED_TOKEN_REGEX = "\\[unused\\d+\\]"
_KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE)
class BasicTokenizer(TokenizerWithOffsets):
  r"""Basic tokenizer for tokenizing text.

  A deterministic, rule-based tokenizer:

    - For most languages it splits on whitespace and punctuation.
    - CJK ideographs are delimiters themselves, so they split into
      individual characters.

  Example:

  >>> text_inputs = [b'taste the rustisc indiefrost']
  >>> tokenizer = BasicTokenizer(
  ...     lower_case=False, normalization_form='NFC')
  >>> tokenizer.tokenize(text_inputs)
  <tf.RaggedTensor [[b'taste', b'the', b'rustisc', b'indiefrost']]>

  Attributes:
    lower_case: bool - If true, input is lowercased first, which also
      applies NFD normalization and strips accent (combining mark)
      characters.
    keep_whitespace: bool - If true, whitespace characters are emitted as
      tokens instead of being stripped away.
    normalization_form: If set to a valid value and lower_case=False, the
      input text will be normalized to `normalization_form`. See
      normalize_utf8() op for a list of valid values.
    preserve_unused_token: If true, text matching "\\[unused\\d+\\]" is
      treated as a single token and preserved as-is for vocabulary lookup.
  """

  def __init__(self,
               lower_case=False,
               keep_whitespace=False,
               normalization_form=None,
               preserve_unused_token=False):
    self._lower_case = lower_case
    # When whitespace is kept, whitespace runs are emitted as tokens too.
    self._keep_delim_regex_pattern = (
        _DELIM_REGEX_PATTERN if keep_whitespace
        else _KEEP_DELIM_NO_WHITESPACE_PATTERN)
    if lower_case and normalization_form not in [None, "NFD"]:
      raise ValueError("`lower_case` strips accents. When `lower_case` is set, "
                       "`normalization_form` is 'NFD'.")
    self._normalization_form = normalization_form
    if preserve_unused_token:
      # Prepend the "[unusedN]" pattern so reserved tokens match before the
      # ordinary delimiters would split them.
      self._delim_regex_pattern = "|".join(
          [_UNUSED_TOKEN_REGEX, _DELIM_REGEX_PATTERN])
      self._keep_delim_regex_pattern = "|".join(
          [_UNUSED_TOKEN_REGEX, self._keep_delim_regex_pattern])
    else:
      self._delim_regex_pattern = _DELIM_REGEX_PATTERN

  def tokenize(self, text_input):
    """Tokenizes text; see `tokenize_with_offsets` for details."""
    tokens, _, _ = self.tokenize_with_offsets(text_input)
    return tokens

  def tokenize_with_offsets(self, text_input):
    """Performs basic word tokenization for BERT.

    Args:
      text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.

    Returns:
      A `RaggedTensor` of tokenized strings from text_input, plus start and
      end offsets.
    """
    if self._lower_case:
      # Lowercasing also applies NFD normalization and strips accents.
      text_input = self.lower_case(text_input)
    elif self._normalization_form is not None:
      # utf8 normalization (only meaningful when not lowercasing).
      text_input = normalize_utf8(text_input, self._normalization_form)
    # Replace control and format characters with spaces so they can never
    # appear inside a token.
    text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ")
    return regex_split_ops.regex_split_with_offsets(
        text_input, self._delim_regex_pattern, self._keep_delim_regex_pattern,
        "BertBasicTokenizer")

  def lower_case(self, text_input):
    """Case-folds, NFD-normalizes, and strips combining marks from input."""
    folded = case_fold_utf8(text_input)
    normalized = normalize_utf8(folded, "NFD")
    return string_ops.regex_replace(normalized, r"\p{Mn}", "")
class AccentPreservingBasicTokenizer(BasicTokenizer):
  """I18n-friendly tokenizer that keeps accent characters during lowercasing.

  Overrides `lower_case` to use a plain Unicode lowercase instead of the
  base class's case-fold + NFD + combining-mark stripping, so accented
  characters survive lowercasing.
  """

  # NOTE: the original defined an __init__ that only forwarded *args/**kwargs
  # to super().__init__(); it added nothing, so the base __init__ is simply
  # inherited (behavior and signature unchanged).

  def lower_case(self, text_input):
    """Lowercases `text_input` without stripping accents."""
    return string_ops.string_lower(text_input, encoding="utf-8")
class BertTokenizer(TokenizerWithOffsets, Detokenizer):
r"""Tokenizer used for BERT.
This tokenizer applies an end-to-end, text string to wordpiece tokenization.
It first applies basic tokenization, followed by wordpiece
tokenization.
See `WordpieceTokenizer` for details on the subword tokenization.
For an example of use, see
https://www.tensorflow.org/text/guide/bert_preprocessing_guide
Attributes:
vocab_lookup_table: A lookup table implementing the LookupInterface
containing the vocabulary of subwords or a string which is the file path
to the vocab.txt file.
suffix_indicator: (optional) The characters prepended to a wordpiece to
indicate that it is a suffix to another subword. Default is '##'.
max_bytes_per_word: (optional) Max size of input token. Default is 100.
max_chars_per_token: (optional) Max size of subwords, excluding suffix
indicator. If known, providing this improves the efficiency of decoding
long words.
token_out_type: (optional) The type of the token to return. This can be
`tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.
unknown_token: (optional) The value to use when an unknown token is found.
Default is "[UNK]". If this is set to a string, and `token_out_type` is
`tf.int64`, the `vocab_lookup_table` is used to convert the
`unknown_token` to an integer. If this is set to `None`, out-of-vocabulary
tokens are left as is.
split_unknown_characters: (optional) Whether to split out single unknown
characters as subtokens. If False (default), words containing unknown
characters will be treated as single unknown tokens.
lower_case: bool - If true, a preprocessing step is added to lowercase the
text, apply NFD normalization, and strip accents characters.
keep_whitespace: bool - If true, preserves whitespace characters instead of
stripping them away.
normalization_form: If set to a valid value and lower_case=False, the input
text will be normalized to `normalization_form`. See normalize_utf8() op
for a list of valid values.
preserve_unused_token: If true, text in the regex format `\\[unused\\d+\\]`
will be treated as a token and thus remain preserved as is to be looked up
in the vocabulary.
basic_tokenizer_class: If set, the class to use instead of BasicTokenizer
"""
def __init__(self,
             vocab_lookup_table,
             suffix_indicator="##",
             max_bytes_per_word=100,
             max_chars_per_token=None,
             token_out_type=dtypes.int64,
             unknown_token="[UNK]",
             split_unknown_characters=False,
             lower_case=False,
             keep_whitespace=False,
             normalization_form=None,
             preserve_unused_token=False,
             basic_tokenizer_class=BasicTokenizer):
  """Builds the two-stage BERT tokenizer.

  The first four `lower_case`..`preserve_unused_token` options configure the
  basic (word-level) stage; the remaining options configure the wordpiece
  (subword) stage. See the class docstring for the meaning of each argument.
  """
  super(BertTokenizer, self).__init__()
  # Record construction for TF usage monitoring.
  _tf_text_bert_tokenizer_op_create_counter.get_cell().increase_by(1)
  # Stage 1: whitespace/punctuation/CJK splitting (+ optional lowercasing).
  self._basic_tokenizer = basic_tokenizer_class(lower_case, keep_whitespace,
                                                normalization_form,
                                                preserve_unused_token)
  # Stage 2: wordpiece splitting of each basic token against the vocab.
  self._wordpiece_tokenizer = WordpieceTokenizer(
      vocab_lookup_table, suffix_indicator, max_bytes_per_word,
      max_chars_per_token, token_out_type, unknown_token,
      split_unknown_characters)
def tokenize_with_offsets(self, text_input):
r"""Tokenizes a tensor of string tokens into subword tokens for BERT.
Example:
>>> import pathlib
>>> pathlib.Path('/tmp/tok_vocab.txt').write_text(
... "they ##' ##re the great ##est".replace(' ', '\n'))
>>> tokenizer = BertTokenizer(
... vocab_lookup_table='/tmp/tok_vocab.txt')
>>> text_inputs = tf.constant(['greatest'.encode('utf-8')])
>>> tokenizer.tokenize_with_offsets(text_inputs)
(<tf.RaggedTensor [[[4, 5]]]>,
<tf.RaggedTensor [[[0, 5]]]>,
<tf.RaggedTensor [[[5, 8]]]>)
Args:
text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8
strings.
Returns:
A tuple of `RaggedTensor`s where the first element is the tokens where
`tokens[i1...iN, j]`, the second element is the starting offsets, the
third element is the end offset. (Please look at `tokenize` for details
on tokens.)
"""
tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)
wordpieces, wp_begin, wp_end = (
self._wordpiece_tokenizer.tokenize_with_offsets(tokens))
begin_expanded = array_ops.expand_dims(begin, axis=2)
final_begin = begin_expanded + wp_begin
final_end = begin_expanded + wp_end
return wordpieces, final_begin, final_end
def tokenize(self, text_input):
r"""Tokenizes a tensor of string tokens into subword tokens for BERT.
Example:
>>> import pathlib
>>> pathlib.Path('/tmp/tok_vocab.txt').write_text(
... "they ##' ##re the great ##est".replace(' ', '\n'))
>>> tokenizer = BertTokenizer(
... vocab_lookup_table='/tmp/tok_vocab.txt')
>>> text_inputs = tf.constant(['greatest'.encode('utf-8') ])
>>> tokenizer.tokenize(text_inputs)
<tf.RaggedTensor [[[4, 5]]]>
Args:
text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8
strings.
Returns:
A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string
contents (or ID in the vocab_lookup_table representing that string)
of the `jth` token in `input[i1...iN]`
"""
tokens = self._basic_tokenizer.tokenize(text_input)
return self._wordpiece_tokenizer.tokenize(tokens)
def detokenize(self, token_ids):
r"""Convert a `Tensor` or `RaggedTensor` of wordpiece IDs to string-words.
See `WordpieceTokenizer.detokenize` for details.
Note: `BertTokenizer.tokenize`/`BertTokenizer.detokenize` does not round
trip losslessly. The result of `detokenize` will not, in general, have the
same content or offsets as the input to `tokenize`. This is because the
"basic tokenization" step, that splits the strings into words before
applying the `WordpieceTokenizer`, includes irreversible
steps like lower-casing and splitting on punctuation. `WordpieceTokenizer`
on the other hand **is** reversible.
Note: This method assumes wordpiece IDs are dense on the interval
`[0, vocab_size)`.
Example:
>>> import pathlib
>>> pathlib.Path('/tmp/tok_vocab.txt').write_text(
... "they ##' ##re the great ##est".replace(' ', '\n'))
>>> tokenizer = BertTokenizer(
... vocab_lookup_table='/tmp/tok_vocab.txt')
>>> text_inputs = tf.constant(['greatest'.encode('utf-8')])
>>> tokenizer.detokenize([[4, 5]])
<tf.RaggedTensor [[b'greatest']]>
Args:
token_ids: A `RaggedTensor` or `Tensor` with an int dtype.
Returns:
A `RaggedTensor` with dtype `string` and the same rank as the input
`token_ids`.
"""
return self._wordpiece_tokenizer.detokenize(token_ids)
|
|
import os
import logging
import datetime
from collections import defaultdict
from google.appengine.ext import ndb
from base_controller import LoggedInHandler
from consts.account_permissions import AccountPermissions
from consts.client_type import ClientType
from consts.model_type import ModelType
from consts.notification_type import NotificationType
from helpers.event_helper import EventHelper
from helpers.mytba_helper import MyTBAHelper
from helpers.notification_helper import NotificationHelper
from helpers.validation_helper import ValidationHelper
from models.account import Account
from models.event import Event
from models.favorite import Favorite
from models.sitevar import Sitevar
from models.subscription import Subscription
from models.suggestion import Suggestion
from models.team import Team
from template_engine import jinja2_engine
import tba_config
class AccountOverview(LoggedInHandler):
    """Renders the /account overview page with myTBA and suggestion stats."""

    def get(self):
        redirect = self.request.get('redirect')
        self._require_login(redirect if redirect else '/account')
        # Redirects to registration page if account not registered
        self._require_registration('/account/register')

        # Push notifications are gated behind the 'notifications.enable'
        # sitevar; anything other than the literal string "true" disables them.
        push_sitevar = Sitevar.get_by_id('notifications.enable')
        if push_sitevar is not None and push_sitevar.values_json == "true":
            ping_enabled = ""
        else:
            ping_enabled = "disabled"

        # Compute myTBA statistics
        user = self.user_bundle.account.key
        num_favorites = Favorite.query(ancestor=user).count()
        num_subscriptions = Subscription.query(ancestor=user).count()

        # Compute suggestion statistics
        submissions_pending = Suggestion.query(
            Suggestion.review_state == Suggestion.REVIEW_PENDING,
            Suggestion.author == user).count()
        submissions_accepted = Suggestion.query(
            Suggestion.review_state == Suggestion.REVIEW_ACCEPTED,
            Suggestion.author == user).count()

        # Suggestion review statistics (only for users with review permission)
        review_permissions = False
        num_reviewed = 0
        total_pending = 0
        if AccountPermissions.MUTATE_DATA in self.user_bundle.account.permissions:
            review_permissions = True
            num_reviewed = Suggestion.query(Suggestion.reviewer == user).count()
            total_pending = Suggestion.query(
                Suggestion.review_state == Suggestion.REVIEW_PENDING).count()

        self.template_values.update({
            'status': self.request.get('status'),
            'webhook_verification_success': self.request.get('webhook_verification_success'),
            'ping_enabled': ping_enabled,
            'num_favorites': num_favorites,
            'num_subscriptions': num_subscriptions,
            'submissions_pending': submissions_pending,
            'submissions_accepted': submissions_accepted,
            'review_permissions': review_permissions,
            'num_reviewed': num_reviewed,
            'total_pending': total_pending,
        })
        self.response.out.write(jinja2_engine.render('account_overview.html', self.template_values))
class AccountEdit(LoggedInHandler):
    """Lets a logged-in, registered user edit their own display name."""

    def get(self):
        self._require_login('/account/edit')
        self._require_registration('/account/register')
        self.response.out.write(jinja2_engine.render('account_edit.html', self.template_values))

    def post(self):
        self._require_login('/account/edit')
        self._require_registration('/account/register')

        # Check to make sure that they aren't trying to edit another user
        real_account_id = self.user_bundle.account.key.id()
        if self.request.get('account_id') == real_account_id:
            account = Account.get_by_id(real_account_id)
            account.display_name = self.request.get('display_name')
            account.put()
            self.redirect('/account?status=account_edit_success')
        else:
            self.redirect('/account?status=account_edit_failure')
class AccountRegister(LoggedInHandler):
    """Handles first-time account registration (display name + registered flag)."""

    def get(self):
        self._require_login('/account/register')
        # Already-registered users go straight to the account overview.
        if self.user_bundle.account.registered:
            self.redirect('/account')
            return
        self.response.out.write(jinja2_engine.render('account_register.html', self.template_values))

    def post(self):
        self._require_login('/account/register')
        if self.user_bundle.account.registered:
            self.redirect('/account')
            return

        # Check to make sure that they aren't trying to edit another user
        real_account_id = self.user_bundle.account.key.id()
        if self.request.get('account_id') == real_account_id:
            account = Account.get_by_id(real_account_id)
            account.display_name = self.request.get('display_name')
            account.registered = True
            account.put()
            self.redirect('/account')
        else:
            self.redirect('/')
class AccountLogout(LoggedInHandler):
    """Logs the user out of TBA without ending their Google session(s)."""

    def get(self):
        # On the dev server, use App Engine's built-in logout flow.
        if os.environ.get('SERVER_SOFTWARE', '').startswith('Development/'):
            self.redirect(self.user_bundle.logout_url)
            return
        # In production, delete only the TBA session cookies so Google
        # session(s) are untouched.
        # Reference: http://ptspts.blogspot.ca/2011/12/how-to-log-out-from-appengine-app-only.html
        response = self.redirect('/')
        response.delete_cookie('ACSID')
        response.delete_cookie('SACSID')
        return response
class MyTBAController(LoggedInHandler):
    """Renders /account/mytba: the user's favorite/subscribed teams and events."""
    def get(self):
        self._require_login('/account/register')
        self._require_registration('/account/register')
        user = self.user_bundle.account.key
        favorites = Favorite.query(ancestor=user).fetch()
        subscriptions = Subscription.query(ancestor=user).fetch()
        # Index favorites/subscriptions by model key, split by model type,
        # and collect ndb keys so the actual models can be fetched in bulk.
        team_keys = set()
        team_fav = {}
        team_subs = {}
        event_keys = set()
        event_fav = {}
        event_subs = {}
        events = []
        for item in favorites + subscriptions:
            if item.model_type == ModelType.TEAM:
                team_keys.add(ndb.Key(Team, item.model_key))
                if type(item) == Favorite:
                    team_fav[item.model_key] = item
                elif type(item) == Subscription:
                    team_subs[item.model_key] = item
            elif item.model_type == ModelType.EVENT:
                if item.model_key.endswith('*'):  # All year events wildcard
                    event_year = int(item.model_key[:-1])
                    events.append(Event(  # add fake event for rendering
                        id=item.model_key,
                        short_name='ALL EVENTS',
                        event_short=item.model_key,
                        year=event_year,
                        start_date=datetime.datetime(event_year, 1, 1),
                        end_date=datetime.datetime(event_year, 1, 1)
                    ))
                else:
                    event_keys.add(ndb.Key(Event, item.model_key))
                if type(item) == Favorite:
                    event_fav[item.model_key] = item
                elif type(item) == Subscription:
                    event_subs[item.model_key] = item
        # Fetch all referenced teams and events concurrently.
        team_futures = ndb.get_multi_async(team_keys)
        event_futures = ndb.get_multi_async(event_keys)
        # Pair each team with its favorite/subscription (either may be None).
        teams = sorted([team_future.get_result() for team_future in team_futures], key=lambda x: x.team_number)
        team_fav_subs = []
        for team in teams:
            fav = team_fav.get(team.key.id(), None)
            subs = team_subs.get(team.key.id(), None)
            team_fav_subs.append((team, fav, subs))
        # Pair each event (including wildcard placeholders) the same way.
        events += [event_future.get_result() for event_future in event_futures]
        EventHelper.sort_events(events)
        event_fav_subs = []
        for event in events:
            fav = event_fav.get(event.key.id(), None)
            subs = event_subs.get(event.key.id(), None)
            event_fav_subs.append((event, fav, subs))
        self.template_values['team_fav_subs'] = team_fav_subs
        self.template_values['event_fav_subs'] = event_fav_subs
        self.template_values['status'] = self.request.get('status')
        self.template_values['year'] = datetime.datetime.now().year
        self.response.out.write(jinja2_engine.render('mytba.html', self.template_values))
class MyTBAEventController(LoggedInHandler):
    """myTBA favorite/subscription settings for a single event.

    The event key may also be a year wildcard of the form "<year>*",
    meaning "all events in <year>".
    """

    def get(self, event_key):
        """Render the settings form for `event_key` (or a year wildcard)."""
        self._require_login('/account/register')
        self._require_registration('/account/register')

        # Handle wildcard for all events in a year
        event = None
        is_wildcard = False
        if event_key.endswith('*'):
            try:
                year = int(event_key[:-1])
            except ValueError:  # was a bare except; int() on a str raises ValueError
                year = None
            if year and year >= 1992 and year <= tba_config.MAX_YEAR:
                event = Event(  # fake event for rendering
                    name="ALL {} EVENTS".format(year),
                    year=year,
                )
                is_wildcard = True
        else:
            event = Event.get_by_id(event_key)
        if not event:
            self.abort(404)

        user = self.user_bundle.account.key
        favorite = Favorite.query(Favorite.model_key == event_key, Favorite.model_type == ModelType.EVENT, ancestor=user).get()
        # Bug fix: filter the Subscription query on Subscription's own
        # properties. It previously filtered on Favorite.model_key/model_type,
        # which only matched because the property names happen to coincide.
        subscription = Subscription.query(Subscription.model_key == event_key, Subscription.model_type == ModelType.EVENT, ancestor=user).get()
        if not favorite and not subscription:  # New entry; default to being a favorite
            is_favorite = True
        else:
            is_favorite = favorite is not None

        enabled_notifications = [(en, NotificationType.render_names[en]) for en in NotificationType.enabled_event_notifications]

        self.template_values['event'] = event
        self.template_values['is_wildcard'] = is_wildcard
        self.template_values['is_favorite'] = is_favorite
        self.template_values['subscription'] = subscription
        self.template_values['enabled_notifications'] = enabled_notifications
        self.response.out.write(jinja2_engine.render('mytba_event.html', self.template_values))

    def post(self, event_key):
        """Create/remove the user's favorite and subscription for `event_key`."""
        self._require_login('/account/register')
        self._require_registration('/account/register')
        current_user_id = self.user_bundle.account.key.id()

        if self.request.get('favorite'):
            favorite = Favorite(
                parent=ndb.Key(Account, current_user_id),
                user_id=current_user_id,
                model_type=ModelType.EVENT,
                model_key=event_key
            )
            MyTBAHelper.add_favorite(favorite)
        else:
            MyTBAHelper.remove_favorite(current_user_id, event_key)

        subs = self.request.get_all('notification_types')
        if subs:
            subscription = Subscription(
                parent=ndb.Key(Account, current_user_id),
                user_id=current_user_id,
                model_type=ModelType.EVENT,
                model_key=event_key,
                notification_types=[int(s) for s in subs]
            )
            MyTBAHelper.add_subscription(subscription)
        else:
            MyTBAHelper.remove_subscription(current_user_id, event_key)
        self.redirect('/account/mytba?status=event_updated#my-events')
class MyTBATeamController(LoggedInHandler):
    """myTBA favorite/subscription settings for a single team (frc<number>)."""

    def get(self, team_number):
        """Render the settings form for team `team_number`."""
        self._require_login('/account/register')
        self._require_registration('/account/register')

        team_key = 'frc{}'.format(team_number)
        team = Team.get_by_id(team_key)
        if not team:
            self.abort(404)

        user = self.user_bundle.account.key
        favorite = Favorite.query(Favorite.model_key == team_key, Favorite.model_type == ModelType.TEAM, ancestor=user).get()
        # Bug fix: filter the Subscription query on Subscription's own
        # properties. It previously filtered on Favorite.model_key/model_type,
        # which only matched because the property names happen to coincide.
        subscription = Subscription.query(Subscription.model_key == team_key, Subscription.model_type == ModelType.TEAM, ancestor=user).get()
        if not favorite and not subscription:  # New entry; default to being a favorite
            is_favorite = True
        else:
            is_favorite = favorite is not None

        enabled_notifications = [(en, NotificationType.render_names[en]) for en in NotificationType.enabled_team_notifications]

        self.template_values['team'] = team
        self.template_values['is_favorite'] = is_favorite
        self.template_values['subscription'] = subscription
        self.template_values['enabled_notifications'] = enabled_notifications
        self.response.out.write(jinja2_engine.render('mytba_team.html', self.template_values))

    def post(self, team_number):
        """Create/remove the user's favorite and subscription for the team."""
        self._require_login('/account/register')
        self._require_registration('/account/register')
        current_user_id = self.user_bundle.account.key.id()
        team_key = 'frc{}'.format(team_number)

        if self.request.get('favorite'):
            favorite = Favorite(
                parent=ndb.Key(Account, current_user_id),
                user_id=current_user_id,
                model_type=ModelType.TEAM,
                model_key=team_key
            )
            MyTBAHelper.add_favorite(favorite)
        else:
            MyTBAHelper.remove_favorite(current_user_id, team_key)

        subs = self.request.get_all('notification_types')
        if subs:
            subscription = Subscription(
                parent=ndb.Key(Account, current_user_id),
                user_id=current_user_id,
                model_type=ModelType.TEAM,
                model_key=team_key,
                notification_types=[int(s) for s in subs]
            )
            MyTBAHelper.add_subscription(subscription)
        else:
            MyTBAHelper.remove_subscription(current_user_id, team_key)
        self.redirect('/account/mytba?status=team_updated#my-teams')
|
|
##
# Copyright (c) 2005-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.ical import Component, normalize_iCalStr
from txdav.common.datastore.test.util import CommonCommonTests, populateCalendarsFrom
from twisted.trial.unittest import TestCase
from twext.python.clsprop import classproperty
from twistedcaldav.config import config
from txdav.common.icommondatastore import ObjectResourceTooBigError, \
InvalidObjectResourceError, InvalidComponentForStoreError, InvalidUIDError, \
UIDExistsError, UIDExistsElsewhereError
from txdav.caldav.icalendarstore import InvalidComponentTypeError, \
TooManyAttendeesError, InvalidCalendarAccessError, ComponentUpdateState, \
DuplicatePrivateCommentsError
from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
from txdav.caldav.datastore.sql import CalendarObject
class ImplicitRequests (CommonCommonTests, TestCase):
"""
    Test twistedcaldav.scheduling.implicit with a Request object.
"""
    @inlineCallbacks
    def setUp(self):
        """Build the store and directory, then populate the test calendar homes."""
        yield super(ImplicitRequests, self).setUp()
        yield self.buildStoreAndDirectory()
        yield self.populate()
    @inlineCallbacks
    def populate(self):
        """Create the homes described by `requirements`, then reset notifications."""
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
        # Discard change notifications generated by the initial population.
        self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): #@NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
}
    def storeUnderTest(self):
        """
        Return the L{CalendarStore} (built during setUp) for testing.
        """
        return self._sqlCalendarStore
    @inlineCallbacks
    def test_doCreateResource(self):
        """
        Test that resource creation works and that, on read-back, attendee
        addresses have been normalized to directory UIDs (urn:x-uid:...)
        with CN= parameters.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar1 = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar1)
        yield self.commit()
        # Read the resource back; unfold long iCalendar lines before matching.
        calendar_resource1 = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar1 = (yield calendar_resource1.component())
        calendar1 = str(calendar1).replace("\r\n ", "")
        self.assertTrue("urn:x-uid:user01" in calendar1)
        self.assertTrue("urn:x-uid:user02" in calendar1)
        self.assertTrue("CN=" in calendar1)
        yield self.commit()
    @inlineCallbacks
    def test_validation_maxResourceSize(self):
        """
        Test that calendar data larger than config.MaxResourceSize is rejected
        on both create and update, and accepted when the limit is raised.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VEVENT
END:VCALENDAR
"""
        # Create with a tiny size limit: must fail.
        self.patch(config, "MaxResourceSize", 100)
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar1 = Component.fromString(data1)
        yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test.ics", calendar1), ObjectResourceTooBigError)
        yield self.commit()
        # Create with a generous limit: succeeds.
        self.patch(config, "MaxResourceSize", 10000)
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar1 = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar1)
        yield self.commit()
        # Update with a tiny limit again: must also fail.
        self.patch(config, "MaxResourceSize", 100)
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar2 = Component.fromString(data2)
        yield self.failUnlessFailure(calendar_resource.setComponent(calendar2), ObjectResourceTooBigError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_validCalendarDataCheck(self):
        """
        Test that various types of invalid calendar data are rejected when
        creating a resource: non-iCalendar text, a duplicated SUMMARY
        property, and data carrying a METHOD property.
        """
        data = (
            "xyz",
            Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:1
SUMMARY:2
END:VEVENT
END:VCALENDAR
"""),
            Component.fromString("""BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
METHOD:PUBLISH
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""),
        )
        # Each invalid payload must fail with one of the two store errors.
        for item in data:
            calendar_collection = (yield self.calendarUnderTest(home="user01"))
            calendar = item
            yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test.ics", calendar), InvalidObjectResourceError, InvalidComponentForStoreError)
            yield self.commit()
    @inlineCallbacks
    def test_validation_validSupportedComponentType(self):
        """
        Test that resources are restricted by component type: storing a VEVENT
        in a calendar restricted to VTODO fails.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        # Restrict the collection to VTODO, then try to store a VEVENT.
        calendar_collection.setSupportedComponents("VTODO")
        calendar = Component.fromString(data1)
        yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test.ics", calendar), InvalidComponentTypeError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_validAttendeeListSizeCheck(self):
        """
        Test that resources with more attendees than
        config.MaxAttendeesPerInstance are rejected.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
ATTENDEE:mailto:user03@example.com
ATTENDEE:mailto:user04@example.com
ATTENDEE:mailto:user05@example.com
END:VEVENT
END:VCALENDAR
"""
        # Five attendees against a limit of two must fail.
        self.patch(config, "MaxAttendeesPerInstance", 2)
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test.ics", calendar), TooManyAttendeesError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_validAccess_invalidValue(self):
        """
        Test that an invalid X-CALENDARSERVER-ACCESS value is rejected when
        private events are enabled.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
X-CALENDARSERVER-ACCESS:BOGUS
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        self.patch(config, "EnablePrivateEvents", True)
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test.ics", calendar), InvalidCalendarAccessError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_validAccess_authzChangeNotAllowed(self):
        """
        Test that a non-owner cannot set X-CALENDARSERVER-ACCESS, that the
        owner can, and that the access mode is re-inserted when an update
        omits it.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
X-CALENDARSERVER-ACCESS:PRIVATE
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        self.patch(config, "EnablePrivateEvents", True)
        # Authorized as user02 writing into user01's calendar: rejected.
        txn = self.transactionUnderTest()
        txn._authz_uid = "user02"
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test.ics", calendar), InvalidCalendarAccessError)
        yield self.commit()
        # This one should be OK
        txn = self.transactionUnderTest()
        txn._authz_uid = "user01"
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # This one should re-insert access mode
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VEVENT
END:VCALENDAR
"""
        txn = self.transactionUnderTest()
        txn._authz_uid = "user01"
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar = Component.fromString(data2)
        yield calendar_resource.setComponent(calendar)
        yield self.commit()
        # Unfold long iCalendar lines before the substring checks.
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar1 = (yield calendar_resource.component())
        calendar1 = str(calendar1).replace("\r\n ", "")
        self.assertTrue("X-CALENDARSERVER-ACCESS:PRIVATE" in calendar1)
        self.assertTrue("SUMMARY:Changed" in calendar1)
        yield self.commit()
    @inlineCallbacks
    def test_validation_overwriteUID(self):
        """
        Test that a change to a resource UID is not allowed on update.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # This one should fail - same resource, different UID.
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply-1
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar = Component.fromString(data2)
        yield self.failUnlessFailure(calendar_resource.setComponent(calendar), InvalidUIDError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_duplicateUIDSameCalendar(self):
        """
        Test that creating a second resource with a duplicate UID in the same
        calendar is not allowed.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # This one should fail - new resource name, same UID, same calendar.
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data2)
        yield self.failUnlessFailure(calendar_collection.createCalendarObjectWithName("test2.ics", calendar), UIDExistsError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_duplicateUIDDifferentCalendar(self):
        """
        Test that creating a resource with a duplicate UID in a different
        calendar of the same home is not allowed.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # This one should fail - same UID stored via a second calendar.
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VEVENT
END:VCALENDAR
"""
        home_collection = (yield self.homeUnderTest(name="user01"))
        calendar_collection_2 = (yield home_collection.createCalendarWithName("calendar_2"))
        calendar = Component.fromString(data2)
        yield self.failUnlessFailure(calendar_collection_2.createCalendarObjectWithName("test2.ics", calendar), UIDExistsElsewhereError)
        yield self.commit()
    @inlineCallbacks
    def test_validation_noPreservePrivateComments(self):
        """
        Test that X-CALENDARSERVER-PRIVATE-COMMENT (attendee private comments)
        are no longer restored when an update drops them.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
X-CALENDARSERVER-PRIVATE-COMMENT:My Comment
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # Update without the private comment property.
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VEVENT
END:VCALENDAR
"""
        txn = self.transactionUnderTest()
        txn._authz_uid = "user01"
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar = Component.fromString(data2)
        yield calendar_resource.setComponent(calendar)
        yield self.commit()
        # The comment must be gone; the update itself must have applied.
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar1 = (yield calendar_resource.component())
        calendar1 = str(calendar1).replace("\r\n ", "")
        self.assertFalse("X-CALENDARSERVER-PRIVATE-COMMENT:My Comment" in calendar1)
        self.assertTrue("SUMMARY:Changed" in calendar1)
        yield self.commit()
    @inlineCallbacks
    def test_validation_preserveOrganizerPrivateComments(self):
        """
        Test that X-CALENDARSERVER-ATTENDEE-COMMENT (organizer-held attendee
        comments) are restored when an update drops them.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-organizer
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:x-uid:user01";
 X-CALENDARSERVER-DTSTAMP=20131101T100000Z:Someone else's comment
END:VEVENT
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # Update without the attendee-comment property.
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-organizer
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VEVENT
END:VCALENDAR
"""
        txn = self.transactionUnderTest()
        txn._authz_uid = "user01"
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar = Component.fromString(data2)
        yield calendar_resource.setComponent(calendar)
        yield self.commit()
        # The comment must have been re-inserted alongside the update.
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar1 = (yield calendar_resource.component())
        calendar1 = str(calendar1).replace("\r\n ", "")
        self.assertTrue("X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF=\"urn:x-uid:user01\";X-CALENDARSERVER-DTSTAMP=20131101T100000Z:Someone else's comment" in calendar1)
        self.assertTrue("SUMMARY:Changed" in calendar1)
        yield self.commit()
    @inlineCallbacks
    def test_validation_replaceMissingToDoProperties_OrganizerAttendee(self):
        """
        Test that ORGANIZER/ATTENDEE properties dropped from a VTODO update
        are recovered from the stored resource.
        """
        data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTODO
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
END:VTODO
END:VCALENDAR
"""
        calendar_collection = (yield self.calendarUnderTest(home="user01"))
        calendar = Component.fromString(data1)
        yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
        yield self.commit()
        # Update omits ORGANIZER and ATTENDEE entirely.
        data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTODO
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
END:VTODO
END:VCALENDAR
"""
        txn = self.transactionUnderTest()
        txn._authz_uid = "user01"
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar = Component.fromString(data2)
        yield calendar_resource.setComponent(calendar)
        yield self.commit()
        # Scheduling properties must have been restored.
        calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
        calendar1 = (yield calendar_resource.component())
        calendar1 = str(calendar1).replace("\r\n ", "")
        self.assertTrue("ORGANIZER" in calendar1)
        self.assertTrue("ATTENDEE" in calendar1)
        self.assertTrue("SUMMARY:Changed" in calendar1)
        yield self.commit()
@inlineCallbacks
def test_validation_replaceMissingToDoProperties_Completed(self):
    """
    Test that VTODO completed status is fixed.

    When a VTODO gains a COMPLETED property, the store should also mark
    the matching attendee with PARTSTAT=COMPLETED in the stored data.
    """

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTODO
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
END:VTODO
END:VCALENDAR
"""

    # Create the initial scheduled VTODO.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTODO
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
COMPLETED:20080601T140000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
END:VTODO
END:VCALENDAR
"""

    # Rewrite as user01, this time marking the task COMPLETED.
    txn = self.transactionUnderTest()
    txn._authz_uid = "user01"
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data2)
    yield calendar_resource.setComponent(calendar)
    yield self.commit()

    # Verify scheduling properties survive and PARTSTAT got synchronized
    # with the COMPLETED status.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")  # unfold long lines before searching
    self.assertTrue("ORGANIZER" in calendar1)
    self.assertTrue("ATTENDEE" in calendar1)
    self.assertTrue("SUMMARY:Changed" in calendar1)
    self.assertTrue("PARTSTAT=COMPLETED" in calendar1)
    yield self.commit()
@inlineCallbacks
def test_validation_dropboxPathNormalization(self):
    """
    Test that dropbox paths are normalized.

    An event written by a sharee (user02) into user01's shared calendar
    carries dropbox/attachment URLs under user02's path; the store must
    rewrite any URL under the dropbox host/path to the owner's
    ``/calendars/__uids__/user01/dropbox/`` form, leaving unrelated
    ATTACH URLs untouched.
    """

    # user01 shares a calendar (write mode) with user02.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    sharee_home = (yield self.homeUnderTest(name="user02"))
    shared_name = (yield calendar_collection.shareWith(sharee_home, _BIND_MODE_WRITE,))
    yield self.commit()

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VTODO
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
X-APPLE-DROPBOX:https://example.com/calendars/users/user02/dropbox/123.dropbox
ATTACH;VALUE=URI:https://example.com/calendars/users/user02/dropbox/123.dropbox/1.txt
ATTACH;VALUE=URI:https://example.org/attachments/2.txt
END:VTODO
END:VCALENDAR
"""

    # user02 creates the object via the shared collection.
    calendar_collection = (yield self.calendarUnderTest(name=shared_name, home="user02"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    # Dropbox URLs are rewritten to the owner (user01); the example.org
    # attachment is not a dropbox URL and must be left alone.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", calendar_name=shared_name, home="user02",))
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")  # unfold long lines before searching
    self.assertTrue("X-APPLE-DROPBOX:https://example.com/calendars/__uids__/user01/dropbox/123.dropbox" in calendar1)
    self.assertTrue("ATTACH:https://example.com/calendars/__uids__/user01/dropbox/123.dropbox/1.txt" in calendar1)
    self.assertTrue("ATTACH:https://example.org/attachments/2.txt" in calendar1)
    yield self.commit()
@inlineCallbacks
def test_validation_processAlarms_DuplicateRemoval(self):
    """
    Test that duplicate alarms are removed.

    Writing an event containing two byte-identical VALARMs should result
    in only one VALARM being stored; setComponent must still report a
    successful (truthy) result.
    """

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""

    # Create the initial, alarm-free event.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
SUMMARY:Changed
BEGIN:VALARM
X-WR-ALARMUID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
UID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
DESCRIPTION:Event reminder
TRIGGER:-PT8M
ACTION:DISPLAY
END:VALARM
BEGIN:VALARM
X-WR-ALARMUID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
UID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
DESCRIPTION:Event reminder
TRIGGER:-PT8M
ACTION:DISPLAY
END:VALARM
END:VEVENT
END:VCALENDAR
"""

    # Rewrite with two identical alarms attached.
    txn = self.transactionUnderTest()
    txn._authz_uid = "user01"
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data2)
    result = (yield calendar_resource.setComponent(calendar))
    yield self.commit()
    self.assertTrue(result)

    # Only one of the duplicate alarms should remain stored.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")  # unfold long lines before searching
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 1)
    self.assertTrue("SUMMARY:Changed" in calendar1)
    yield self.commit()
@inlineCallbacks
def test_validation_processAlarms_AddDefault(self):
    """
    Test that default alarms are added.

    After configuring a default alarm on the home, a newly stored event
    without any VALARM should come back with exactly one (the default).
    """

    alarm = """BEGIN:VALARM
X-WR-ALARMUID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
UID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
DESCRIPTION:Event reminder
TRIGGER:-PT8M
ACTION:DISPLAY
END:VALARM
"""

    # Install the default alarm on user01's home.
    home = (yield self.homeUnderTest(name="user01"))
    yield home.setDefaultAlarm(alarm, True, True)
    yield self.commit()

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""

    # Store an event that has no alarm of its own.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    # The default alarm should have been injected exactly once.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")  # unfold long lines before searching
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 1)
    yield self.commit()
@inlineCallbacks
def test_validation_processAlarms_NoDefaultShared(self):
    """
    Test that default alarms are not added to shared resources.

    user02 has a default alarm configured, but writes an event into a
    calendar shared from user01; the sharee's default alarm must NOT be
    applied to the owner's resource.
    """

    # Set default alarm for user02
    alarm = """BEGIN:VALARM
X-WR-ALARMUID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
UID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
DESCRIPTION:Event reminder
TRIGGER:-PT8M
ACTION:DISPLAY
END:VALARM
"""
    home = (yield self.homeUnderTest(name="user02"))
    yield home.setDefaultAlarm(alarm, True, True)
    yield self.commit()

    # user01 shares calendar with user02
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    sharee_home = (yield self.homeUnderTest(name="user02"))
    shared_name = (yield calendar_collection.shareWith(sharee_home, _BIND_MODE_WRITE,))
    yield self.commit()

    # user02 writes event to shared calendar
    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""
    calendar_collection = (yield self.calendarUnderTest(name=shared_name, home="user02"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    # No alarm at all should have been added to the stored event.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", calendar_name=shared_name, home="user02",))
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")  # unfold long lines before searching
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 0)
    yield self.commit()
@inlineCallbacks
def test_validation_mergePerUserData(self):
    """
    Test that per-user data is correctly stored and retrieved.

    Owner user01 stores an event with a -PT5M alarm; sharee user02 then
    overwrites it with a -PT10M alarm. Alarms are per-user data, so:
      * the unfiltered component contains both alarms;
      * ``componentForUser("user01")`` shows only the -PT5M alarm;
      * ``componentForUser("user02")`` shows only the -PT10M alarm;
    and this holds whether accessed via the owner's or the sharee's view.
    """

    # user01 shares a calendar (write mode) with user02.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    sharee_home = (yield self.homeUnderTest(name="user02"))
    shared_name = (yield calendar_collection.shareWith(sharee_home, _BIND_MODE_WRITE,))
    yield self.commit()

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
BEGIN:VALARM
X-WR-ALARMUID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
UID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
DESCRIPTION:Event reminder
TRIGGER:-PT5M
ACTION:DISPLAY
END:VALARM
END:VEVENT
END:VCALENDAR
"""

    # Owner writes the event with their own (-PT5M) alarm.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
BEGIN:VALARM
X-WR-ALARMUID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
UID:D9D1AC84-F629-4B9D-9B6B-4A6CA9A11FEF
DESCRIPTION:Event reminder
TRIGGER:-PT10M
ACTION:DISPLAY
END:VALARM
END:VEVENT
END:VCALENDAR
"""

    # Sharee overwrites with their own (-PT10M) alarm.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", calendar_name=shared_name, home="user02",))
    calendar = Component.fromString(data2)
    yield calendar_resource.setComponent(calendar)
    yield self.commit()

    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))

    # Unfiltered view of event
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")  # unfold long lines before searching
    self.assertTrue("TRIGGER:-PT5M" in calendar1)
    self.assertTrue("TRIGGER:-PT10M" in calendar1)
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 2)

    # user01 view of event
    calendar1 = (yield calendar_resource.componentForUser("user01"))
    calendar1 = str(calendar1).replace("\r\n ", "")
    self.assertTrue("TRIGGER:-PT5M" in calendar1)
    self.assertFalse("TRIGGER:-PT10M" in calendar1)
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 1)

    # user02 view of event
    calendar1 = (yield calendar_resource.componentForUser("user02"))
    calendar1 = str(calendar1).replace("\r\n ", "")
    self.assertFalse("TRIGGER:-PT5M" in calendar1)
    self.assertTrue("TRIGGER:-PT10M" in calendar1)
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 1)
    yield self.commit()

    # Repeat the same three checks via the sharee's copy of the resource.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", calendar_name=shared_name, home="user02",))

    # Unfiltered view of event
    calendar1 = (yield calendar_resource.component())
    calendar1 = str(calendar1).replace("\r\n ", "")
    self.assertTrue("TRIGGER:-PT5M" in calendar1)
    self.assertTrue("TRIGGER:-PT10M" in calendar1)
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 2)

    # user01 view of event
    calendar1 = (yield calendar_resource.componentForUser("user01"))
    calendar1 = str(calendar1).replace("\r\n ", "")
    self.assertTrue("TRIGGER:-PT5M" in calendar1)
    self.assertFalse("TRIGGER:-PT10M" in calendar1)
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 1)

    # user02 view of event
    calendar1 = (yield calendar_resource.componentForUser("user02"))
    calendar1 = str(calendar1).replace("\r\n ", "")
    self.assertFalse("TRIGGER:-PT5M" in calendar1)
    self.assertTrue("TRIGGER:-PT10M" in calendar1)
    self.assertEqual(calendar1.count("BEGIN:VALARM"), 1)
    yield self.commit()
@inlineCallbacks
def test_validation_processScheduleTags(self):
    """
    Test that schedule tags are correctly updated.

    A normal organizer change via ``setComponent`` must produce a new
    Schedule-Tag, whereas an internal iTIP-driven update
    (``ORGANIZER_ITIP_UPDATE`` via ``_setComponentInternal``) must leave
    the Schedule-Tag unchanged.
    """

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
END:VEVENT
END:VCALENDAR
"""

    # Create an unscheduled event first.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
SUMMARY:Changed #1
END:VEVENT
END:VCALENDAR
"""

    # First organizer update: capture the resulting schedule tag.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data2)
    yield calendar_resource.setComponent(calendar)
    schedule_tag = calendar_resource.scheduleTag
    yield self.commit()

    data3 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
SUMMARY:Changed #2
END:VEVENT
END:VCALENDAR
"""

    # Second client update: the schedule tag must change.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data3)
    yield calendar_resource.setComponent(calendar)
    self.assertNotEqual(calendar_resource.scheduleTag, schedule_tag)
    schedule_tag = calendar_resource.scheduleTag
    yield self.commit()

    data4 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE;PARTSTAT=ACCEPTED:mailto:user02@example.com
SUMMARY:Changed #2
END:VEVENT
END:VCALENDAR
"""

    # iTIP-driven internal update (attendee reply): tag must NOT change.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data4)
    yield calendar_resource._setComponentInternal(calendar, internal_state=ComponentUpdateState.ORGANIZER_ITIP_UPDATE)
    self.assertEqual(calendar_resource.scheduleTag, schedule_tag)
    yield self.commit()
@inlineCallbacks
def test_validation_duplicatePrivateCommentsOKWIthiTIP(self):
    """
    Test that an iTIP update to an organizer event with duplicate private comments
    does not fail.

    Duplicate X-CALENDARSERVER-ATTENDEE-COMMENT properties are tolerated
    when the write comes through the internal iTIP path, but a regular
    client ``setComponent`` with the same duplicates must fail with
    ``DuplicatePrivateCommentsError``.
    """

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
END:VEVENT
END:VCALENDAR
"""

    data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:
 user02";X-CALENDARSERVER-DTSTAMP=20140224T181133Z:Comment
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:
 user02";X-CALENDARSERVER-DTSTAMP=20140224T181133Z:Comment
END:VEVENT
END:VCALENDAR
"""

    data3 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE;PARTSTAT=ACCEPTED:mailto:user02@example.com
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:
 user02";X-CALENDARSERVER-DTSTAMP=20140224T181133Z:Comment
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:
 user02";X-CALENDARSERVER-DTSTAMP=20140224T181133Z:Comment
END:VEVENT
END:VCALENDAR
"""

    # Create the initial scheduled event.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    # Use the RAW internal state to force duplicate private comments
    # into the stored data without validation.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data2)
    yield calendar_resource._setComponentInternal(calendar, internal_state=ComponentUpdateState.RAW)
    yield self.commit()

    # iTIP organizer update over the duplicated comments must succeed.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data3)
    yield calendar_resource._setComponentInternal(calendar, internal_state=ComponentUpdateState.ORGANIZER_ITIP_UPDATE)
    yield self.commit()

    # ...but the same write via the normal client path must fail.
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data3)
    yield self.failUnlessFailure(calendar_resource.setComponent(calendar), DuplicatePrivateCommentsError)
    yield self.commit()
@inlineCallbacks
def test_validation_deleteWithDuplicatePrivateComments(self):
    """
    Test that attendee private comments are no longer restored.

    With duplicate private comments forced into the organizer's copy, an
    attendee reply is patched to raise if comment preservation would
    encounter duplicates; the implicit scheduling work is then expected
    to record a 5.0 SCHEDULE-STATUS failure on the attendee copy, after
    which the attendee resource can still be removed cleanly.
    """

    data1 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
END:VEVENT
END:VCALENDAR
"""

    data2 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE:mailto:user02@example.com
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:
 user02";X-CALENDARSERVER-DTSTAMP=20140224T181133Z:Comment
X-CALENDARSERVER-ATTENDEE-COMMENT;X-CALENDARSERVER-ATTENDEE-REF="urn:uuid:
 user02";X-CALENDARSERVER-DTSTAMP=20140224T181133Z:Comment
END:VEVENT
END:VCALENDAR
"""

    data3 = """BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:12345-67890-attendee-reply
DTSTAMP:20080601T120000Z
DTSTART:20080601T120000Z
DTEND:20080601T130000Z
ORGANIZER;CN="User 01":mailto:user01@example.com
ATTENDEE:mailto:user01@example.com
ATTENDEE;PARTSTAT=ACCEPTED:mailto:user02@example.com
END:VEVENT
END:VCALENDAR
"""

    # Create the initial scheduled event as the organizer.
    calendar_collection = (yield self.calendarUnderTest(home="user01"))
    calendar = Component.fromString(data1)
    yield calendar_collection.createCalendarObjectWithName("test.ics", calendar)
    yield self.commit()

    # Force duplicate private comments into the organizer copy (RAW
    # state bypasses validation).
    calendar_resource = (yield self.calendarObjectUnderTest(name="test.ics", home="user01",))
    calendar = Component.fromString(data2)
    yield calendar_resource._setComponentInternal(calendar, internal_state=ComponentUpdateState.RAW)
    yield self.commit()

    # Patch comment preservation to blow up if duplicates would be
    # restored — this is what makes the implicit organizer update fail.
    def raiseHere(otherself, component, inserting, internal_state):
        if component.hasDuplicatePrivateComments(doFix=False):
            raise ValueError
    self.patch(CalendarObject, "preservePrivateComments", raiseHere)

    # Attendee (user02) replies to the invite.
    calendar2 = (yield self.calendarUnderTest(name="calendar_1", home="user02"))
    cobjs = (yield calendar2.calendarObjects())
    self.assertTrue(len(cobjs) == 1)
    yield cobjs[0].setComponent(Component.fromString(data3))
    yield self.commit()

    # The failed organizer-side processing should surface as a 5.0
    # schedule status on the attendee's copy.
    calendar2 = (yield self.calendarUnderTest(name="calendar_1", home="user02"))
    cobjs = (yield calendar2.calendarObjects())
    calendar = yield cobjs[0].component()
    self.assertTrue('SCHEDULE-STATUS=5.0' in normalize_iCalStr(calendar))
    yield self.commit()

    # Despite the failure, the attendee can still delete the event.
    calendar2 = (yield self.calendarUnderTest(name="calendar_1", home="user02"))
    cobjs = (yield calendar2.calendarObjects())
    self.assertTrue(len(cobjs) == 1)
    yield cobjs[0].remove()
    yield self.commit()

    calendar2 = (yield self.calendarUnderTest(name="calendar_1", home="user02"))
    cobjs = (yield calendar2.calendarObjects())
    self.assertTrue(len(cobjs) == 0)
    yield self.commit()
|
|
#!/usr/bin/env python
'''
Extract reads which aren't mapped from a SAM or SAM.gz file.
Behavior for PE:
-Write out PE only if both do not map (if either of the pair maps, neither is retained)
Behavior for SE:
-Write out SE if they don't map
Iterate over a SAM or SAM.gz file. Take every record where the 3rd and
4th flag bits are set to 1 and write the reads out to files.
0x1 template having multiple segments in sequencing
0x2 each segment properly aligned according to the aligner
0x4 segment unmapped
0x8 next segment in the template unmapped
0x10 SEQ being reverse complemented
0x20 SEQ of the next segment in the template being reversed
0x40 the first segment in the template
0x80 the last segment in the template
0x100 secondary alignment
0x200 not passing quality controls
0x400 PCR or optical duplicate
TODO:
1) Add support for retaining both reads if one of a pair doesn't map but the other does
2) Add support for retaining the pair (or SE) if a read maps with low mapq
Note:
It is necessary to double check that both pairs of the PE read really exist in the SAM
file just in case it somehow gets disordered. This is taken care of by keeping the PE
reads in a set of dictionaries and then deleting them once the pair is written.
In the case where a read is somehow labeled as paired, but the pair doesn't exist, the
read is NOT written.
'''
import sys
import os
from optparse import OptionParser # http://docs.python.org/library/optparse.html
import gzip
# ---- Command-line parsing and input/output setup (Python 2 script) ----
usage = "usage: %prog [options] -o output_base inputfile.SAM"
parser = OptionParser(usage=usage, version="%prog 2.0.1")
parser.add_option('-u', '--uncompressed', help="leave output files uncompressed",
                  action="store_true", dest="uncompressed")
parser.add_option('-o', '--output_base', help="output file basename",
                  action="store", type="str", dest="output_base", default="screened")
parser.add_option('-t', '--tab-seperated', help="seperated out the output in tab",
                  action="store_true", dest="tabSeperated")
# NOTE(review): store_false means passing -v DISABLES verbose (default True);
# options.verbose is never read below anyway — confirm intent before relying on it.
parser.add_option('-v', '--verbose', help="verbose output",
                  action="store_false", dest="verbose", default=True)
(options, args) = parser.parse_args()  # uncomment this line for command line support

if len(args) == 1:
    infile = args[0]
    # Start opening input/output files:
    if not os.path.exists(infile):
        print >> sys.stderr, "Error, can't find input file %s" % infile
        sys.exit()
    # Transparently handle gzip-compressed SAM input by extension.
    if infile.split(".")[-1] == "gz":
        insam = gzip.open(infile, 'rb')
    else:
        insam = open(infile, 'r')
else:
    # reading from stdin
    insam = sys.stdin

# Output basename; the special value "stdout" switches to interleaved
# printing instead of per-mate files (see the main loop below).
base = options.output_base

# Buffers for paired reads seen so far without their mate:
# PE1 holds first-in-pair reads keyed by read ID, PE2 the second-in-pair.
PE1 = {}
PE2 = {}
contig_map = {}
interleaved = False
def writeread(ID, r1, r2):
    """
    Emit one unmapped read pair.

    ID -- read name with any "#..." suffix already stripped.
    r1, r2 -- [sequence, quality] for the first/second mate.

    Uses module globals: ``interleaved`` selects stdout vs. file output,
    ``options.tabSeperated`` selects tab format, and ``outPE1``/``outPE2``
    are the per-mate FASTQ output streams.
    """
    if not interleaved:
        # File mode: one FASTQ record per mate, each to its own stream.
        for stream, mate, rec in ((outPE1, "1", r1), (outPE2, "2", r2)):
            stream.write("@%s#0/%s\n" % (ID, mate))
            stream.write(rec[0] + "\n")
            stream.write("+\n" + rec[1] + "\n")
        return
    emit = sys.stdout.write
    if options.tabSeperated is True:
        # Single tab-delimited line per pair (trailing blank line kept for
        # compatibility with the original output format).
        emit("\t".join([ID, r1[0], r1[1], r2[0], r2[1]]) + "\n\n")
    else:
        # Interleaved FASTQ on stdout: mate /1 then mate /2.
        for mate, rec in (("1", r1), ("2", r2)):
            emit("@%s#0/%s\n" % (ID, mate))
            emit(rec[0] + "\n")
            emit("+\n" + rec[1] + "\n")
# ---- Main loop: scan SAM records and extract unmapped reads ----
i = 0            # count of primary alignment records examined
PE_written = 0   # pairs written (each pair = 2 records)
SE_written = 0   # single-end reads written
SE_open = False  # SE output stream initialized?
PE_open = False  # PE output streams initialized?
line2 = []
for line in insam:
    # Comment/header lines start with @
    if line[0] != "@" and len(line.strip().split()) > 2:
        line2 = line.strip().split()
        flag = int(line2[1])
        if (flag & 0x100):  # secondary alignment
            continue
        i += 1
        # Handle SE:
        # unmapped SE reads have 0x1 (paired) set to 0, and 0x4 (segment unmapped) set to 1
        if (flag & 0x1 == 0) and (flag & 0x4):
            ID = line2[0].split("#")[0]
            if not SE_open:
                # Lazily open the SE output on first unmapped SE read.
                if base == "stdout":
                    interleaved = True
                elif options.uncompressed:
                    outSE = open(base + "_SE.fastq", 'w')
                else:
                    outSE = gzip.open(base + "_SE.fastq.gz", 'wb')
                SE_open = True
            # interleaved just means to stdout in this case
            if (interleaved):
                if options.tabSeperated is True:
                    print ID + "\t" + line2[9] + "\t" + line2[10] + "\n"
                else:
                    # FASTQ record: header, sequence (col 10), "+", quality (col 11).
                    print "@" + ID
                    print line2[9]
                    print '+\n' + line2[10]
            else:
                outSE.write("@" + ID + '\n')
                outSE.write(line2[9] + '\n')
                outSE.write('+\n' + line2[10] + '\n')
            SE_written += 1
            continue
        # Handle PE:
        # logic: 0x1 = multiple segments in sequencing, 0x4 = segment unmapped, 0x8 = next segment unmapped, 0x80 the last segment in the template
        # i.e. keep the pair only when BOTH mates are unmapped.
        if ((flag & 0x1) and (flag & 0x4) and (flag & 0x8)):
            if not PE_open:
                # Lazily open the PE outputs on first fully-unmapped pair.
                if base == "stdout":
                    interleaved = True
                elif options.uncompressed:
                    outPE1 = open(base + "_PE1.fastq", 'w')
                    outPE2 = open(base + "_PE2.fastq", 'w')
                else:
                    outPE1 = gzip.open(base + "_PE1.fastq.gz", 'wb')
                    outPE2 = gzip.open(base + "_PE2.fastq.gz", 'wb')
                PE_open = True
            if (flag & 0x40):  # is this PE1 (first segment in template)
                # PE1 read, check that PE2 is in dict and write out;
                # otherwise buffer it until the mate shows up.
                ID = line2[0].split("#")[0]
                r1 = [line2[9], line2[10]]  # sequence + qual
                if ID in PE2:
                    writeread(ID, r1, PE2[ID])
                    del PE2[ID]
                    PE_written += 1
                else:
                    PE1[ID] = r1
                continue
            elif (flag & 0x80):  # is this PE2 (last segment in template)
                # PE2 read, check that PE1 is in dict and write out
                ID = line2[0].split("#")[0]
                r2 = [line2[9], line2[10]]
                if ID in PE1:
                    writeread(ID, PE1[ID], r2)
                    del PE1[ID]
                    PE_written += 1
                else:
                    PE2[ID] = r2
                continue
        # was mapped, count it up
        # if line2 != []:
        #     contig = line2[2]
        #     if contig in contig_map.keys():
        #         if (flag & 0x1 == 0):  # SE
        #             contig_map[contig]["SE"] += 1
        #         elif (flag & 0x40):  # PE, Just count the first in the pair
        #             contig_map[contig]["PE"] += 1
        #     else:
        #         contig_map[contig] = {}
        #         if (flag & 0x1 == 0):  # SE
        #             contig_map[contig]["SE"] = 1
        #             contig_map[contig]["PE"] = 0
        #         elif (flag & 0x40):  # PE, Just count the first in the pair
        #             contig_map[contig]["SE"] = 0
        #             contig_map[contig]["PE"] = 1

# for k in contig_map.keys():
#     print >> sys.stderr, "\tFound %s: percent: %.2f, PE mapped: %s, SE mapped: %s" % (k, (2*PE_written+SE_written)/i, contig_map[k]["PE"], contig_map[k]["SE"])

# Final summary to stderr. NOTE(review): reads buffered in PE1/PE2 whose
# mate never appeared are silently dropped (counted as discarded here),
# as described in the module docstring.
print >> sys.stderr, "Records processed: %s | PE_written: %s | SE_written: %s | Discarded: %s " % (i, PE_written, SE_written, i-(PE_written*2+SE_written))

# Close any file outputs we opened (not needed in stdout mode).
if base != "stdout":
    if PE_open:
        outPE1.close()
        outPE2.close()
    if SE_open:
        outSE.close()
|
|
#IGE (Indirect genetic effects) = SGE (Social genetic effects). IEE = SEE.
#the potential presence of NAs in input phenotype, covs, cages etc means that in this code we introduce two sets of animals (not discussed in the paper):
#focal animals, defined as having phenotype, covs (if covs are provided), cage and kinship
#all animals = focal animals + cage mates, where cage mates are defined as having cage and kinship and being in subset_IDs, and are referred to using _cm in this code. Note that in the paper cage mate has a more precise meaning, namely "the other mice in the cage of the focal individual".
import sys
import warnings
import scipy as sp
import scipy.linalg as la
from limix.core.mean.mean_base import MeanBase as lin_mean
from limix.core.covar.dirIndirCov import DirIndirCov
from limix.core.covar.fixed import FixedCov
from limix.core.covar.combinators import SumCov
from limix.utils.preprocess import covar_rescaling_factor
from limix.utils.preprocess import covar_rescale
from limix.core.gp.gp_base import GP
import pdb
class DirIndirVD():
#many of inputs below are passed from SocialData object built using social_data.py
#pheno: missing values should be encoded as -999 (needs to be numeric for HDF5). pheno can be 1xN or Nx1 vector (where N is number of individuals)
#pheno_ID: IDs corresponding to pheno.
#covs: missing values should be encoded as -999. covs can be 1xN or Nx1 vector, or a NxK matrix (K number of covariates)
#covs_ID: IDs corresponding to covs
#kinship_cm: GRM for all mice ever considered (focal or cagemate). !!!contrary to what notation says here, this is all mice = focal + cage mates, not only cm. no missing values allowed. symmetric matrix.
#kinship_cm_ID: rownames and colnames of kinship_cm
#cage_cm: cages of all mice ever considered (focal or cagemate). missing values should be encoded as NA. NA cage will lead to this animal to be ignored from the analysis. if you have reason to believe this animal was a cage mate (but you don't know its cage), this is an issue and you need to be aware of it.
#cage_cm_ID: IDs corresponding to cage_cm
#independent_covs: if True (default) LIMIX will check whether any covariate is a linear combination of the others and if so fix the issue by updating covs to independent columns
#DGE: should DGE be included in the model?
#IGE: should IGE be included in the model?
#IEE: should IEE be included in the model? note that if IGE = True, IEE will be set to True by the program (silently) - see below
#cageEffect: should cage effects be included in the model?
#calc_ste: should the standard errors of the variance components be estimated and output?
#standardize_pheno: should the phenotype be standardized to variance 1?
#subset_IDs: subset of IDs to consider *as focal individuals or cage mate*. this means that any animal not in subset_IDs will be ignored completely from analysis. correct only if all animals from a cage are excluded! if instead want to exclude animals only as focal individuals, set their phenotype to NA.
def __init__(self, pheno = None, pheno_ID = None, covs = None, covs_ID = None, kinship_cm = None, kinship_cm_ID = None, cage_cm = None, cage_cm_ID = None, independent_covs = True, DGE = True, IGE = False, IEE = False, cageEffect = False, calc_ste = False, standardize_pheno = True, subset_IDs = None):
    """
    Fit a direct/indirect (social) genetic-effects variance decomposition.

    Parameter semantics are documented in the comment block above the
    class; briefly: pheno/covs with matching *_ID arrays (missing values
    encoded as -999), kinship and cage data for all animals (focal +
    cage mates), and flags selecting which variance components (DGE,
    IGE, IEE, cage effect) to include. The fitted results are stored in
    ``self.output`` (see get_VCs).
    """
    # Match/filter all inputs and define focal vs. cage-mate animal sets.
    self.parseNmore(pheno, pheno_ID, covs, covs_ID, kinship_cm, kinship_cm_ID, cage_cm, cage_cm_ID, independent_covs,standardize_pheno, subset_IDs)

    # the purpose of the IEE argument is to specify whether IEE should be off when IGE are off. When IGE are on, IEE must be on and therefore IEE will be automatically set to True when IGE is True.
    if IGE:
        IEE = True

    #VD defines the genetic, environmental and cage covariance matrices
    self.VD(DGE = DGE, IGE = IGE, IEE = IEE, cageEffect = cageEffect)

    #optimize estimates the variance components
    self.optimize()

    # get_VCs gets the variance components, standard errors, etc. for output
    # self.output useful to retrieve output using getOutput without having to specify calc_ste, DGE, IGE, etc.
    self.output = self.get_VCs (calc_ste, DGE = DGE, IGE = IGE, IEE = IEE, cageEffect = cageEffect)
def parseNmore(self, pheno, pheno_ID, covs, covs_ID, kinship_cm, kinship_cm_ID, cage_cm, cage_cm_ID, independent_covs,standardize_pheno, subset_IDs):
"""match various inputs"""
assert pheno is not None, 'Specify pheno!'
assert pheno_ID is not None, 'Specify pheno IDs!'
assert pheno.shape[0] == len(pheno_ID), 'Lengths of pheno and pheno IDs do not match!'
assert kinship_cm is not None, 'Specify kinship!'
assert kinship_cm_ID is not None, 'Specify kinship IDs!'
assert kinship_cm.shape[0] == kinship_cm.shape[1], 'Kinship is not a square matrix!'
assert kinship_cm.shape[0] == len(kinship_cm_ID), 'Dimension of kinship and length of kinship IDs do not match!'
assert cage_cm is not None, 'No social analysis possible if cage information is unavailable. For DGE analysis, use regular LIMIX https://github.com/PMBio/limix'
####hack to shorten runtime
# uCage=sp.unique(cage_cm)
# remove=uCage[0:510]
# idx_remove = sp.array(sp.concatenate([sp.where(cage_cm==remove[i])[0] for i in range(len(remove))]))
# cage_cm[idx_remove]='NA'
####hack to shorten runtime
#1. define set of animals with cage and kinship information (_cm)
#1.1 _cm animals need to be in subset_IDs
if subset_IDs is not None:
Imatch = sp.nonzero(subset_IDs[:,sp.newaxis]==kinship_cm_ID)
kinship_cm = kinship_cm[Imatch[1],:][:,Imatch[1]]
kinship_cm_ID=kinship_cm_ID[Imatch[1]]
#1.2 NA allowed in cage information so first of all exclude missing cage data and corresponding animals
has_cage = (cage_cm!='NA')
if sum(has_cage)==0:
cage_cm = None
assert cage_cm is not None, 'No social analysis possible if cage information is unavailable. For DGE analysis, use regular LIMIX https://github.com/PMBio/limix'
cage_cm=cage_cm[has_cage]
cage_cm_ID=cage_cm_ID[has_cage]
#1.3 match cages and kinship as we need both for any social analysis. note that if this is a non social analysis and cage is specified (cage_cm not None), then only those individuals with known cage will be included in the analysis. This is useful to keep same sample between social and non social analyses.
Imatch = sp.nonzero(cage_cm_ID[:,sp.newaxis]==kinship_cm_ID)
cage_cm_ID = cage_cm_ID[Imatch[0]]
cage_cm = cage_cm[Imatch[0]]
kinship_cm = kinship_cm[Imatch[1],:][:,Imatch[1]]
kinship_cm_ID=kinship_cm_ID[Imatch[1]]
#(kinship_cm_ID==cage_cm_ID).all()
#True
#cage and kinship now have no missing values and are matched - IDs are in cage_ID and kinship_cm_ID
# put IDs in sampleID_cm
sampleID_cm = kinship_cm_ID
assert len(sampleID_cm)!=0, 'No _cm animals'
#2. define focal animals now: those with non missing phenotype and non missing covs, kinship and cage, and in subset_IDs
#2.1 remove NAs from pheno
if len(pheno.shape)==1:
pheno = pheno[:,sp.newaxis]
has_pheno = (pheno!=(-999))[:,0]
pheno=pheno[has_pheno,:]
pheno_ID=pheno_ID[has_pheno]
#2.2 add intercept to covs
#if no covs are provided, make it a vector of 1s for intercept
if covs is None:
covs = sp.ones((pheno.shape[0],1))
covs_ID = pheno_ID
#if covs are provided, append a vector of 1s for intercept
else:
new_col=sp.ones([covs.shape[0],1])
if len(covs.shape)==1:
covs = covs[:,sp.newaxis]
covs=sp.append(new_col,covs,1)
#2.3 remove NAs from covs
has_covs = (covs!=(-999)).all(1)
covs=covs[has_covs,:]
covs_ID=covs_ID[has_covs]
#2.4 match pheno and covs
Imatch = sp.nonzero(pheno_ID[:,sp.newaxis]==covs_ID)
pheno = pheno[Imatch[0],:]
pheno_ID=pheno_ID[Imatch[0]]
covs = covs[Imatch[1],:]
covs_ID=covs_ID[Imatch[1]]
#(pheno_ID==covs_ID).all()
#True
#pheno and covs now have no missing values and are matched - IDs are in pheno_ID and covs_ID
#2.5 check which of those are in sampleID_cm (and thus have kinship and cage)
has_geno = sp.array([pheno_ID[i] in sampleID_cm for i in range(pheno_ID.shape[0])])
pheno = pheno[has_geno,:]
covs = covs[has_geno,:]
#create sampleID that has focal individuals.
sampleID=pheno_ID[has_geno]
assert len(sampleID)!=0, 'No focal animals'
#remember sampleID_cm and sampleID are in different order (and of different length possibly)
#3. create cage and kinship for focal animals
idxs = sp.array([sp.where(sampleID_cm==sampleID[i])[0][0] for i in range(sampleID.shape[0])])
cage=cage_cm[idxs]
if len(cage.shape)==1:
cage = cage[:,sp.newaxis]
kinship=kinship_cm[idxs,:][:,idxs]
#4. create focal x _cm genetic cross-covariance
kinship_cross = kinship_cm[idxs,:]
#so sampleID along rows and sampleID_cm along colummns
#5. now create environmental matrices
env = sp.eye(kinship.shape[0])
env_cm = sp.eye(kinship_cm.shape[0])
env_cross = env_cm[idxs,:]
# standardize_pheno should be True for CV if want to output 1-mse
if standardize_pheno:
pheno -= pheno.mean(0)
pheno /= pheno.std(0)
print('Pheno has been standardized')
if independent_covs:
tol = 1e-6
R = la.qr(covs,mode='r')[0][:covs.shape[1],:]
I = (abs(R.diagonal())>tol)
if sp.any(~I):
warnings.warn('Covariate cols '+str(sp.where(~I)[0])+' have been removed because linearly dependent on the others')
covs = covs[:,I]
self.sampleID=sampleID
self.pheno=pheno
self.covs=covs
self.cage=cage
self.kinship=kinship
self.env=env
self.sampleID_cm=sampleID_cm
self.cage_cm=cage_cm
self.kinship_cm=kinship_cm
self.env_cm=env_cm
self.kinship_cross=kinship_cross
self.env_cross=env_cross
    def VD(self, DGE, IGE, IEE, cageEffect):
        """ defines covariance for variance decomposition.

        Builds the GP mean (linear fixed effects of covs on pheno) and the
        overall covariance as a sum of up to three components: genetic
        (direct and/or indirect), environmental (direct, optionally
        indirect) and cage.  The assembled GP is stored in self._gp.

        Parameters
        ----------
        DGE : bool, fit direct genetic effects
        IGE : bool, fit indirect (social) genetic effects
        IEE : bool, fit indirect environmental effects
        cageEffect : bool, fit a cage random effect
        """
        #defines mean
        mean = lin_mean(self.pheno,self.covs)
        #define cagemate assignment - required for SGE, SEE, and cage effects. Z is N focal x N_cm and has 0s in cells Z_i,i (i.e. an animal is not its own cage mate)
        same_cage = 1. * (self.cage==self.cage_cm)
        diff_inds = 1. * (self.sampleID[:,sp.newaxis]!=self.sampleID_cm)
        Z = same_cage * diff_inds
        #define the overall genetic covariance matrix
        if DGE or IGE:
            #scales kinship (DGE component) to sample variance 1
            # NOTE(review): self.kinship / self.kinship_cm / self.kinship_cross
            # are rescaled *in place*, so calling VD twice on the same
            # instance compounds the scaling -- confirm VD is called once
            sf_K = covar_rescaling_factor(self.kinship)
            self.kinship *= sf_K
            #now create and scale SGE and DGE/SGE covariance components
            if IGE:
                #first SGE component: ZKcmZ' in this code (ZKZ' in paper)
                _ZKcmZ = sp.dot(Z,sp.dot(self.kinship_cm,Z.T))
                sf_ZKcmZ = covar_rescaling_factor(_ZKcmZ)
                self.kinship_cm *= sf_ZKcmZ
                #second DGE/SGE covariance:
                self.kinship_cross *= sp.sqrt(sf_K * sf_ZKcmZ)
            if DGE and not IGE:
                self._genoCov = FixedCov(self.kinship)
            elif IGE and not DGE:
                # _ZKcmZ is the matrix computed before kinship_cm was rescaled;
                # the scaling is absorbed by FixedCov's scale parameter
                self._genoCov = FixedCov(_ZKcmZ)
            elif DGE and IGE:
                self._genoCov = DirIndirCov(self.kinship,Z,kinship_cm=self.kinship_cm,kinship_cross=self.kinship_cross)
        else:
            self._genoCov = None
        #define the overall environmental covariance matrix
        #there is always DEE
        #env naturally has sample variance 1 so no need to scale it
        if IEE:
            #_ZZ = ZIcmZ'
            _ZZ = sp.dot(Z,Z.T)
            sf_ZZ = covar_rescaling_factor(_ZZ)
            self.env_cm *= sf_ZZ
            self.env_cross *= sp.sqrt(1 * sf_ZZ)
            self._envCov = DirIndirCov(self.env,Z,kinship_cm=self.env_cm,kinship_cross=self.env_cross)
        else:
            self._envCov = FixedCov(self.env)
        ##define cage effect covariance matrix
        if cageEffect:
            N = self.pheno.shape[0]
            uCage = sp.unique(self.cage)
            #W, the cage design matrix, is N x n_cages (where N is number of focal animals)
            W = sp.zeros((N,uCage.shape[0]))
            for cv_i, cv in enumerate(uCage):
                W[:,cv_i] = 1.*(self.cage[:,0]==cv)
            #WW, the cage effect covariance matrix, is N x N and has 1s in cells WW_i,i
            WW = sp.dot(W,W.T)
            #this is equivalent to getting covar_rescaling_factor first and then multiplying, as done for other matrices above
            WW = covar_rescale(WW)
            self._cageCov = FixedCov(WW)
        else:
            self._cageCov = None
        # define overall covariance matrix as sum of genetic, environmental and cage covariance matrices
        if self._genoCov is None:
            if self._cageCov is None:
                self.covar = SumCov(self._envCov)
            else:
                self.covar = SumCov(self._envCov,self._cageCov)
        else:
            if self._cageCov is None:
                self.covar = SumCov(self._genoCov,self._envCov)
            else:
                self.covar = SumCov(self._genoCov,self._envCov,self._cageCov)
        ## define gp
        self._gp = GP(covar=self.covar,mean=mean)
def optimize(self):
"""optimises the covariance matrix = estimate variance components"""
if 0:
# trial for inizialization it is complicated though
cov = sp.array([[0.2,1e-4],[1e-4,1e-4]])
self._genoCov.setCovariance(cov)
self._envCov.setCovariance(cov)
self._cageCov.scale = 0.2
else:
self._gp.covar.setRandomParams()
#optimization - keep calc_ste = False
self.conv, self.info = self._gp.optimize(calc_ste = False)
def get_VCs(self, calc_ste, DGE, IGE, IEE, cageEffect):
"""function to access estimated variance components, standard errors"""
if calc_ste:
try:
STE_output = self.getGenoSte(DGE, IGE, IEE, cageEffect)
genetic_STEs = STE_output['R']
#not sure what corr_params are
corr_params = STE_output['corr_params']
except:
genetic_STEs = sp.array([[-999,-999],[-999,-999]])
corr_params = (-999)
else:
genetic_STEs = sp.array([[-999,-999],[-999,-999]])
corr_params = (-999)
R = {}
#whether the run converged
R['conv'] = self.conv
#should be small (e.g. < 10^-4)
R['grad'] = self.info['grad']
#Contrary to what it says, this is -LML
R['LML'] = self._gp.LML()
#number of focal animals
R['sample_size'] = len(self.sampleID)
#number of _cm animals. note that this doesnt say much as an animal that is not phenotyped nor a cage mate could still be in there
R['sample_size_cm'] = len(self.sampleID_cm)
#standard error for DGE
R['STE_Ad'] = genetic_STEs[0,0]
#standard error for SGE
R['STE_As'] = genetic_STEs[1,1]
#standard error for covariance between DGE and SGE
R['STE_Ads'] = genetic_STEs[0,1]
#covariance between DGE and SGE estimates
R['corr_params'] = corr_params
#effect sizes of fixed effects in the model
R['b'] = self._gp.mean.b
#variance components and total genetic variance below
if DGE and (not IGE):
R['var_Ad'] = self._genoCov.scale
R['var_As'] = (-999)
R['corr_Ads'] = (-999)
R['total_gen_var'] = 1/covar_rescaling_factor(self._genoCov.K())
elif IGE and (not DGE):
R['var_Ad'] = (-999)
R['var_As'] = self._genoCov.scale
R['corr_Ads'] = (-999)
R['total_gen_var'] = 1/covar_rescaling_factor(self._genoCov.K())
elif DGE and IGE:
R['var_Ad'] = self._genoCov.covff.K()[0,0]
R['var_As'] = self._genoCov.covff.K()[1,1]
R['corr_Ads'] = self._genoCov.covff.K()[0,1]/(sp.sqrt(R['var_Ad']) * sp.sqrt(R['var_As']))
R['total_gen_var'] = 1/covar_rescaling_factor(self._genoCov.K())
else:
R['var_Ad'] = (-999)
R['var_As'] = (-999)
R['corr_Ads'] = (-999)
R['total_gen_var'] = (-999)
if not IEE:
R['var_Ed'] = self._envCov.scale
R['var_Es'] = (-999)
R['corr_Eds'] = (-999)
else:
R['var_Ed'] = self._envCov.covff.K()[0,0]
R['var_Es'] = self._envCov.covff.K()[1,1]
R['corr_Eds'] = self._envCov.covff.K()[0,1] / (sp.sqrt(R['var_Ed']) * sp.sqrt(R['var_Es']))
if cageEffect:
R['var_C'] = self._cageCov.scale
#environmental covariance matrix (fitted)
envK = self._envCov.K() + self._cageCov.K()
#overall (total) covariance matrix (fitted)
if DGE or IGE:
totK = self._genoCov.K() + self._envCov.K() + self._cageCov.K()
else:
totK = self._envCov.K() + self._cageCov.K()
else:
R['var_C'] = (-999)
envK = self._envCov.K()
if DGE or IGE:
totK = self._genoCov.K() + self._envCov.K()
else:
totK = self._envCov.K()
#calculate sample variance of environmental matrix and overall covariance matrices
R['total_env_var'] = 1/covar_rescaling_factor(envK)
R['total_var'] = 1/covar_rescaling_factor(totK)
self.total_variance = R['total_var']
return R
def getGenoSte(self, DGE, IGE, IEE, cageEffect):
self._gp.covar.getFisherInf()
F = self._gp.covar.getFisherInf()
# scalar in front of each term
# ordering for geno and env is
# direct, covar, indirect as in fisher matrix
aP = []
vi = []
if DGE and (not IGE):
aP.append(self._genoCov.scale)
vi.append(1. / covar_rescaling_factor(self._genoCov.K0))
elif IGE and (not DGE):
aP.append(self._genoCov.scale)
vi.append(1. / covar_rescaling_factor(self._genoCov.K0))
elif DGE and IGE:
aP.append(self._genoCov.covff.K()[0,0])
aP.append(self._genoCov.covff.K()[0,1])
aP.append(self._genoCov.covff.K()[1,1])
vi.append(1. / covar_rescaling_factor(self._genoCov._K))
vi.append(1. / covar_rescaling_factor(self._genoCov._KZ + self._genoCov._ZK))
vi.append(1. / covar_rescaling_factor(self._genoCov._ZKZ))
else:
pass
if not IEE:
aP.append(self._envCov.scale)
vi.append(1. / covar_rescaling_factor(self._envCov.K0))
else:
aP.append(self._envCov.covff.K()[0,0])
aP.append(self._envCov.covff.K()[0,1])
aP.append(self._envCov.covff.K()[1,1])
vi.append(1. / covar_rescaling_factor(self._envCov._K))
vi.append(1. / covar_rescaling_factor(self._envCov._KZ + self._envCov._ZK))
vi.append(1. / covar_rescaling_factor(self._envCov._ZKZ))
if cageEffect:
aP.append(self._cageCov.scale)
vi.append(1. / covar_rescaling_factor(self._cageCov.K0))
else:
pass
# make them vectors
aP = sp.array(aP)
vi = sp.array(vi)
# overall variance
# this should correspond to the one you get from sampling
v = (aP*vi).sum()
# fractions of variance exaplined by each term
# (can be negative)
h = (aP*vi) / v
# jacobean
J = sp.zeros((aP.shape[0], aP.shape[0]))
J[:, 0] = h / vi
J[-1, 1:] = -v / vi[-1]
for i in range(aP.shape[0]-1):
J[i, i+1] = v / vi[i]
# transformation of Fisher
Fnew = sp.dot(J.T, sp.dot(F, J))
# invert the new Fisher
S,U = sp.linalg.eigh(Fnew)
I = S>1e-9
U = U[:,I]
S = S[I]
FI = sp.dot(U,sp.dot(sp.diag(S**(-1)),U.T))
# reorder to have same ordering as before
idxs = list(range(1, aP.shape[0]))
idxs.append(0)
FI = FI[idxs, :][:, idxs]
# R is 2x2 matrix: STE_Ad and STE_As on diag, STE_Ads off
R = sp.zeros((2, 2))
STE_output = {}
if DGE and IGE:
FI_geno = FI[:3,:][:,:3]
#STEs = sp.sqrt(FI_geno.diagonal()) ( ordered as Ad Ads As)
#STEs = sqrt of var of VC corr_params
#fills diag and 1 off first
R[sp.tril_indices(2)] = sp.sqrt(FI_geno.diagonal())
#now fills other off
R = R + R.T - sp.diag(R.diagonal())
corr_param_Ad_As = FI_geno[0,2]/(sp.sqrt(FI_geno[0,0])*sp.sqrt(FI_geno[2,2]))
elif DGE and (not IGE):
R[0,0] = sp.sqrt(FI[0,0])
R[0,1] = -999
R[1,0] = -999
R[1,1] = -999
corr_param_Ad_As = -999
elif (not DGE) and IGE:
R[0,0] = -999
R[0,1] = -999
R[1,0] = -999
R[1,1] = sp.sqrt(FI[0,0])
corr_param_Ad_As = -999
else:
R[0,0] = -999
R[0,1] = -999
R[1,0] = -999
R[1,1] = -999
corr_param_Ad_As = -999
STE_output['R']=R
STE_output['corr_params']= corr_param_Ad_As
return STE_output
def getOutput(self):
"""to get output without having to specify DGE, SGE, ...."""
return self.output
|
|
# -*- coding: utf-8 -*-
import numpy as np
import linecache
import math
# Physical constants used in the Mott-Schottky doping calculation
eps0 = 8.854187817e-14 # vacuum permittivity, F*cm^-1
q = 1.602176462e-19 # elementary charge, C
def cv_read(cv_file, model):
    """ Read Capacitance-Voltage data from *.CV file
    Parameters
    ----------
    cv_file : str, path to the CV file
    model : str, 'Cp','Cs' or 'Diss' - model for calculation capacitance
    Returns
    -------
    Cp, Cs : float, capacitance in uF/cm^2
    voltage : float, voltage in V
    Raises
    ------
    ValueError : if `model` is not one of 'Cp', 'Cs', 'Diss'
    """
    # get area, size and frequency from line 12 of CV file
    # example: 0 0.106 2327.1 443.02 0
    # (?) (area) (first frequency in rad) (second freq) (?)
    cv_properties = linecache.getline(cv_file, 12)
    cv_properties = cv_properties.split()
    area = float(cv_properties[1])
    freq = float(cv_properties[2])
    # build voltage array from values in line 15
    # example: -0.64142 0.008 19 100
    # (start voltage) (step) (number of negative steps) (total number of steps)
    voltage_range = linecache.getline(cv_file, 15)
    voltage_range = voltage_range.split()
    vstart = float(voltage_range[0])
    vstep = float(voltage_range[1])
    nneg = int(voltage_range[2])
    npos = int(voltage_range[3]) - nneg
    voltage = np.concatenate((
        np.linspace(vstart, vstart-vstep*nneg, nneg+1),
        np.linspace(vstart, vstart+vstep*npos, npos,
                    endpoint=False)
    ), axis=0)
    # Y is complex admittance (Gp+Bp*i)
    # SECURITY NOTE: the converter uses eval() on file contents -- only use
    # cv_read on trusted instrument files, never on untrusted input
    cnv = dict.fromkeys([0], lambda x: complex(*eval(x)))
    Y = np.genfromtxt(cv_file, converters=cnv,
                      delimiter=25, skip_header=15)
    # sort rows by voltage so the curve is monotone in V
    array = np.vstack((voltage, Y.real, Y.imag)).T
    array = array[array[:, 0].argsort(0)]
    voltage = array[:, 0]
    Y.real = array[:, 1]
    Y.imag = array[:, 2]
    # derive dissipation and the parallel/series capacitance models
    Diss = Y.real/Y.imag
    Cp = 1e6*Y.imag/(freq*area)
    Cs = Cp*(1+Diss**2)
    if model == 'Cp':
        return Cp, voltage
    if model == 'Cs':
        return Cs, voltage
    if model == 'Diss':
        return Diss, voltage
    # BUG FIX: previously fell off the end and returned None for an unknown
    # model; fail loudly instead
    raise ValueError("model must be 'Cp', 'Cs' or 'Diss', got %r" % (model,))
def iv_read(iv_file):
    """ Read Current-Voltage data from *.IV file
    Parameters
    ----------
    iv_file : str, path to the IV file
    Returns
    -------
    current : float, current in mA/cm^2
    voltage : float, voltage in V
    """
    # build voltage array from values in line 15
    # example: -0.64142 0.008 19 100
    # (start voltage) (step) (number of negative steps) (total number of steps)
    voltage_range = linecache.getline(iv_file, 15)
    voltage_range = voltage_range.split()
    vstart = float(voltage_range[0])
    vstep = float(voltage_range[1])
    nneg = int(voltage_range[2])
    npos = int(voltage_range[3]) - nneg
    # negative sweep includes the start point, positive sweep excludes it
    voltage = np.concatenate((
        np.linspace(vstart, vstart-vstep*nneg, nneg+1),
        np.linspace(vstart, vstart+vstep*npos, npos,
                    endpoint=False)
    ),
        axis=0)
    # read current
    current = np.genfromtxt(iv_file, skip_header=15)
    # sort rows by voltage so the curve is monotone in V
    array = np.vstack((voltage, current)).T
    array = array[array[:, 0].argsort(0)]
    voltage = array[:, 0]
    current = array[:, 1]
    return current, voltage
def ep_read(ep_file):
    """ Read measured doping profile from *.EP file
    Parameters
    ----------
    ep_file : str, path to the EP file
    Returns
    -------
    doping : float, doping level in cm^-3
    depth : float, depletion width + etched width in um
    """
    # first 13 lines are instrument header; the rest is a two-column table
    profile = np.genfromtxt(ep_file, skip_header=13, names=['depth', 'doping'])
    return profile['doping'], profile['depth']
def log_read(log_file, *field_name):
    """ Read data from ecv log file
    Parameters
    ----------
    log_file : str, path to log file
    field_name : tuple of str, name of field from the list
    list=('No', 'Lmp', 'MC', 'V-etch', 'I-etch',
          'V-meas', 'I-meas', 'Dis', 'FBP', 'Wr',
          'Wd', 'X', 'N', 'F1', 'F2', 'Amp', 'dV')
    Returns
    -------
    data : tuple of nparrays
    """
    dtype = np.dtype([('No', '>i4'), ('Lmp', '>i4'), ('MC', '|S8'),
                      ('V-etch', '>f4'), ('I-etch', '>f4'), ('V-meas', '>f4'),
                      ('I-meas', '>f4'), ('Dis', '>f4'), ('FBP', '>f4'),
                      ('Wr', '>f4'), ('Wd', '>f4'), ('X', '>f4'), ('N', '>f4'),
                      ('F1', '>f4'), ('F2', '>f4'), ('Amp', '>f4'), ('dV', '>f4')])
    # tokens that mark header/commentary lines to be skipped
    exclude = ['Spot', 'Value','Freq.', 'Dis.', 'C', 'G', 'Rs',
               'dC/dV', 'FBP', 'Depl.', 'N', 'No.', 'ECVpro',
               'ID:', 'Description:', 'Saved', 'Spot:', 'Etch',
               'Ring:', 'Recipe:', 'Electrolyte:', 'Pot:',
               'Contact', 'ECVision', 'Surface']
    data = []
    # BUG FIX: the file handle was never closed; `with` guarantees closure
    # even if parsing raises
    with open(log_file, 'r') as f:
        for line in f:
            if not [s for s in line.split() if s in exclude] \
                    and not line.split() == []:
                if 'F1=' not in line:
                    # NOTE(review): assumes an 'F1=..., F2=..., ...' parameter
                    # line always precedes the first data line, otherwise `mp`
                    # is unbound here -- TODO confirm with real log files
                    data.append(tuple(line.split()) +
                                (mp['F1'], mp['F2'], mp['Amp'], mp['dV']))
                else:
                    # measurement-parameter line: remember the latest values
                    mp = dict(s.split('=') for s in line.split(', '))
    data = np.vstack(np.array(data, dtype=dtype))
    return tuple(np.reshape(data[i], -1) for i in field_name)
def lin_fit(capacitance, voltage, vmin=None, vmax=None, eps=15.15):
    """ Returns linear fit for measured 1/C^2 and calculated Doping level
    Parameters
    ----------
    capacitance : float, capacitance in uF/cm^2
    voltage : float, voltage in V
    vmin, vmax : float, range for linear fitting (default: full data range)
    eps : float, dielectric constant (default is for InAs)
    Returns
    -------
    cap_fit: ndarray, 1/C^2 in cm^4/uF^2
    volt_fit: ndarray, voltage in V
    doping: float, calculated doping level in cm^-3
    """
    # BUG FIX: `if not vmin` treated vmin=0.0 (a perfectly legal bound) as
    # "not given" and silently widened the fit range; compare against None
    if vmin is None: vmin = min(voltage)
    if vmax is None: vmax = max(voltage)
    # keep only the points inside the requested voltage window
    volt_in_range, cap_in_range = [], []
    for i, volt in enumerate(voltage):
        if vmin <= volt <= vmax:
            volt_in_range.append(voltage[i])
            cap_in_range.append(capacitance[i])
    volt_in_range = np.asarray(volt_in_range)
    cap_in_range = np.asarray(cap_in_range)
    # straight-line fit of 1/C^2 against V (Mott-Schottky plot)
    coeff = np.polyfit(volt_in_range, 1/cap_in_range**2, 1)
    # volt_fit contain two point
    # first 1/C^2 --> 0, second is based on maximum capacitance
    b = max(1/capacitance**2)
    k = 10**round(math.log10(b))
    volt_fit = [-coeff[1]/coeff[0],
                (k*math.ceil(b/k)-coeff[1])/coeff[0]]
    volt_fit = np.array(volt_fit)
    cap_fit = np.polyval(coeff, volt_fit)
    # doping calculation from the slope (module-level constants q, eps0)
    doping = -1e-12*2/(coeff[0]*q*eps*eps0) # cm^-3
    return cap_fit, volt_fit, doping
|
|
'''
parse_schedules.py
Andrew Benson
Parses scheduling information from the Schedule Of Classes for a
given school quarter and year
for reference, here is what each column number refers to in the raw HTML
0: COURSE i.e. '15122'
1: TITLE i.e. 'Principles of Imperative Computation'
2: UNITS i.e. '10.0'
3: LEC/SEC i.e. 'Lec 1', 'M'
4: DAYS i.e. 'TR'
5: BEGIN i.e. '09:00AM'
6: END i.e. '10:20AM'
7: BUILDING/ROOM i.e. 'DH 2210'
8: LOCATION i.e. 'Pittsburgh, Pennsylvania'
9: INSTRUCTOR i.e. 'Simmons, Wright'
It's hard to determine what is a lecture and what is a section/recitation.
After extended examination of course data and how it shows up in SIO, I have
found two main types of courses: letter-lectures and ... non-letter-lectures.
Non-letter-lectures are courses like 15-122 (Principles of Imperative
Computation) or 80-180 (Nature of Language). The course has large central
meeting(s) that a large portion of the students attend (the lectures) each of
which are separated into sections (usually denoted with letters). The
lectures themselves are denoted with something like 'Lec' or 'Lec 1' or 'W'
(for a Qatar lecture). Sometimes they are even denoted with numbers.
Letter-lectures are courses like 21-295 (Putnam Seminar) or 15-295
(Competition Programming and Problem Solving). These are courses without large
central meetings that opt instead for a division into smaller (but still
significant) lettered groups. Because typically each group is taught by an
instructor and not by a TA, I call these lettered groups 'lectures'. Courses
meant for only certain majors, like advanced physics courses, have only one
lettered lecture and comprise much of this category of courses.
'''
import urllib.request
import bs4
import sys
# maps the user-facing quarter code to the slug used in the SOC URL
QUARTERS = {
    'S': 'spring',
    'M1': 'summer_1',
    'M2': 'summer_2',
    'F': 'fall'
}
# Schedule Of Classes URL template; %s is filled with a QUARTERS value
URL_FMT = 'http://enr-apps.as.cmu.edu/assets/SOC/sched_layout_%s.htm'
def get_page(quarter):
    '''
    return a BeautifulSoup that represents the HTML page specified by quarter
    quarter: one of ['S', 'M1', 'M2', 'F']
    if get_page fails, None will be returned
    raises ValueError if quarter is not a valid quarter code
    '''
    # set the URL based on the requested quarter
    if quarter not in QUARTERS:
        raise ValueError('quarter %s is invalid, it must be one of %s' %
                         (quarter, set(QUARTERS.keys())))
    url = URL_FMT % QUARTERS[quarter]
    # obtain and return data
    try:
        response = urllib.request.urlopen(url)
    # BUG FIX: was a bare `except:`, which also swallows KeyboardInterrupt /
    # SystemExit; narrowed to Exception while keeping the None contract
    except Exception:
        return None
    return bs4.BeautifulSoup(response.read(), 'html.parser')
def get_table_rows(page):
    '''
    return a list of relevant <tr> bs4 Tags
    page: a BeautifulSoup with a <table> with interesting rows
    '''
    all_rows = page.find_all('tr')
    # drop the leading empty row and the header row (Course, Title, Units...)
    return all_rows[2:]
def fix_known_errors(page):
    '''
    return a BeautifulSoup representing a fixed version of page
    page: a Beautiful Soup representing a malformed HTML page
    CMU doesn't seem to know how to write HTML. I could rant more,
    but that doesn't fix the issue. Here's a list of known issues:
    - The first row following a department name lacks a starting <tr> tag
      (this causes BeautifulSoup to skip over it when finding <tr>'s). Even
      worse, BeautifulSoup gets so confused that each time it happens it ends
      the HTML document there (with a </html> and such) and begins a new one.
    - Some rows don't even have 10 columns (i.e. they leave out the
      instructor column for no good reason). that's just annoying to parse.
    - Some rows decide to split everything into two rows JUST 'CAUSE THEY CAN.
      To be more specific, it looks like the course title is split into two.
      SIO's behavior appears to be just using the second line.
    - Some rows are empty except for the course title, sometimes appended with
      a colon. Not sure what's up with that, but it adds nonsense meetings to
      the previous section.
    NOTE(review): this function mutates `page` in place (extract/insert_after)
    even though the docstring says "return"; it returns None -- confirm
    callers rely only on the in-place mutation.
    '''
    for row_tag in get_table_rows(page):
        # detect department name. if found, bundle the tds into a tr
        row = process_row(row_tag)
        if row[0] and not row[0].isdigit():
            tds = []
            last_not = row_tag
            # find all tds up to next non-td
            while True:
                # sometimes we hit the end of the document due to corrupted bs4
                # parsing
                if not last_not.next_sibling:
                    break
                # idk why there are newlines, but we ignore them
                elif last_not.next_sibling == '\n':
                    last_not = last_not.next_sibling
                    continue
                # just in case we don't hit corrupted bs4 parsing after all
                elif last_not.next_sibling.name != 'td':
                    break
                # extract removes it from the document, and it acts like a
                # doubly-linked list, so it patches the next_sibling pointers
                else:
                    tds.append(last_not.next_sibling.extract())
            # make a new tr tag, add in the tds
            tr = page.new_tag('tr')
            counter = 0
            for td in tds:
                tr.append(td)
                counter += 1
            # ensure that the new row has 10 columns
            while counter < 10:
                tr.append(page.new_tag('td'))
                counter += 1
            # paste it back in
            row_tag.insert_after(tr)
            # continue with this new row
            row_tag = row_tag.next_sibling
            row = process_row(tr)
        # detect a row with only a course number, title, and credits
        if all(row[:3]) and not any(row[3:]):
            # extract course number and credits, and move to following row.
            # then delete this orphan row
            course_num = row[0]
            course_credits = row[2]
            next_row = row_tag
            while True:
                next_row = next_row.next_sibling
                if next_row != '\n':
                    break
            next_row.contents[0].string = course_num
            next_row.contents[2].string = course_credits
            row_tag.extract()
        # detect a row that's empty except for possibly the course title.
        # delete.
        elif not row[0] and row[1] and not any(row[2:]):
            row_tag.extract()
        else:
            # ensure that the new row has 10 columns
            i = len(row)
            while i < 10:
                row_tag.append(page.new_tag('td'))
                i += 1
def process_row(row_tag):
    '''
    return row_tag as a list of HTML-tag-stripped strings
    row_tag: a <tr> bs4 Tag, where each column contains exactly one string
    '''
    def cell_text(tag):
        # empty / missing / whitespace-only cells are normalised to None
        text = tag.string
        if text and not text.isspace():
            return text
        return None
    return [cell_text(child) for child in row_tag.children]
def parse_row(row):
    '''
    return (kind, data) where kind represents the kind of data returned
    row: list of HTML-tag-stripped strings that represent a data table row
    example return values:
    ('department', 'Computer Science')
    ('course', { num: '15122', title: 'Principles of Imperative...', ...})
    ('lecsec', { section: 'N', days: ['M'], ...})
    ('meeting', { days: ['N'], begin: '03:30PM', ...})
    (None, {})
    '''
    # local helper functions
    def parse_lec_sec(lec_sec_data):
        '''
        return a dictionary containing the values in lec_sec_data
        '''
        data = {}
        data['times'] = [parse_meeting(lec_sec_data)]
        data['name'] = lec_sec_data[3]
        if lec_sec_data[9]:
            # IDIOM FIX: was `[inst for inst in ...split(', ')]`, an identity
            # comprehension over an already-built list
            data['instructors'] = lec_sec_data[9].split(', ')
        else:
            data['instructors'] = None
        return data
    def build_day_list(text):
        '''
        Convert string of course days to array of ints (Sunday=0 ... Saturday=6)
        returns None for 'TBA'
        '''
        CHAR_INT_MAP = {
            'U': 0,
            'M': 1,
            'T': 2,
            'W': 3,
            'R': 4,
            'F': 5,
            'S': 6
        }
        if text == 'TBA':
            return None
        output = []
        for char in list(text):
            output.append(CHAR_INT_MAP[char])
        return output
    def parse_meeting(meeting_data):
        '''
        return a dictionary containing the values in meeting_data
        '''
        data = {}
        data['days'] = build_day_list(meeting_data[4])
        data['begin'] = meeting_data[5]
        data['end'] = meeting_data[6]
        if meeting_data[7] == 'TBA':
            data['building'] = None
            data['room'] = None
        else:
            # 'DH 2210' -> building 'DH', room '2210'
            parts = meeting_data[7].split(' ', 1)
            data['building'] = parts[0]
            data['room'] = parts[1]
        data['location'] = meeting_data[8]
        return data
    # the data can be very irregular, so we wrap with try-except
    try:
        # case department (non-empty, non-numeric string course)
        if row[0] and not row[0].isdigit():
            return ('department', row[0])
        # case course (determined by having a numeric course)
        elif row[0] and row[0].isdigit():
            data = {}
            data['num'] = row[0]
            data['title'] = row[1]
            data['units'] = row[2]
            data['lectures'] = [parse_lec_sec(row)]
            data['sections'] = []
            return ('course', data)
        # case lecture or section
        elif row[3]:
            return ('lecsec', parse_lec_sec(row))
        # case meeting
        else:
            return ('meeting', parse_meeting(row))
    except Exception as e:
        print('Failed to parse row: %s; %s' % (row, e))
        return (None, {})
def extract_data_from_row(tr, data, curr_state):
    '''
    extract the data from tr and put it in data. update curr_state accordingly
    tr: a <tr> bs4 Tag
    data: list of course dicts, appended to in place
    curr_state: dict threading parser state (current department/course/
    lecture/lecsec and whether lectures are letter-denoted) between rows
    '''
    # helper functions
    def is_lecture(letter, is_first_line):
        '''
        return whether the letter represents a lecture (rather than a section)
        '''
        letter = letter.lower()
        if is_first_line:
            # W can be a lecture, but only if it's on the first line
            # weirdly enough, lectures can sometimes be simple numbers
            return 'lec' in letter or 'w' in letter or letter.isdigit()
        else:
            return 'lec' in letter
    # parse the row into a dictionary
    (kind, row_data) = parse_row(process_row(tr))
    # determine whether to store the dictionary, and update curr_state
    if kind == 'department':
        curr_state['curr_department'] = row_data
    elif kind == 'course':
        curr_state['curr_course'] = row_data
        curr_state['curr_course']['department'] = curr_state['curr_department']
        # the course determines whether lectures are denoted with 'lec' or
        # letters
        if not is_lecture(row_data['lectures'][0]['name'], True):
            curr_state['is_letter_lecture'] = True
        else:
            curr_state['is_letter_lecture'] = False
        curr_state['curr_lecture'] = row_data['lectures'][0]
        curr_state['curr_lec_sec'] = row_data['lectures'][0]
        curr_state['curr_course']['sections'] = []
        data.append(curr_state['curr_course'])
    elif kind == 'lecsec':
        curr_state['curr_lec_sec'] = row_data
        # if course is a letter-lecture, then this is for sure another lecture
        if curr_state['is_letter_lecture']:
            # add in lecture
            curr_state['curr_course']['lectures'].append(row_data)
        # not-letter-lecture
        else:
            # determine if lecture or section
            if is_lecture(row_data['name'], False):
                curr_state['curr_lecture'] = row_data
                # add in lecture
                curr_state['curr_course']['lectures'].append(row_data)
            else:
                # add in section
                curr_state['curr_course']['sections'].append(row_data)
    elif kind == 'meeting':
        curr_state['curr_lec_sec']['times'].append(row_data)
    else:
        # BUG FIX: was `Exception('Unexpected kind: %s', kind)` -- the %s was
        # never interpolated (logging-style args don't work for exceptions)
        raise Exception('Unexpected kind: %s' % kind)
def parse_schedules(quarter):
    '''
    given a quarter, return a Python dictionary representing the data for it
    quarter: one of ['S', 'M1', 'M2', 'F']
    '''
    # fetch the raw Schedule Of Classes page
    print('Requesting the HTML page from the network...')
    soup = get_page(quarter)
    if not soup:
        print('Failed to obtain the HTML document! '
              'Check your internet connection.')
        sys.exit()
    print('Done.')
    # the semester name lives in the second <b> tag after a fixed prefix
    semester = soup.find_all('b')[1].get_text()[10:]
    # repair CMU's malformed HTML, then pull out the data rows
    print('Fixing errors on page...')
    fix_known_errors(soup)
    print('Done.')
    print('Finding table rows on page...')
    table_rows = get_table_rows(soup)
    print('Done.')
    # shared parser state threaded through every row
    parser_state = {
        'curr_course': None, # where the course should go
        'curr_lec_sec': None, # where meeting times should go
        'curr_lecture': None, # where lectures should go
        'curr_department': None, # where the department should go
        'is_letter_lecture': False # whether lectures are denoted by letters
    }
    schedules = []
    print('Parsing rows...')
    for table_row in table_rows:
        extract_data_from_row(table_row, schedules, parser_state)
    print('Done.')
    return {
        'schedules': schedules,
        'semester': semester
    }
|
|
import bioinf
from numpy import array, dot, arccos, rad2deg, ndarray, cross
from numpy.linalg import norm
from constants import *
from collections import OrderedDict, namedtuple
class PDBATOMFileReader(object):
    """Parse ATOM records from a PDB file and cross-link the resulting
    AtomIQ / ResidueIQ / ChainIQ objects.

    file_or_path may be a path string or an already-open file-like object.
    (This module is Python 2 code: basestring / itervalues.)
    """
    def __init__(self, file_or_path):
        self._parse_atom_lines(file_or_path)
        self._set_residues_and_chains_of_each_atom()
        self._set_chain_of_each_residue_and_add_it_to_itsown_chain()
    def _parse_atom_lines(self, file_or_path):
        """Build ordered atom/residue/chain maps from 'ATOM' lines."""
        if isinstance(file_or_path, basestring):
            f = open(file_or_path, 'r')
        else:
            f = file_or_path
        self._atoms = OrderedDict()
        self._residues = OrderedDict()
        self._chains = OrderedDict()
        # BUG FIX: the file handle leaked if parsing raised; try/finally
        # guarantees closure
        try:
            for line in f:
                clean_line = line.strip()
                if clean_line.startswith('ATOM'):
                    atom = AtomIQ(clean_line)
                    self._atoms[atom.serial] = atom
                    # EAFP: append to an existing chain/residue, create on miss
                    try:
                        self._chains[atom.chainID].add_atom(atom)
                    except KeyError:
                        self._chains[atom.chainID] = ChainIQ(atom)
                    try:
                        self._residues[atom.chainID + atom.uid].add_atom(atom)
                    except KeyError:
                        self._residues[atom.chainID + atom.uid] = ResidueIQ(atom)
        finally:
            # NOTE(review): this also closes a caller-supplied file object,
            # preserving the original behaviour -- confirm callers expect it
            f.close()
    def _set_residues_and_chains_of_each_atom(self):
        # wire each atom back to its ResidueIQ and ChainIQ
        for atom in self._atoms.itervalues():
            atom.set_Residue(self._residues[atom.chainID + atom.uid])
            atom.set_Chain(self._chains[atom.chainID])
    def _set_chain_of_each_residue_and_add_it_to_itsown_chain(self):
        for residue in self._residues.itervalues():
            residue.set_Chain(self._chains[residue.chainID])
            self._chains[residue.chainID].add_residue(residue)
    def __iter__(self):
        # yield atoms in file order
        for atom in self._atoms:
            yield self._atoms[atom]
class AtomIQ(object):
    """A single parsed ATOM record: identity, coordinates, and (lazy) links
    to its residue and chain, plus an optional hydrogen-bond participant."""
    def __init__(self, pdb_line):
        # pdb_line: one raw 'ATOM ...' line from a PDB file
        assert isinstance(pdb_line, basestring)
        pdb_atom_line = bioinf.PDBAtomLine.parse_string(pdb_line)
        self._res_name = pdb_atom_line.resName
        self._resSeq = pdb_atom_line.resSeq
        self._name = pdb_atom_line.name
        self._serial = pdb_atom_line.serial
        # residue/chain are linked later by the file reader
        self._residue = None
        self._chain = None
        self._chainID = pdb_atom_line.chainID
        self._coordinates = array([
            float(pdb_atom_line.x),
            float(pdb_atom_line.y),
            float(pdb_atom_line.z)
        ])
        # may be None when the atom is neither an H-bond donor nor acceptor
        self._participant = \
            HBondParticipant.generate_participant_by_valence(self)
    def set_Residue(self, residue):
        # link to owning residue; its uid must agree with this atom's resSeq
        assert isinstance(residue, ResidueIQ)
        assert residue.uid == self._resSeq
        self._residue = residue
    def set_Chain(self, chain):
        # one-shot link to owning chain; raises if already assigned
        assert isinstance(chain, ChainIQ)
        assert chain.chainID == self._chainID
        if self._chain is None:
            self._chain = chain
        else:
            raise TypeError('chain was already set and thus was not None')
    # read-only views over the parsed fields
    res_name = property(lambda self: self._res_name)
    uid = property(lambda self: self._resSeq)
    name = property(lambda self: self._name)
    chainID = property(lambda self: self._chainID)
    coordinates = property(lambda self: self._coordinates)
    serial = property(lambda self: self._serial)
    residue = property(lambda self: self._residue, set_Residue)
    chain = property(lambda self: self._chain, set_Chain)
    participant = property(lambda self: self._participant)
class HBondParticipant(object):
    """Hydrogen-bond donor/acceptor behaviour attached to an AtomIQ.

    Instances are normally built via generate_participant_by_valence, which
    returns an Sp2/Sp3 subclass (or None for non-participating atoms).
    """
    def __init__(self, atom,
            is_donor=False, H_bond_donor_radius=None, max_num_H_donations=None,
            is_acceptor=False, H_bond_acceptor_radius=None,
            max_num_H_acceptance=None, NN=None, NNN=None):
        # atom: the AtomIQ this participant wraps
        assert isinstance(atom, AtomIQ)
        self._atom = atom
        # donor/acceptor capabilities and their radii / bond-count limits
        self._is_acceptor = is_acceptor
        self._is_donor = is_donor
        self._H_bond_acceptor_radius = H_bond_acceptor_radius
        self._H_bond_donor_radius = H_bond_donor_radius
        self._max_num_H_acceptance = max_num_H_acceptance
        self._max_num_H_donations = max_num_H_donations
        # NN / NNN: nearest-neighbour atom names from the group tables --
        # presumably used for geometry checks downstream; TODO confirm
        self._NN = NN
        self._NNN = NNN
        # candidate partners accumulated during H-bond search
        self._acceptor_list = []
        self._donor_list = []
        self._backup_donors = []
        self._backup_acceptors = []
@staticmethod
def _atom_in_group_is_Hbond_participant(atom, currentGroup, backbone_atom_name):
assert isinstance(atom, AtomIQ)
assert isinstance(currentGroup, HBondGroup)
assert backbone_atom_name in ('N', 'O')
return (
(atom.name in currentGroup.atoms_str_tupl and
atom.res_name == currentGroup.residue.upper())
or
(atom.name == backbone_atom_name and
currentGroup.residue == 'Peptide')
)
@staticmethod
def generate_participant_by_valence(atom):
assert isinstance(atom, AtomIQ)
backbone = namedtuple('backbone_Hbond_atom_name',
['donor','acceptor'])('N', 'O')
is_acceptor = False
is_donor = False
H_bond_donor_radius = None
max_num_H_donations = None
H_bond_acceptor_radius = None
max_num_H_acceptance = None
for currentDonorGroup in hbond_donor_groups:
if HBondParticipant._atom_in_group_is_Hbond_participant(
atom, currentDonorGroup, backbone.donor):
is_donor = True
valence = currentDonorGroup.valence
H_bond_donor_radius = currentDonorGroup.H_bond_radius
max_num_H_donations = currentDonorGroup.max_num_H_bonds
NN = currentDonorGroup.NN
NNN = currentDonorGroup.NNN
for currentAcceptorGroup in hbond_acceptor_groups:
if HBondParticipant._atom_in_group_is_Hbond_participant(
atom, currentAcceptorGroup, backbone.acceptor):
is_acceptor = True
valence = currentAcceptorGroup.valence
H_bond_acceptor_radius = currentDonorGroup.H_bond_radius
max_num_H_acceptance = currentDonorGroup.max_num_H_bonds
NN = currentAcceptorGroup.NN
NNN = currentAcceptorGroup.NNN
if is_acceptor or is_donor:
if valence == 'sp2':
return Sp2HBondParticipant(atom,
is_donor, H_bond_donor_radius, max_num_H_donations,
is_acceptor, H_bond_acceptor_radius, max_num_H_acceptance,
NN, NNN
)
elif valence == 'sp3':
return Sp3HBondParticipant(atom,
is_donor, H_bond_donor_radius, max_num_H_donations,
is_acceptor, H_bond_acceptor_radius, max_num_H_acceptance,
NN, NNN
)
else:
return None
def has_excessive_donors(self):
return len(self._donor_list) > self._max_num_H_donations
def has_excessive_acceptors(self):
return len(self._acceptor_list) > self._max_num_H_donations
is_acceptor = property(lambda self: self._is_acceptor)
is_donor = property(lambda self: self._is_donor)
H_bond_acceptor_radius = property(
lambda self: self._H_bond_acceptor_radius)
H_bond_donor_radius = property(lambda self: self._H_bond_donor_radius)
max_num_H_acceptance = property(lambda self: self._max_num_H_acceptance)
max_num_H_donations = property(lambda self: self._max_num_H_donations)
atom = property(lambda self: self._atom)
NN = property(lambda self: self._NN)
NNN = property(lambda self: self._NNN)
class AngleMinimum(namedtuple('AngleMinimum', ['as_donor', 'as_acceptor'])):
    """Minimum allowed H-bond angles, one threshold per bonding role."""
    def angle_as_donor(self, donor=True):
        """Return the donor-role threshold, or the acceptor one when not donor."""
        if donor:
            return self.as_donor
        return self.as_acceptor
class PlaneAngleMaximum(
        namedtuple('PlaneAngleMaximum', ['as_donor', 'as_acceptor'])):
    """Maximum allowed out-of-plane angles, one threshold per bonding role.

    FIX: the underlying namedtuple typename was copy-pasted as
    'AngleMinimum', which made repr() of instances misleading.
    """
    def angle_as_donor(self, donor=True):
        """Return the donor-role threshold, or the acceptor one when not donor."""
        return self.as_donor if donor else self.as_acceptor
class Sp3HBondParticipant(HBondParticipant):
    """H-bond geometry checks for sp3-hybridised participants.

    Naming convention: M = this atom, P = partner atom, MM = M's bonded
    neighbour (self.NN), MMM = the next neighbour out (self.NNN); vector
    names like "MtP" run from the first atom to the second.
    """
    # Minimum M->P vs M->MM angle: 90 deg as donor, 60 deg as acceptor.
    _angle_min = AngleMinimum(90., 60.)
    def _distance_is_ok(self, partner):
        # Return the M-P distance (a truthy float) when within bonding
        # range, else False.  NOTE(review): a distance of exactly 0.0 would
        # read as falsy -- presumably impossible for distinct atoms.
        M = self._atom.coordinates
        P = partner.atom.coordinates
        distance = norm(M - P)
        if distance < self._H_bond_donor_radius + partner.H_bond_acceptor_radius:
            return distance
        else:
            return False
    @staticmethod
    def angle(ba, bc):
        # Angle in degrees between vectors ba and bc (numpy arrays).
        assert isinstance(ba, ndarray)
        assert isinstance(bc, ndarray)
        return rad2deg(arccos(dot(bc, ba) / (norm(bc) * norm(ba))))
    def angle_is_ok(self, MtP, MtMM, as_donor=True):
        # Bond angle must exceed the role-specific minimum (and be < 180).
        angle = self.angle(MtP, MtMM)
        return angle < 180. and angle > self._angle_min.angle_as_donor(as_donor)
    def planarity_is_ok(self, MtP, MtMM, MMtMMM, as_donor=True):
        # sp3 centres impose no planarity constraint; Sp2 subclass overrides.
        return True
    @staticmethod
    def can_bond_to_partner(myself, partner, as_donor=True):
        # Pure geometry test: True on success, implicitly None (falsy)
        # otherwise.
        assert isinstance(myself, HBondParticipant)
        assert isinstance(partner, HBondParticipant)
        M = myself.atom.coordinates
        P = partner.atom.coordinates
        MM = myself.atom.residue.atoms[myself.NN].coordinates
        MtoMM = MM - M
        MtoP = P - M
        if myself.angle_is_ok(MtoP, MtoMM, as_donor):
            MMM = myself.atom.residue.atoms[myself.NNN].coordinates
            MMtoMMM = MMM - MM
            if myself.planarity_is_ok(MtoP, MtoMM, MMtoMMM, as_donor):
                return True
    def H_bond_is_mutual(self, partner):
        # Record the bond on both participants when self (as donor) and
        # partner (as acceptor) each satisfy their geometric criteria.
        # Returns the distance (truthy) or False, regardless of whether
        # the angle/planarity checks passed.
        assert isinstance(partner, HBondParticipant)
        distance_or_is_ok = self._distance_is_ok(partner)
        if distance_or_is_ok and \
            self.can_bond_to_partner(self, partner) and \
            self.can_bond_to_partner(partner, self, as_donor=False):
            partner.append_donor_list(self)
            self.append_acceptor_list(partner)
        return distance_or_is_ok
    def append_donor_list(self, potential_h_donor):
        self.donor_list.append(potential_h_donor)
    def append_acceptor_list(self, potential_h_acceptor):
        self.acceptor_list.append(potential_h_acceptor)
    # NOTE(review): the lambda parameter is named 'valence' but actually
    # receives self; it is ignored and the constant is returned.
    valence = property(lambda valence:'sp3')
    acceptor_list = property(lambda self: self._acceptor_list, append_acceptor_list)
    donor_list = property(lambda self: self._donor_list, append_donor_list)
class Sp2HBondParticipant(Sp3HBondParticipant):
    """sp2 variant: tighter angle minima plus an out-of-plane check."""
    _angle_min = AngleMinimum(90., 90.)
    _plane_angle_max = PlaneAngleMaximum(60., 90.)
    @staticmethod
    def planarity(ba, bc, cd):
        # Deviation (degrees) of direction cd from the plane spanned by
        # ba and bc, computed by comparing the two plane normals.
        assert isinstance(ba, ndarray)
        assert isinstance(bc, ndarray)
        assert isinstance(cd, ndarray)
        my_plane_norm = cross(ba, bc)
        perndclr_bc_in_plane = cross(bc, my_plane_norm)
        # Reference torsion of 0 or 180 deg depending on which side of the
        # plane cd points towards.
        torsion_angle_center = 0 if dot(cd, perndclr_bc_in_plane) > 0. else 180.
        plane_norm_w_partner = cross(-bc, cd)
        return abs(torsion_angle_center - Sp3HBondParticipant.angle(
            my_plane_norm, plane_norm_w_partner))
    def planarity_is_ok(self, MtP, MtMM, MMtMMM, as_donor=True):
        # Partner must lie within the role-specific out-of-plane maximum.
        plane_angle = self.planarity(MMtMMM, -MtMM, MtP)
        return plane_angle < self._plane_angle_max.angle_as_donor(as_donor)
    # NOTE(review): lambda parameter named 'valence' actually receives self.
    valence = property(lambda valence: 'sp2')
class ResidueIQ(object):
    """A residue assembled incrementally from its AtomIQ records."""
    def __init__(self, atom):
        """Seed the residue from its first parsed atom."""
        assert isinstance(atom, AtomIQ)
        self._abbr = atom.res_name
        self._uid = atom.uid
        self._chainID = atom.chainID
        self._atoms = {atom.name: atom}
        self._chain = None
    def add_atom(self, atom):
        """Register *atom* under its name; duplicate names are ignored."""
        assert isinstance(atom, AtomIQ)
        assert self.uid == atom.uid
        self._atoms.setdefault(atom.name, atom)
    def set_Chain(self, chain):
        """Attach the owning ChainIQ; may only be set once."""
        assert isinstance(chain, ChainIQ)
        assert chain.chainID == self._chainID
        if self._chain is not None:
            raise TypeError('chain was already set and thus was not None')
        self._chain = chain
    atoms = property(lambda self: self._atoms, add_atom)
    uid = property(lambda self: self._uid)
    chainID = property(lambda self: self._chainID)
    abbr = property(lambda self: self._abbr)
    chain = property(lambda self: self._chain, set_Chain)
class ChainIQ(object):
    """One PDB chain: ordered atoms plus ordered residues."""
    def __init__(self, atom):
        """Seed the chain from its first parsed atom."""
        assert isinstance(atom, AtomIQ)
        self._chainID = atom.chainID
        self._atoms = OrderedDict({atom.serial: atom})
        self._residues = OrderedDict({atom.uid: atom.residue})
    def add_atom(self, atom):
        """Register *atom* by serial; duplicate serials are an error."""
        assert isinstance(atom, AtomIQ)
        if atom.serial in self._atoms:
            raise KeyError('%s already exists in list of atoms for chain %s' %
                (atom.serial, self._chainID))
        self._atoms[atom.serial] = atom
    def add_residue(self, residue):
        """Register *residue* by uid; duplicates are ignored."""
        assert isinstance(residue, ResidueIQ)
        self._residues.setdefault(residue.uid, residue)
    atoms = property(lambda self: self._atoms, add_atom)
    residues = property(lambda self: self._residues, add_residue)
    chainID = property(lambda self: self._chainID)
class ProteinIQ(object):
    """Protein-level H-bond bookkeeping built from a parsed PDB file.

    NOTE(review): this class looks unfinished upstream -- the donor/acceptor
    pairing loop has an empty body and nothing is returned yet.
    """
    @staticmethod
    def generate_protein_from_PDB_ATOM_File_Reader(pdb):
        """Index atoms by serial and split out H-bond donors/acceptors.

        FIX: the loop previously iterated over an undefined name `reader`;
        it now iterates over the `pdb` argument.  Also made a staticmethod
        so it can be called without an instance.
        """
        assert isinstance(pdb, PDBATOMFileReader)
        donorDict = {}
        acceptorDict = {}
        atoms = {}
        for atom in pdb:
            atoms[atom.serial] = atom
            if atom.participant:
                if atom.participant.is_donor:
                    donorDict[atom.serial] = atom
                if atom.participant.is_acceptor:
                    acceptorDict[atom.serial] = atom
        # TODO: pair up donors and acceptors; body left empty upstream.
        for donor in donorDict.itervalues():
            for acceptor in acceptorDict.itervalues():
                pass
        pass
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from pip._vendor.pyparsing import ( # noqa: N817
Forward,
Group,
Literal as L,
ParseException,
ParseResults,
QuotedString,
ZeroOrMore,
stringEnd,
stringStart,
)
from .specifiers import InvalidSpecifier, Specifier
# Public API of this module.
__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]
# Signature shared by all comparison callables in ``_operators``.
Operator = Callable[[str, str], bool]
class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.

    Raised by ``Marker(...)`` when the marker string fails to parse.
    """
class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.

    Raised by ``_eval_op`` when the operator has no string fallback.
    """
class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.

    Raised by ``_get_env`` during marker evaluation.
    """
class Node:
    """Base AST node wrapping a single parsed marker token."""

    def __init__(self, value: Any) -> None:
        self.value = value

    def __str__(self) -> str:
        return str(self.value)

    def __repr__(self) -> str:
        return "<{}('{}')>".format(type(self).__name__, self)

    def serialize(self) -> str:
        """Render this node back to PEP 508 source text."""
        raise NotImplementedError
class Variable(Node):
    """A marker environment variable name, e.g. ``python_version``."""
    def serialize(self) -> str:
        return str(self)
class Value(Node):
    """A literal string value; serialized back inside double quotes."""
    def serialize(self) -> str:
        return f'"{self}"'
class Op(Node):
    """A comparison or containment operator, e.g. ``>=`` or ``not in``."""
    def serialize(self) -> str:
        return str(self)
# pyparsing grammar for PEP 508 environment markers.
VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name") # PEP-345
    | L("sys.platform") # PEP-345
    | L("platform.version") # PEP-345
    | L("platform.machine") # PEP-345
    | L("platform.python_implementation") # PEP-345
    | L("python_implementation") # undocumented setuptools legacy
    | L("extra") # PEP-508
)
# Legacy PEP-345 names normalized to their modern PEP-508 equivalents.
ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
# Longest operators listed first so e.g. "===" matches before "==".
VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
# One comparison: <var-or-value> <op> <var-or-value>, parsed to a tuple.
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
# Expressions recurse through parenthesized groups via the Forward ref.
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
    """Recursively convert a pyparsing ParseResults tree into plain lists."""
    if not isinstance(results, ParseResults):
        return results
    return [_coerce_parse_result(child) for child in results]
def _format_marker(
    marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
) -> str:
    """Serialize a parsed marker structure back into PEP 508 source text."""
    assert isinstance(marker, (list, tuple, str))
    # Collapse [[...]] single-item wrappers so the outermost level never
    # gains redundant parentheses.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])
    if isinstance(marker, str):
        return marker
    if isinstance(marker, tuple):
        # A single comparison: serialize lhs, op, rhs.
        return " ".join(m.serialize() for m in marker)
    # A list is a boolean expression; parenthesize every level below the
    # outermost one.
    joined = " ".join(_format_marker(item, first=False) for item in marker)
    return joined if first else "(" + joined + ")"
# Fallback string-comparison operators, used by _eval_op when the
# right-hand side is not a valid PEP 440 version specifier.
_operators: Dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    """Evaluate one comparison, preferring PEP 440 version semantics.

    First try to interpret ``op rhs`` as a version specifier (e.g. ">=1.2");
    if it parses, version containment of *lhs* decides.  Otherwise fall back
    to the plain string operators in ``_operators``.

    Raises UndefinedComparison when the operator has no string fallback.
    """
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)
    oper: Optional[Operator] = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
    return oper(lhs, rhs)
class Undefined:
    """Sentinel type marking a name missing from the marker environment."""
    pass
# Shared sentinel instance used as the dict.get() default in _get_env.
_undefined = Undefined()
def _get_env(environment: Dict[str, str], name: str) -> str:
    """Look up *name* in *environment*, raising when it is absent."""
    value: Union[str, Undefined] = environment.get(name, _undefined)
    if not isinstance(value, Undefined):
        return value
    raise UndefinedEnvironmentName(
        f"{name!r} does not exist in evaluation environment."
    )
def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
    """Evaluate a parsed marker tree against *environment*.

    ``groups`` collects runs of and-ed results; each "or" starts a new run,
    so the final result is an OR of ANDs, matching boolean precedence.
    """
    groups: List[List[bool]] = [[]]
    for marker in markers:
        assert isinstance(marker, (list, tuple, str))
        if isinstance(marker, list):
            # Parenthesized sub-expression: evaluate recursively.
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker
            # Exactly one side is a Variable; resolve it from the environment.
            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)
            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])
    return any(all(item) for item in groups)
def format_full_version(info: "sys._version_info") -> str:
    """Format an implementation version, e.g. ``3.11.0`` or ``3.12.0b2``."""
    base = "{}.{}.{}".format(info.major, info.minor, info.micro)
    if info.releaselevel == "final":
        return base
    # Non-final releases append the level's first letter plus the serial,
    # e.g. releaselevel="beta", serial=2 -> "b2".
    return base + info.releaselevel[0] + str(info.serial)
def default_environment() -> Dict[str, str]:
    """Capture the PEP 508 marker values for the running interpreter."""
    implementation = sys.implementation
    return {
        "implementation_name": implementation.name,
        "implementation_version": format_full_version(implementation.version),
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }
class Marker:
    """A parsed PEP 508 environment marker, e.g. ``python_version >= "3.8"``."""
    def __init__(self, marker: str) -> None:
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            # Quote up to 8 characters around the failure position.
            raise InvalidMarker(
                f"Invalid marker: {marker!r}, parse error at "
                f"{marker[e.loc : e.loc + 8]!r}"
            )
    def __str__(self) -> str:
        return _format_marker(self._markers)
    def __repr__(self) -> str:
        return f"<Marker('{self}')>"
    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
        """Evaluate a marker.
        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.
        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        if environment is not None:
            current_environment.update(environment)
        return _evaluate_markers(self._markers, current_environment)
|
|
"""Run migrations."""
import typing as t
from functools import wraps
import peewee as pw
from playhouse.migrate import (
MySQLMigrator as MqM,
PostgresqlMigrator as PgM,
SchemaMigrator as ScM,
SqliteMigrator as SqM,
Operation, SQL, PostgresqlDatabase, operation, SqliteDatabase, MySQLDatabase,
make_index_name, Context
)
from peewee_migrate import LOGGER
class SchemaMigrator(ScM):
    """Implement migrations.

    Extends peewee's playhouse SchemaMigrator with backend dispatch and a
    generic column-change operation.
    """
    @classmethod
    def from_database(cls, database: pw.Database) -> ScM:
        """Initialize migrator by db (dispatch on the database backend)."""
        if isinstance(database, PostgresqlDatabase):
            return PostgresqlMigrator(database)
        if isinstance(database, SqliteDatabase):
            return SqliteMigrator(database)
        if isinstance(database, MySQLDatabase):
            return MySQLMigrator(database)
        return super(SchemaMigrator, cls).from_database(database)
    def drop_table(self, model: pw.Model, cascade: bool = True) -> t.Callable:
        """Drop table (deferred: returns a callable executed later)."""
        return lambda: model.drop_table(cascade=cascade)
    @operation
    def change_column(self, table: str, column_name: str, field: pw.Field) -> t.List[Operation]:
        """Change column type, re-adding NOT NULL separately if needed."""
        operations = [self.alter_change_column(table, column_name, field)]
        if not field.null:
            operations.extend([self.add_not_null(table, column_name)])
        return operations
    def alter_change_column(self, table: str, column: str, field: pw.Field) -> Context:
        """Support change columns."""
        ctx = self.make_context()
        # Temporarily mark the field nullable so its generated DDL omits
        # NOT NULL; the constraint is applied separately by change_column.
        field_null, field.null = field.null, True
        ctx = self._alter_table(ctx, table).literal(' ALTER COLUMN ').sql(field.ddl(ctx))
        field.null = field_null
        return ctx
    @operation
    def sql(self, sql: str, *params) -> SQL:
        """Execute raw SQL."""
        return SQL(sql, *params)
    def alter_add_column(
            self, table: str, column_name: str, field: pw.Field, **kwargs) -> Operation:
        """Fix fieldname for ForeignKeys."""
        # The base implementation mutates FK field names; restore the
        # original name afterwards so the model stays consistent.
        name = field.name
        op = super(SchemaMigrator, self).alter_add_column(table, column_name, field, **kwargs)
        if isinstance(field, pw.ForeignKeyField):
            field.name = name
        return op
class MySQLMigrator(SchemaMigrator, MqM):
    """Support MySQL."""
    def alter_change_column(self, table: str, column: str, field: pw.Field) -> Context:
        """Support change columns (MySQL uses MODIFY COLUMN syntax)."""
        ctx = self.make_context()
        # Same nullable trick as the base class: emit DDL without NOT NULL.
        field_null, field.null = field.null, True
        ctx = self._alter_table(ctx, table).literal(' MODIFY COLUMN ').sql(field.ddl(ctx))
        field.null = field_null
        return ctx
class PostgresqlMigrator(SchemaMigrator, PgM):
    """Support the migrations in postgresql."""
    def alter_change_column(self, table: str, column_name: str, field: pw.Field) -> Context:
        """Support change columns (PostgreSQL requires ALTER COLUMN ... TYPE)."""
        context = super(PostgresqlMigrator, self).alter_change_column(table, column_name, field)
        # Splice the TYPE keyword into the generated SQL fragment list just
        # before the column's DDL (peewee-internal representation).
        context._sql.insert(-1, 'TYPE')
        context._sql.insert(-1, ' ')
        return context
class SqliteMigrator(SchemaMigrator, SqM):
    """Support the migrations in sqlite."""
    def drop_table(self, model: pw.Model, cascade: bool = True) -> t.Callable:
        """Sqlite doesnt support cascade syntax by default."""
        # cascade argument is accepted for interface parity but ignored.
        return lambda: model.drop_table(cascade=False)
    def alter_change_column(self, table: str, column: str, field: pw.Field) -> Operation:
        """Support change columns."""
        # SQLite cannot ALTER a column in place; rebuild it via playhouse's
        # table-copy helper, substituting the new field definition.
        return self._update_column(table, column, lambda a, b: b)
def get_model(method):
    """Allow migrator methods to accept a table name in place of a model.

    When the ``model`` argument is a string it is resolved through
    ``migrator.orm`` before the wrapped method is invoked.
    """
    @wraps(method)
    def wrapper(migrator, model, *args, **kwargs):
        resolved = migrator.orm[model] if isinstance(model, str) else model
        return method(migrator, resolved, *args, **kwargs)
    return wrapper
class Migrator(object):
    """Provide migrations.

    Methods do not touch the database directly: they queue Operation
    objects (or callables) in ``self.ops``; ``run()`` executes and clears
    the queue.  Methods decorated with @get_model also accept a table
    name (str) in place of a model class.
    """
    def __init__(self, database: t.Union[pw.Database, pw.Proxy]):
        """Initialize the migrator."""
        if isinstance(database, pw.Proxy):
            database = database.obj
        self.database = database
        # Registry of managed models, keyed by table name.
        self.orm: t.Dict[str, pw.Model] = {}
        # Pending operations; executed (and cleared) by run().
        self.ops: t.List[Operation] = []
        self.migrator = SchemaMigrator.from_database(self.database)
    def run(self):
        """Run operations."""
        for op in self.ops:
            if isinstance(op, Operation):
                LOGGER.info("%s %s", op.method, op.args)
                op.run()
            else:
                # Plain callables (e.g. from python()/drop_table()).
                op()
        self.clean()
    def python(self, func: t.Callable, *args, **kwargs):
        """Run python code."""
        self.ops.append(lambda: func(*args, **kwargs))
    def sql(self, sql: str, *params):
        """Execute raw SQL."""
        self.ops.append(self.migrator.sql(sql, *params))
    def clean(self):
        """Clean the operations."""
        self.ops = list()
    def create_table(self, model: pw.Model) -> pw.Model:
        """Create model and table in database.
        >> migrator.create_table(model)
        """
        self.orm[model._meta.table_name] = model
        model._meta.database = self.database
        self.ops.append(model.create_table)
        return model
    create_model = create_table
    @get_model
    def drop_table(self, model: pw.Model, cascade: bool = True):
        """Drop model and table from database.
        >> migrator.drop_table(model, cascade=True)
        """
        del self.orm[model._meta.table_name]
        self.ops.append(self.migrator.drop_table(model, cascade))
    remove_model = drop_table
    @get_model
    def add_columns(self, model: pw.Model, **fields: pw.Field) -> pw.Model:
        """Create new fields."""
        for name, field in fields.items():
            model._meta.add_field(name, field)
            self.ops.append(self.migrator.add_column(
                model._meta.table_name, field.column_name, field))
            # Unique fields also need a unique index.
            if field.unique:
                self.ops.append(self.migrator.add_index(
                    model._meta.table_name, (field.column_name,), unique=True))
        return model
    add_fields = add_columns
    @get_model
    def change_columns(self, model: pw.Model, **fields: pw.Field) -> pw.Model:
        """Change fields."""
        for name, field in fields.items():
            # NOTE(review): when the field is new, old_field falls back to
            # the new field itself, so the comparisons below are no-ops.
            old_field = model._meta.fields.get(name, field)
            old_column_name = old_field and old_field.column_name
            model._meta.add_field(name, field)
            if isinstance(old_field, pw.ForeignKeyField):
                self.ops.append(self.migrator.drop_foreign_key_constraint(
                    model._meta.table_name, old_column_name))
            if old_column_name != field.column_name:
                self.ops.append(
                    self.migrator.rename_column(
                        model._meta.table_name, old_column_name, field.column_name))
            if isinstance(field, pw.ForeignKeyField):
                on_delete = field.on_delete if field.on_delete else 'RESTRICT'
                on_update = field.on_update if field.on_update else 'RESTRICT'
                self.ops.append(self.migrator.add_foreign_key_constraint(
                    model._meta.table_name, field.column_name,
                    field.rel_model._meta.table_name, field.rel_field.name,
                    on_delete, on_update))
                continue
            self.ops.append(self.migrator.change_column(
                model._meta.table_name, field.column_name, field))
            if field.unique == old_field.unique:
                continue
            # Uniqueness changed: add or drop the corresponding index.
            if field.unique:
                index = (field.column_name,), field.unique
                self.ops.append(self.migrator.add_index(model._meta.table_name, *index))
                model._meta.indexes.append(index)
            else:
                index = (field.column_name,), old_field.unique
                self.ops.append(self.migrator.drop_index(model._meta.table_name, *index))
                model._meta.indexes.remove(index)
        return model
    change_fields = change_columns
    @get_model
    def drop_columns(self, model: pw.Model, *names: str, **kwargs) -> pw.Model:
        """Remove fields from model."""
        fields = [field for field in model._meta.fields.values() if field.name in names]
        cascade = kwargs.pop('cascade', True)
        for field in fields:
            self.__del_field__(model, field)
            if field.unique:
                # Drop the implicit unique index before the column itself.
                index_name = make_index_name(model._meta.table_name, [field.column_name])
                self.ops.append(self.migrator.drop_index(model._meta.table_name, index_name))
            self.ops.append(
                self.migrator.drop_column(
                    model._meta.table_name, field.column_name, cascade=cascade))
        return model
    remove_fields = drop_columns
    def __del_field__(self, model: pw.Model, field: pw.Field):
        """Delete field from model."""
        model._meta.remove_field(field.name)
        delattr(model, field.name)
        if isinstance(field, pw.ForeignKeyField):
            # FKs expose an extra "<name>_id" attribute plus a backref on
            # the related model; remove both.
            obj_id_name = field.column_name
            if field.column_name == field.name:
                obj_id_name += '_id'
            delattr(model, obj_id_name)
            delattr(field.rel_model, field.backref)
    @get_model
    def rename_column(self, model: pw.Model, old_name: str, new_name: str) -> pw.Model:
        """Rename field in model."""
        field = model._meta.fields[old_name]
        if isinstance(field, pw.ForeignKeyField):
            old_name = field.column_name
        self.__del_field__(model, field)
        field.name = field.column_name = new_name
        model._meta.add_field(new_name, field)
        if isinstance(field, pw.ForeignKeyField):
            # FK columns carry the "_id" suffix at the database level.
            field.column_name = new_name = field.column_name + '_id'
        self.ops.append(self.migrator.rename_column(model._meta.table_name, old_name, new_name))
        return model
    rename_field = rename_column
    @get_model
    def rename_table(self, model: pw.Model, new_name: str) -> pw.Model:
        """Rename table in database."""
        old_name = model._meta.table_name
        del self.orm[model._meta.table_name]
        model._meta.table_name = new_name
        self.orm[model._meta.table_name] = model
        self.ops.append(self.migrator.rename_table(old_name, new_name))
        return model
    @get_model
    def add_index(self, model: pw.Model, *columns: str, **kwargs) -> pw.Model:
        """Create indexes."""
        unique = kwargs.pop('unique', False)
        model._meta.indexes.append((columns, unique))
        columns_ = []
        for col in columns:
            field = model._meta.fields.get(col)
            # Single-column indexes are reflected on the field itself.
            if len(columns) == 1:
                field.unique = unique
                field.index = not unique
            if isinstance(field, pw.ForeignKeyField):
                col = col + '_id'
            columns_.append(col)
        self.ops.append(self.migrator.add_index(model._meta.table_name, columns_, unique=unique))
        return model
    @get_model
    def drop_index(self, model: pw.Model, *columns: str) -> pw.Model:
        """Drop indexes."""
        columns_ = []
        for col in columns:
            field = model._meta.fields.get(col)
            if not field:
                continue
            if len(columns) == 1:
                field.unique = field.index = False
            if isinstance(field, pw.ForeignKeyField):
                col = col + '_id'
            columns_.append(col)
        index_name = make_index_name(model._meta.table_name, columns_)
        # Also remove the matching entry from the model's index metadata.
        model._meta.indexes = [(cols, _) for (cols, _) in model._meta.indexes if columns != cols]
        self.ops.append(self.migrator.drop_index(model._meta.table_name, index_name))
        return model
    @get_model
    def add_not_null(self, model: pw.Model, *names: str) -> pw.Model:
        """Add not null."""
        for name in names:
            field = model._meta.fields[name]
            field.null = False
            self.ops.append(self.migrator.add_not_null(model._meta.table_name, field.column_name))
        return model
    @get_model
    def drop_not_null(self, model: pw.Model, *names: str) -> pw.Model:
        """Drop not null."""
        for name in names:
            field = model._meta.fields[name]
            field.null = True
            self.ops.append(self.migrator.drop_not_null(model._meta.table_name, field.column_name))
        return model
    @get_model
    def add_default(self, model: pw.Model, name: str, default: t.Any) -> pw.Model:
        """Add default."""
        field = model._meta.fields[name]
        model._meta.defaults[field] = field.default = default
        self.ops.append(self.migrator.apply_default(model._meta.table_name, name, field))
        return model
# pylama:ignore=W0223,W0212,R
|
|
"""
This script is used to design the design matrix for our linear regression.
We explore the influence of linear and quadratic drifts on the model
performance.
Script for the filtered data.
Run with:
python noise-pca_filtered_script.py
from this directory
"""
from __future__ import print_function, division
import sys, os, pdb
from scipy import ndimage
from scipy.ndimage import gaussian_filter
from matplotlib import colors
from os.path import splitext
from scipy.stats import t as t_dist
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import scipy
import pprint as pp
import json
# Specify the path for functions
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
sys.path.append(os.path.join(os.path.dirname(__file__), "./"))
from smoothing import *
from diagnostics import *
from glm import *
from plot_mosaic import *
from mask_filtered_data import *
# Locate the paths
project_path = '../../../'
data_path = project_path+'data/ds005/'
# Per-dataset settings: FSL-filtered runs live in a .feat directory under
# model/model001/; raw runs live under BOLD/.
path_dict = {'data_filtered':{
    'type' : 'filtered',
    'feat' : '.feat',
    'bold_img_name' : 'filtered_func_data_mni.nii.gz',
    'run_path' : 'model/model001/'
    },
    'data_original':{
    'type' : '',
    'feat': '',
    'bold_img_name' : 'bold.nii.gz',
    'run_path' : 'BOLD/'
    }}
# TODO: uncomment for final version
#subject_list = [str(i) for i in range(1,17)]
#run_list = [str(i) for i in range(1,4)]
# Run only for subject 1 and 5 - run 1
run_list = [str(i) for i in range(1,2)]
subject_list = ['1', '5']
d_path = path_dict['data_filtered'] #OR original or filtered
# Build (image_name, image_path) pairs for every subject/run combination.
images_paths = [('ds005' + '_sub' + s.zfill(3) + '_t1r' + r, \
                 data_path + 'sub%s/'%(s.zfill(3)) + d_path['run_path'] \
                 + 'task001_run%s%s/%s' %(r.zfill(3),d_path['feat'],\
                 d_path['bold_img_name'])) \
                 for r in run_list \
                 for s in subject_list]
# set gray colormap and nearest neighbor interpolation by default
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
# Mask
# To be used with the normal data
thres = 375 #From analysis of the histograms
# To be used with the filtered data
mask_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_mask_2mm.nii'
template_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii'
# Optional output sub-directory for the non-smoothed variant of the run.
sm = ''
#sm='not_smooth/'
project_path = project_path + sm
# Create the needed directories if they do not exist
dirs = [project_path+'fig/',\
        project_path+'fig/BOLD',\
        project_path+'fig/drifts',\
        project_path+'fig/pca',\
        project_path+'fig/pca/projections/',\
        project_path+'fig/linear_model/mosaic',\
        project_path+'fig/linear_model/mosaic/middle_slice',\
        project_path+'txt_output/',\
        project_path+'txt_output/MRSS/',\
        project_path+'txt_output/pca/',\
        project_path+'txt_output/drifts/']
for d in dirs:
    if not os.path.exists(d):
        os.makedirs(d)
# Progress message
print("\nStarting noise-pca for filtered data analysis\n")
for image_path in images_paths:
name = image_path[0]
if d_path['type']=='filtered':
#in_brain_img = nib.load('../../../'+
# 'data/ds005/sub001/model/model001/task001_run001.feat/'\
# + 'masked_filtered_func_data_mni.nii.gz')
# Image shape (91, 109, 91, 240)
md = data_path + 'sub%s/'%(s.zfill(3)) + d_path['run_path'] \
+ 'task001_run%s%s/masked_%s' %(r.zfill(3),d_path['feat'],\
d_path['bold_img_name'])
if not os.path.exists(md):
print("Filtering brain image for: ")
print(str(name))
in_brain_img = make_mask_filtered_data(image_path[1],mask_path)
print("brain image filtered\n")
else:
print("Loading filtered brain image for: ")
print(str(name))
in_brain_img = nib.load(md)
print("brain image loaded\n")
data_int = in_brain_img.get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
template = nib.load(template_path)
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
Transpose = False
in_brain_mask = (mean_data - 0.0) < 0.01
plt.imshow(plot_mosaic(template_data, transpose=Transpose),\
cmap='gray', alpha=1)
else:
img = nib.load(image_path[1])
data = img.get_data()
mean_data = np.mean(data, axis=-1)
in_brain_mask = mean_data > thres
Transpose = True
plt.contour(plot_mosaic(in_brain_mask, transpose=Transpose), \
cmap='gray' , alpha=1)
# Smoothing with Gaussian filter
smooth_data = smoothing(data,1,range(data.shape[-1]))
# Selecting the voxels in the brain
in_brain_tcs = smooth_data[in_brain_mask, :]
#in_brain_tcs = data[in_brain_mask, :]
vol_shape = data.shape[:-1]
# Plotting the voxels in the brain
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.colorbar()
plt.title('In brain voxel mean values' + '\n' + (d_path['type'] + str(name)))
plt.savefig(project_path+'fig/BOLD/%s_mean_voxels.png'\
%(d_path['type'] + str(name)))
#plt.show()
#plt.clf()
# Convolution with 1 to 4 conditions
convolved = np.zeros((240,5))
for i in range(1,5):
#convolved = np.loadtxt(\
# '../../../txt_output/conv_normal/%s_conv_00%s_canonical.txt'\
# %(str(name),str(i)))
convolved[:,i] = np.loadtxt(\
'../../../txt_output/conv_high_res/%s_conv_00%s_high_res.txt'\
%(str(name),str(i)))
reg_str = ['Intercept','Task', 'Gain', 'Loss', 'Distance', 'Linear Drift',\
'Quadratic drift', 'PC#1', 'PC#2', 'PC#3', 'PC#4']
# Create design matrix X - Including drifts
P = 7 #number of regressors of X including the ones for intercept
n_trs = data.shape[-1]
X = np.ones((n_trs, P))
for i in range(1,5):
X[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X[:,6] = quadratic_drift
# Save the design matrix
np.savetxt(project_path+\
'txt_output/drifts/%s_design_matrix_with_drift.txt'\
%(d_path['type'] + str(name)), X)
# Linear Model - Including drifts
# GLM: Y is (time x voxels); betas estimated with the pseudo-inverse of X.
Y = in_brain_tcs.T
betas = npl.pinv(X).dot(Y)
# Save the betas for the linear model including drifts
np.savetxt(project_path+\
    'txt_output/drifts/%s_betas_with_drift.txt'%(d_path['type'] + str(name)), betas)
betas_vols = np.zeros(vol_shape + (P,))
betas_vols[in_brain_mask] = betas.T
# Plot
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
betas_vols[~in_brain_mask] = np.nan
# Requires the 'actc.txt' colormap file in the current working directory.
nice_cmap_values = np.loadtxt('actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# Plot each slice on the 3rd dimension of the image in a mosaic
# (k starts at 1: the intercept column is not plotted).
for k in range(1,P):
    plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
    #plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap='gray', alpha=1)
    plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
    plt.colorbar()
    plt.title('Beta (with drift) values for brain voxel related to ' \
        + str(reg_str[k]) + '\n' + d_path['type'] + str(name))
    plt.savefig(project_path+'fig/linear_model/mosaic/%s_withdrift_%s'\
        %(d_path['type'] + str(name), str(reg_str[k]))+'.png')
    plt.close()
    #plt.show()
    #plt.clf()
    #Show the middle slice only
    # NOTE(review): slice index 18 is hard-coded -- assumes this dataset's
    # volume shape; confirm before reusing with other resolutions.
    plt.imshow(betas_vols[:, :, 18, k], cmap='gray', alpha=0.5)
    plt.colorbar()
    plt.title('In brain voxel - Slice 18 \n' \
        'Projection on %s - %s'\
        %(str(reg_str[k]), d_path['type'] + str(name)))
    plt.savefig(\
        project_path+'fig/linear_model/mosaic/middle_slice/%s_withdrift_middleslice_%s'\
        %(d_path['type'] + str(name), str(k))+'.png')
    #plt.show()
    #plt.clf()
    plt.close()
# PCA Analysis
# SVD of the (time x time) unscaled covariance of the demeaned data; the
# columns of U are the temporal principal components.
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
projections = U.T.dot(Y_demeaned)
projection_vols = np.zeros(data.shape)
projection_vols[in_brain_mask, :] = projections.T
# Plot the projection of the data on the 5 first principal component
# from SVD
for i in range(1,5):
    plt.plot(U[:, i])
    plt.title('U' + str(i) + ' vector from SVD \n' + str(name))
    plt.imshow(projection_vols[:, :, 18, i])
    plt.colorbar()
    plt.title('PCA - 18th slice projection on PC#' + str(i) + ' from SVD \n ' +\
        d_path['type'] + str(name))
    plt.savefig(project_path+'fig/pca/projections/%s_PC#%s.png' \
        %((d_path['type'] + str(name),str(i))))
    #plt.show()
    #plt.clf()
    plt.close()
# Variance Explained analysis
# Each entry of s is a component's share of the total singular-value mass.
s = []
#S is diag -> trace = sum of the elements of S
for i in S:
    s.append(i/np.sum(S))
# Only the first 40 components are saved for the report.
np.savetxt(project_path+\
    'txt_output/pca/%s_variance_explained' % (d_path['type'] + str(name)) +\
    '.txt', np.array(s[:40]))
# NOTE(review): component 0 is skipped in the bar plot -- presumably
# deliberate; confirm intent. Values are fractions of the total, not
# percentages, despite the axis label below.
ind = np.arange(len(s[1:40]))
plt.bar(ind, s[1:40], width=0.5)
plt.xlabel('Principal Components indices')
plt.ylabel('Explained variance in percent')
plt.title('Variance explained graph \n' + (d_path['type'] + str(name)))
plt.savefig(project_path+\
    'fig/pca/%s_variance_explained.png' %(d_path['type'] + str(name)))
#plt.show()
plt.close()
# Linear Model - including PCs from PCA analysis
PC = 3 # Number of PCs to include in the design matrix
P_pca = P + PC
X_pca = np.ones((n_trs, P_pca))
# Columns 1-4: convolved task regressors (column 0 stays the all-ones intercept).
for i in range(1,5):
    X_pca[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X_pca[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X_pca[:,6] = quadratic_drift
# Columns 7-9: first three temporal PCs from the SVD above.
# NOTE(review): this uses U[:, 0..2] while the PCA plots above start at
# U[:, 1] and reg_str labels these columns PC#1..PC#3 -- confirm which
# indexing is intended.
for i in range(3):
    X_pca[:,7+i] = U[:, i]
# Save the design matrix - with PCs
np.savetxt(project_path+'txt_output/pca/%s_design_matrix_pca.txt'\
    %(d_path['type'] + str(name)), X_pca)
#plt.imshow(X_pca, aspect=0.25)
B_pca = npl.pinv(X_pca).dot(Y)
np.savetxt(project_path+'txt_output/pca/%s_betas_pca.txt'\
    %(d_path['type'] + str(name)), B_pca)
b_pca_vols = np.zeros(vol_shape + (P_pca,))
b_pca_vols[in_brain_mask, :] = B_pca.T
# Save betas as nii files
# Plot - with PCs
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
b_pca_vols[~in_brain_mask] = np.nan
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P_pca):
    fig = plt.figure(figsize = (8, 5))
    #plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap='gray', alpha=0.5)
    plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
    plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
    plt.colorbar()
    plt.title('Beta (with PCA) values for brain voxel related to ' \
        + str(reg_str[k]) + '\n' + d_path['type'] + str(name))
    plt.savefig(project_path+'fig/linear_model/mosaic/%s_withPCA_%s'\
        %(d_path['type'] + str(name), str(reg_str[k]))+'.png')
    #plt.show()
    plt.close()
    #Show the middle slice only
    plt.imshow(b_pca_vols[:, :, 18, k], cmap='gray', alpha=0.5)
    plt.colorbar()
    plt.title('In brain voxel model - Slice 18 \n' \
        'Projection on X%s \n %s'\
        %(str(reg_str[k]),d_path['type'] + str(name)))
    plt.savefig(\
        project_path+\
        'fig/linear_model/mosaic/middle_slice/%s_withPCA_middle_slice_%s'\
        %(d_path['type'] + str(name), str(k))+'.png')
    #plt.show()
    #plt.clf()
    plt.close()
# Residuals
# MRSS = mean residual sum of squares; used to compare the drift-only
# design (X) against the drift+PCA design (X_pca).
MRSS_dict = {}
MRSS_dict['ds005' + d_path['type']] = {}
MRSS_dict['ds005' + d_path['type']]['drifts'] = {}
MRSS_dict['ds005' + d_path['type']]['pca'] = {}
for z in MRSS_dict['ds005' + d_path['type']]:
    MRSS_dict['ds005' + d_path['type']][z]['MRSS'] = []
residuals = Y - X.dot(betas)
# Degrees of freedom: number of time points minus the rank of the design.
df = X.shape[0] - npl.matrix_rank(X)
MRSS = np.sum(residuals ** 2 , axis=0) / df
residuals_pca = Y - X_pca.dot(B_pca)
df_pca = X_pca.shape[0] - npl.matrix_rank(X_pca)
MRSS_pca = np.sum(residuals_pca ** 2 , axis=0) / df_pca
MRSS_dict['ds005' + d_path['type']]['drifts']['mean_MRSS'] = np.mean(MRSS)
MRSS_dict['ds005' + d_path['type']]['pca']['mean_MRSS'] = np.mean(MRSS_pca)
# Save the mean MRSS values to compare the performance
# of the design matrices
# NOTE(review): the loop variable ``name`` shadows the subject ``name`` used
# earlier in this script -- harmless while the t/p-value code below stays
# commented out, but rename it before reviving that code. Also note the JSON
# file is rewritten on every iteration of this loop.
for design_matrix, beta, mrss, name in \
    [(X, betas, MRSS, 'drifts'), (X_pca, B_pca, MRSS_pca, 'pca')]:
    MRSS_dict['ds005' + d_path['type']][name]['p-values'] = []
    MRSS_dict['ds005' + d_path['type']][name]['t-test'] = []
    with open(project_path+'txt_output/MRSS/ds005%s_MRSS.json'\
        %(d_path['type']), 'w') as file_out:
        json.dump(MRSS_dict, file_out)
    # SE = np.zeros(beta.shape)
    # for i in range(design_matrix.shape[-1]):
    #     c = np.zeros(design_matrix.shape[-1])
    #     c[i]=1
    #     c = np.atleast_2d(c).T
    #     SE[i,:]= np.sqrt(\
    #         mrss* c.T.dot(npl.pinv(design_matrix.T.dot(design_matrix)).dot(c)))
    # zeros = np.where(SE==0)
    # SE[zeros] = 1
    # t = beta / SE
    # t[:,zeros] = 0
    # # Get p value for t value using CDF of t didstribution
    # ltp = t_dist.cdf(abs(t), df)
    # p = 1 - ltp # upper tail
    # t_brain = t[in_brain_mask]
    # p_brain = p[in_brain_mask]
    #
    # # Save 3D data in .nii files
    # # NOTE(review): ``t-test`` / ``p-values`` below are not valid Python
    # # identifiers -- fix before un-commenting.
    # for k in range(1,4):
    #     t_nib = nib.Nifti1Image(t_brain[..., k], affine)
    #     nib.save(t-test, project_path+'txt_output/%s/%s_t-test_%s.nii.gz'\
    #         %(name, d_path['type'] + str(name),str(reg_str[k])))
    #     p_nib = nib.Nifti1Image(p_brain[..., k], affine)
    #     nib.save(p-values,project_path+'txt_output/%s/%s_p-values_%s.nii.gz'\
    #         %(name, d_path['type'] + str(name),str(reg_str[k])))
    # pdb.set_trace()
# pdb.set_trace()
plt.close()
print("=")
print("======================================")
print("\n Noise and PCA analysis for filtered data done")
print("Design Matrix including drift terms stored in project_epsilon/txt_output/drifts/ \n\n")
print("Design Matrix including PCs terms stored in project_epsilon/txt_output/pca/\n\n")
print("Mean MRSS models results in project_epsilon/txt_output/MRSS/ds005filtered_MRSS.json\n\n")
|
|
from keras import backend as K
from keras import activations
from overrides import overrides
from .word_alignment import WordAlignmentEntailment
from ..attention import WeightedSum
from ...tensors.backend import switch, apply_feed_forward
class DecomposableAttentionEntailment(WordAlignmentEntailment):
    """
    This layer is a reimplementation of the entailment algorithm described in
    "A Decomposable Attention Model for Natural Language Inference", Parikh et
    al., 2016. The algorithm has three main steps:
    (1) Attend: Compute dot products between all pairs of projections of words
        in the hypothesis and the premise, normalize those dot products to use
        them to align each word in premise to a phrase in the hypothesis and
        vice-versa. These alignments are then used to summarize the aligned
        phrase in the other sentence as a weighted sum. The initial word
        projections are computed using a feed forward NN, F.
    (2) Compare: Pass a concatenation of each word in the premise and the
        summary of its aligned phrase in the hypothesis through a feed forward
        NN, G, to get a projected comparison. Do the same with the hypothesis
        and the aligned phrase from the premise.
    (3) Aggregate: Sum over the comparisons to get a single vector each for
        premise-hypothesis comparison, and hypothesis-premise comparison. Pass
        them through a third feed forward NN (H), to get the entailment
        decision.
    This layer can take either a tuple (premise, hypothesis) or a concatenation
    of them as input.
    Input:
    - Tuple input: a premise sentence and a hypothesis sentence, both with shape ``(batch_size,
      sentence_length, embed_dim)`` and masks of shape ``(batch_size, sentence_length)``
    - Single input: a single tensor of shape ``(batch_size, sentence_length * 2, embed_dim)``, with
      a mask of shape ``(batch_size, sentence_length * 2)``, which we will split in half to get the
      premise and hypothesis sentences.
    Output:
    - Entailment decisions with the given ``output_dim``.
    Parameters
    ----------
    num_hidden_layers: int, optional (default=1)
        Number of hidden layers in each of the feed forward neural nets described above.
    hidden_layer_width: int, optional (default=50)
        Width of each hidden layer in each of the feed forward neural nets described above.
    hidden_layer_activation: str, optional (default='relu')
        Activation for each hidden layer in each of the feed forward neural nets described above.
    final_activation: str, optional (default='softmax')
        Activation to use for the final output. Should almost certainly be 'softmax'.
    output_dim: int, optional (default=3)
        Dimensionality of the final output. If this is the last layer in your model, this needs to
        be the same as the number of labels you have.
    initializer: str, optional (default='uniform')
        Will be passed to ``self.add_weight()`` for each of the weight matrices in the feed forward
        neural nets described above.
    Notes
    -----
    premise_length = hypothesis_length = sentence_length below.
    """
    def __init__(self,
                 num_hidden_layers: int=1,
                 hidden_layer_width: int=50,
                 hidden_layer_activation: str='relu',
                 final_activation: str='softmax',
                 output_dim: int=3,
                 initializer: str='uniform',
                 **kwargs):
        self.num_hidden_layers = num_hidden_layers
        self.hidden_layer_width = hidden_layer_width
        self.hidden_layer_activation = hidden_layer_activation
        self.final_activation = final_activation
        self.output_dim = output_dim
        self.initializer = initializer
        # Weights will be initialized in the build method.
        self.premise_length = None
        self.hypothesis_length = None
        self.attend_weights = []  # weights related to F
        self.compare_weights = []  # weights related to G
        self.aggregate_weights = []  # weights related to H
        self.scorer = None
        super(DecomposableAttentionEntailment, self).__init__(**kwargs)
    @overrides
    def build(self, input_shape):
        '''
        This model has three feed forward NNs (F, G and H in the paper). We assume that all three
        NNs have the same hyper-parameters: num_hidden_layers, hidden_layer_width and
        hidden_layer_activation. That is, F, G and H have the same structure and activations. Their
        actual weights are different, though. H has a separate softmax layer at the end.
        '''
        super(DecomposableAttentionEntailment, self).build(input_shape)
        if isinstance(input_shape, list):
            # input_shape is a list containing the shapes of the two inputs.
            self.premise_length = input_shape[0][1]
            self.hypothesis_length = input_shape[1][1]
            # input_dim below is embedding dim for the model in the paper since they feed embedded
            # input directly into this layer.
            self.input_dim = input_shape[0][-1]
        else:
            # NOTE: This will probably fail silently later on in this code if your premise and
            # hypothesis actually have different lengths.
            self.premise_length = self.hypothesis_length = int(input_shape[1] / 2)
            self.input_dim = input_shape[-1]
        attend_input_dim = self.input_dim
        # G sees [word; aligned summary], hence twice the embedding dim.
        compare_input_dim = 2 * self.input_dim
        # H sees the concatenated premise and hypothesis aggregates.
        aggregate_input_dim = self.hidden_layer_width * 2
        for i in range(self.num_hidden_layers):
            self.attend_weights.append(self.add_weight(shape=(attend_input_dim, self.hidden_layer_width),
                                                       name='%s_attend_%d' % (self.name, i),
                                                       initializer=self.initializer))
            self.compare_weights.append(self.add_weight(shape=(compare_input_dim, self.hidden_layer_width),
                                                        name='%s_compare_%d' % (self.name, i),
                                                        initializer=self.initializer))
            self.aggregate_weights.append(self.add_weight(shape=(aggregate_input_dim, self.hidden_layer_width),
                                                          name='%s_aggregate_%d' % (self.name, i),
                                                          initializer=self.initializer))
            # After the first layer, every hidden layer maps width -> width.
            attend_input_dim = self.hidden_layer_width
            compare_input_dim = self.hidden_layer_width
            aggregate_input_dim = self.hidden_layer_width
        self.scorer = self.add_weight(shape=(self.hidden_layer_width, self.output_dim),
                                      initializer=self.initializer,
                                      name='%s_score' % self.name)
    @overrides
    def compute_output_shape(self, input_shape):
        # (batch_size, output_dim)
        if isinstance(input_shape, list):
            return (input_shape[0][0], self.output_dim)
        else:
            return (input_shape[0], self.output_dim)
    @overrides
    def compute_mask(self, inputs, mask=None):
        # pylint: disable=unused-argument
        # The output is a per-instance decision, so no mask is propagated.
        return None
    @overrides
    def call(self, inputs, mask=None):
        # premise_length = hypothesis_length in the following lines, but the names are kept separate to keep
        # track of the axes being normalized.
        # The inputs can be a two different tensors, or a concatenation. Hence, the conditional below.
        if isinstance(inputs, list) or isinstance(inputs, tuple):
            premise_embedding, hypothesis_embedding = inputs
            # (batch_size, premise_length), (batch_size, hypothesis_length)
            premise_mask, hypothesis_mask = mask
        else:
            # Single concatenated input: split it in half along the time axis.
            premise_embedding = inputs[:, :self.premise_length, :]
            hypothesis_embedding = inputs[:, self.premise_length:, :]
            # (batch_size, premise_length), (batch_size, hypothesis_length)
            premise_mask = None if mask is None else mask[:, :self.premise_length]
            hypothesis_mask = None if mask is None else mask[:, self.premise_length:]
        # Zero out embeddings at masked positions so they do not contribute
        # to the attention sums below.
        if premise_mask is not None:
            premise_embedding = switch(K.expand_dims(premise_mask), premise_embedding,
                                       K.zeros_like(premise_embedding))
        if hypothesis_mask is not None:
            hypothesis_embedding = switch(K.expand_dims(hypothesis_mask), hypothesis_embedding,
                                          K.zeros_like(hypothesis_embedding))
        activation = activations.get(self.hidden_layer_activation)
        # Project both sentences through F before computing alignments.
        # (batch_size, premise_length, hidden_dim)
        projected_premise = apply_feed_forward(premise_embedding, self.attend_weights, activation)
        # (batch_size, hypothesis_length, hidden_dim)
        projected_hypothesis = apply_feed_forward(hypothesis_embedding, self.attend_weights, activation)
        ## Step 1: Attend
        p2h_alignment = self._align(projected_premise, projected_hypothesis, premise_mask, hypothesis_mask)
        # beta in the paper (equation 2)
        # (batch_size, premise_length, emb_dim)
        p2h_attention = self._attend(hypothesis_embedding, p2h_alignment)
        h2p_alignment = self._align(projected_hypothesis, projected_premise, hypothesis_mask, premise_mask)
        # alpha in the paper (equation 2)
        # (batch_size, hyp_length, emb_dim)
        h2p_attention = self._attend(premise_embedding, h2p_alignment)
        ## Step 2: Compare
        # Equation 3 in the paper.
        compared_premise = self._compare(premise_embedding, p2h_attention)
        compared_hypothesis = self._compare(hypothesis_embedding, h2p_attention)
        ## Step 3: Aggregate
        # Equations 4 and 5.
        # (batch_size, hidden_dim * 2)
        aggregated_input = K.concatenate([K.sum(compared_premise, axis=1), K.sum(compared_hypothesis, axis=1)])
        # (batch_size, hidden_dim)
        input_to_scorer = apply_feed_forward(aggregated_input, self.aggregate_weights, activation)
        # (batch_size, output_dim)
        final_activation = activations.get(self.final_activation)
        scores = final_activation(K.dot(input_to_scorer, self.scorer))
        return scores
    @staticmethod
    def _attend(target_embedding, s2t_alignment):
        '''
        Takes target embedding, and source-target alignment attention and produces a weighted average of the
        target embedding per each source word.
        target_embedding: (batch_size, target_length, embed_dim)
        s2t_alignment: (batch_size, source_length, target_length)
        '''
        # NOTE: This Layer was written before we had things like WeightedSum. We could probably
        # implement this whole thing a lot more easily now, but I'm just replacing bits of it at a
        # time.
        return WeightedSum().call([target_embedding, s2t_alignment])
    def _compare(self, source_embedding, s2t_attention):
        '''
        Takes word embeddings from a sentence, and aggregated representations of words aligned to each of those
        words from another sentence, and returns a projection of their concatenation.
        source_embedding: (batch_size, source_length, embed_dim)
        s2t_attention: (batch_size, source_length, embed_dim)
        '''
        activation = activations.get(self.hidden_layer_activation)
        comparison_input = K.concatenate([source_embedding, s2t_attention])
        # Equation 3 in the paper.
        compared_representation = apply_feed_forward(comparison_input, self.compare_weights, activation)
        return compared_representation
    @overrides
    def get_config(self):
        config = {
                'num_hidden_layers': self.num_hidden_layers,
                'hidden_layer_width': self.hidden_layer_width,
                'hidden_layer_activation': self.hidden_layer_activation,
                'final_activation': self.final_activation,
                'output_dim': self.output_dim,
                'initializer': self.initializer,
                }
        base_config = super(DecomposableAttentionEntailment, self).get_config()
        # NOTE(review): update(base_config) lets base-class keys win on any
        # collision; fine as long as the key sets are disjoint -- confirm.
        config.update(base_config)
        return config
|
|
import os
import sys
import pytest
from mock import Mock
import pip._internal.req.req_uninstall
from pip._internal.req.req_uninstall import (
StashedUninstallPathSet,
UninstallPathSet,
UninstallPthEntries,
compact,
compress_for_output_listing,
compress_for_rename,
uninstallation_paths,
)
from tests.lib import create_file
# Pretend all files are local, so UninstallPathSet accepts files in the tmpdir,
# outside the virtualenv
def mock_is_local(path):
    """Replacement for ``is_local`` that accepts every path (used via monkeypatch)."""
    return True
def test_uninstallation_paths():
    """RECORD rows map to removal paths; each .py entry also yields .pyc/.pyo."""
    class dist(object):
        def get_metadata_lines(self, record):
            return ['file.py,,',
                    'file.pyc,,',
                    'file.so,,',
                    'nopyc.py']
        location = ''

    expected = ['file.py',
                'file.pyc',
                'file.pyo',
                'file.so',
                'nopyc.py',
                'nopyc.pyc',
                'nopyc.pyo']
    d = dist()
    assert list(uninstallation_paths(d)) == expected
    # Avoid an easy 'unique generator' bug: a second pass over the same dist
    # must yield the same paths again.
    assert list(uninstallation_paths(d)) == expected
def test_compressed_listing(tmpdir):
    """compress_for_output_listing/compress_for_rename collapse whole dirs
    when everything under them is being removed."""
    def in_tmpdir(paths):
        # Anchor the POSIX-style sample paths under tmpdir with native separators.
        return [
            str(os.path.join(tmpdir, path.replace("/", os.path.sep)))
            for path in paths
        ]

    sample = in_tmpdir([
        "lib/mypkg.dist-info/METADATA",
        "lib/mypkg.dist-info/PKG-INFO",
        "lib/mypkg/would_be_removed.txt",
        "lib/mypkg/would_be_skipped.skip.txt",
        "lib/mypkg/__init__.py",
        "lib/mypkg/my_awesome_code.py",
        "lib/mypkg/__pycache__/my_awesome_code-magic.pyc",
        "lib/mypkg/support/support_file.py",
        "lib/mypkg/support/more_support.py",
        "lib/mypkg/support/would_be_skipped.skip.py",
        "lib/mypkg/support/__pycache__/support_file-magic.pyc",
        "lib/random_other_place/file_without_a_dot_pyc",
        "bin/mybin",
    ])
    # Materialize every sample path on disk.
    for fname in sample:
        create_file(fname, "random blub")
    # Drop the ".skip." files from the set handed to the compressors.
    sample = [path for path in sample if ".skip." not in path]
    expected_remove = in_tmpdir([
        "bin/mybin",
        "lib/mypkg.dist-info/*",
        "lib/mypkg/*",
        "lib/random_other_place/file_without_a_dot_pyc",
    ])
    expected_skip = in_tmpdir([
        "lib/mypkg/would_be_skipped.skip.txt",
        "lib/mypkg/support/would_be_skipped.skip.py",
    ])
    expected_rename = in_tmpdir([
        "bin/",
        "lib/mypkg.dist-info/",
        "lib/mypkg/would_be_removed.txt",
        "lib/mypkg/__init__.py",
        "lib/mypkg/my_awesome_code.py",
        "lib/mypkg/__pycache__/",
        "lib/mypkg/support/support_file.py",
        "lib/mypkg/support/more_support.py",
        "lib/mypkg/support/__pycache__/",
        "lib/random_other_place/",
    ])
    will_remove, will_skip = compress_for_output_listing(sample)
    will_rename = compress_for_rename(sample)
    assert sorted(compact(will_skip)) == sorted(expected_skip)
    assert sorted(compact(will_remove)) == sorted(expected_remove)
    assert sorted(compact(will_rename)) == sorted(expected_rename)
class TestUninstallPathSet(object):
    """Tests for UninstallPathSet / UninstallPthEntries path bookkeeping."""
    def test_add(self, tmpdir, monkeypatch):
        # Existing files are recorded; paths that do not exist are ignored.
        monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local',
                            mock_is_local)
        # Fix case for windows tests
        file_extant = os.path.normcase(os.path.join(tmpdir, 'foo'))
        file_nonexistent = os.path.normcase(
            os.path.join(tmpdir, 'nonexistent'))
        with open(file_extant, 'w'):
            pass
        ups = UninstallPathSet(dist=Mock())
        assert ups.paths == set()
        ups.add(file_extant)
        assert ups.paths == {file_extant}
        ups.add(file_nonexistent)
        # Adding a missing path is a no-op.
        assert ups.paths == {file_extant}
    def test_add_pth(self, tmpdir, monkeypatch):
        # Absolute, relative and (on Windows) UNC-share .pth entries are all
        # tracked for removal.
        monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local',
                            mock_is_local)
        # Fix case for windows tests
        tmpdir = os.path.normcase(tmpdir)
        on_windows = sys.platform == 'win32'
        pth_file = os.path.join(tmpdir, 'foo.pth')
        relative = '../../example'
        if on_windows:
            share = '\\\\example\\share\\'
            share_com = '\\\\example.com\\share\\'
        # Create a .pth file for testing
        with open(pth_file, 'w') as f:
            f.writelines([tmpdir, '\n',
                          relative, '\n'])
            if on_windows:
                f.writelines([share, '\n',
                              share_com, '\n'])
        # Add paths to be removed
        pth = UninstallPthEntries(pth_file)
        pth.add(tmpdir)
        pth.add(relative)
        if on_windows:
            pth.add(share)
            pth.add(share_com)
        # Check that the paths were added to entries
        if on_windows:
            check = set([tmpdir, relative, share, share_com])
        else:
            check = set([tmpdir, relative])
        assert pth.entries == check
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_add_symlink(self, tmpdir, monkeypatch):
        # A symlink is tracked as the link itself, not its target.
        monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local',
                            mock_is_local)
        f = os.path.join(tmpdir, 'foo')
        with open(f, 'w'):
            pass
        foo_link = os.path.join(tmpdir, 'foo_link')
        os.symlink(f, foo_link)
        ups = UninstallPathSet(dist=Mock())
        ups.add(foo_link)
        assert ups.paths == {foo_link}
    def test_compact_shorter_path(self, monkeypatch):
        # compact() keeps only the ancestor when one recorded path contains
        # another.
        monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local',
                            mock_is_local)
        monkeypatch.setattr('os.path.exists', lambda p: True)
        # This deals with nt/posix path differences
        short_path = os.path.normcase(os.path.abspath(
            os.path.join(os.path.sep, 'path')))
        ups = UninstallPathSet(dist=Mock())
        ups.add(short_path)
        ups.add(os.path.join(short_path, 'longer'))
        assert compact(ups.paths) == {short_path}
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_detect_symlink_dirs(self, monkeypatch, tmpdir):
        # The same file reached through a symlinked directory is recorded once.
        monkeypatch.setattr(pip._internal.req.req_uninstall, 'is_local',
                            mock_is_local)
        # construct 2 paths:
        #  tmpdir/dir/file
        #  tmpdir/dirlink/file (where dirlink is a link to dir)
        d = tmpdir.joinpath('dir')
        d.mkdir()
        dlink = tmpdir.joinpath('dirlink')
        os.symlink(d, dlink)
        d.joinpath('file').touch()
        path1 = str(d.joinpath('file'))
        path2 = str(dlink.joinpath('file'))
        ups = UninstallPathSet(dist=Mock())
        ups.add(path1)
        ups.add(path2)
        assert ups.paths == {path1}
class TestStashedUninstallPathSet(object):
    """Tests for StashedUninstallPathSet stash / commit / rollback behaviour."""
    # Simulated directory tree, consumed by mock_walk and make_stash.
    WALK_RESULT = [
        ("A", ["B", "C"], ["a.py"]),
        ("A/B", ["D"], ["b.py"]),
        ("A/B/D", [], ["c.py"]),
        ("A/C", [], ["d.py", "e.py"]),
        ("A/E", ["F"], ["f.py"]),
        ("A/E/F", [], []),
        ("A/G", ["H"], ["g.py"]),
        ("A/G/H", [], ["h.py"]),
    ]
    @classmethod
    def mock_walk(cls, root):
        # os.walk stand-in driven by WALK_RESULT; yields dirnames relative to
        # ``root`` using the native path separator.
        for dirname, subdirs, files in cls.WALK_RESULT:
            dirname = os.path.sep.join(dirname.split("/"))
            if dirname.startswith(root):
                yield dirname[len(root) + 1:], subdirs, files
    def test_compress_for_rename(self, monkeypatch):
        # compress_for_rename collapses a directory only when every file under
        # it is selected for removal.
        paths = [os.path.sep.join(p.split("/")) for p in [
            "A/B/b.py",
            "A/B/D/c.py",
            "A/C/d.py",
            "A/E/f.py",
            "A/G/g.py",
        ]]
        expected_paths = [os.path.sep.join(p.split("/")) for p in [
            "A/B/",       # selected everything below A/B
            "A/C/d.py",   # did not select everything below A/C
            "A/E/",       # only empty folders remain under A/E
            "A/G/g.py",   # non-empty folder remains under A/G
        ]]
        monkeypatch.setattr('os.walk', self.mock_walk)
        actual_paths = compress_for_rename(paths)
        assert set(expected_paths) == set(actual_paths)
    @classmethod
    def make_stash(cls, tmpdir, paths):
        # Materialize WALK_RESULT under tmpdir, then stash each of ``paths``.
        for dirname, subdirs, files in cls.WALK_RESULT:
            root = os.path.join(tmpdir, *dirname.split("/"))
            if not os.path.exists(root):
                os.mkdir(root)
            for d in subdirs:
                os.mkdir(os.path.join(root, d))
            for f in files:
                with open(os.path.join(root, f), "wb"):
                    pass
        pathset = StashedUninstallPathSet()
        paths = [os.path.join(tmpdir, *p.split('/')) for p in paths]
        stashed_paths = [(p, pathset.stash(p)) for p in paths]
        return pathset, stashed_paths
    def test_stash(self, tmpdir):
        # Stashing moves each path out of the way but keeps it recoverable.
        pathset, stashed_paths = self.make_stash(tmpdir, [
            "A/B/", "A/C/d.py", "A/E/", "A/G/g.py",
        ])
        for old_path, new_path in stashed_paths:
            assert not os.path.exists(old_path)
            assert os.path.exists(new_path)
        assert stashed_paths == pathset._moves
    def test_commit(self, tmpdir):
        # Commit permanently deletes both the originals and the stash copies.
        pathset, stashed_paths = self.make_stash(tmpdir, [
            "A/B/", "A/C/d.py", "A/E/", "A/G/g.py",
        ])
        pathset.commit()
        for old_path, new_path in stashed_paths:
            assert not os.path.exists(old_path)
            assert not os.path.exists(new_path)
    def test_rollback(self, tmpdir):
        # Rollback restores every original path and removes the stash.
        pathset, stashed_paths = self.make_stash(tmpdir, [
            "A/B/", "A/C/d.py", "A/E/", "A/G/g.py",
        ])
        pathset.rollback()
        for old_path, new_path in stashed_paths:
            assert os.path.exists(old_path)
            assert not os.path.exists(new_path)
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_commit_symlinks(self, tmpdir):
        # Stash/commit on symlinks removes the links but never their targets.
        adir = tmpdir / "dir"
        adir.mkdir()
        dirlink = tmpdir / "dirlink"
        dirlink.symlink_to(adir)
        afile = tmpdir / "file"
        afile.write_text("...")
        filelink = tmpdir / "filelink"
        filelink.symlink_to(afile)
        pathset = StashedUninstallPathSet()
        stashed_paths = []
        stashed_paths.append(pathset.stash(dirlink))
        stashed_paths.append(pathset.stash(filelink))
        for stashed_path in stashed_paths:
            assert os.path.lexists(stashed_path)
        assert not os.path.exists(dirlink)
        assert not os.path.exists(filelink)
        pathset.commit()
        # stash removed, links removed
        for stashed_path in stashed_paths:
            assert not os.path.lexists(stashed_path)
        assert not os.path.lexists(dirlink) and not os.path.isdir(dirlink)
        assert not os.path.lexists(filelink) and not os.path.isfile(filelink)
        # link targets untouched
        assert os.path.isdir(adir)
        assert os.path.isfile(afile)
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_rollback_symlinks(self, tmpdir):
        # Stash/rollback on symlinks restores the links and keeps targets intact.
        adir = tmpdir / "dir"
        adir.mkdir()
        dirlink = tmpdir / "dirlink"
        dirlink.symlink_to(adir)
        afile = tmpdir / "file"
        afile.write_text("...")
        filelink = tmpdir / "filelink"
        filelink.symlink_to(afile)
        pathset = StashedUninstallPathSet()
        stashed_paths = []
        stashed_paths.append(pathset.stash(dirlink))
        stashed_paths.append(pathset.stash(filelink))
        for stashed_path in stashed_paths:
            assert os.path.lexists(stashed_path)
        assert not os.path.lexists(dirlink)
        assert not os.path.lexists(filelink)
        pathset.rollback()
        # stash removed, links restored
        for stashed_path in stashed_paths:
            assert not os.path.lexists(stashed_path)
        assert os.path.lexists(dirlink) and os.path.isdir(dirlink)
        assert os.path.lexists(filelink) and os.path.isfile(filelink)
        # link targets untouched
        assert os.path.isdir(adir)
        assert os.path.isfile(afile)
|
|
from collections import namedtuple
import glob
import pickle
from pathlib import Path
import re
import numpy as np
import pandas as pd
from ..data.survey_utils import ExperimentType
# Project data directories, resolved relative to this module's location.
DATA_DIR = Path(__file__).parent.joinpath("../../data")
SURVEY_DIR = DATA_DIR.joinpath("raw")      # pickled questionnaire responses
CSV_DIR = DATA_DIR.joinpath("interim")     # per-run motion CSVs
# Named Tuples
RunData = namedtuple("RunData", "experiment user run")
TlxResponse = namedtuple("TlxResponse", "code raw weight score")
Coords = namedtuple("Coords", "x y")
# Goal position the participant flies toward.
TARGET = Coords(0, 6)
# Regexes
# Raw string so "\d" is a regex token rather than an invalid string escape
# (DeprecationWarning since 3.6, SyntaxWarning on 3.12+); pattern unchanged.
FILENAME_PATTERN = r"experiment-(\d)_user-(\d+)_run-(\d+)"
filename_regex = re.compile(pattern=FILENAME_PATTERN)
def distance(df, target):
    """Euclidean distance from each (xn, yn) sample in ``df`` to ``target``."""
    dx = df.xn - target.x
    dy = df.yn - target.y
    return np.sqrt(dx**2 + dy**2)
def usable_filenames():
    """Yield interim CSV paths whose name matches the run pattern and whose
    run actually reached the target (any ``arrived`` flag set).

    User 99 is the calibration/test user and is always excluded.
    """
    candidates = sorted(f for f in glob.glob(f"{CSV_DIR}/*.csv")
                        if "user-99" not in f)
    for filename in candidates:
        if not filename_regex.search(filename):
            continue
        df = pd.read_csv(filename, parse_dates=["time"])
        if any(df.arrived):
            yield filename
def extract_run_data(filename):
    """Parse (experiment, user, run) identifiers out of a data filename."""
    experiment, user, run = filename_regex.findall(filename)[0]
    return RunData(ExperimentType(int(experiment)), int(user), int(run))
def analyze_data():
    """Load every usable run CSV and compute per-run error/motion statistics.

    Returns
    -------
    results : pd.DataFrame
        Concatenation of all run time series, annotated with group, order and
        distance-to-target columns.
    analyses : pd.DataFrame
        One row of summary statistics (errors, RMS, timing, path length,
        per-axis movement) per run.
    """
    print("Loading files...")
    analyses = pd.DataFrame(columns="user experiment_type run "
                                    "dist_err x_err y_err "
                                    "dist_std x_std y_std "
                                    "rms rms_x rms_y "
                                    "idx_start idx_end t_start t_end duration "
                                    "path_length "
                                    "move_l move_r move_x "
                                    "move_b move_f move_y".split())
    records = []
    for i, filename in enumerate(usable_filenames()):
        df = pd.read_csv(filename, parse_dates=["time"])
        data = extract_run_data(filename)
        # Flip both axes so coordinates match the plotting convention.
        df.xn *= -1
        df.yn *= -1
        df["experiment_type"] = data.experiment
        df["user"] = data.user
        df["run"] = data.run
        records.append(df)
        found = df[df.arrived == 1]
        distances = distance(found, TARGET)
        dist_err = distances.mean()
        dist_std = distances.std()
        dx = found.xn - TARGET.x
        x_err = dx.mean()
        x_std = dx.std()
        dy = found.yn - TARGET.y
        y_err = dy.mean()
        y_std = dy.std()
        rms = np.sqrt(np.mean(((found.xn - TARGET.x)**2
                               + (found.yn - TARGET.y)**2)))
        rms_x = np.sqrt(np.mean((dx)**2))
        rms_y = np.sqrt(np.mean((dy)**2))
        # A run starts when z first exceeds 0.25 and ends at the first
        # "arrived" sample.
        t_start = df[df.z > 0.25].time.iloc[0]
        t_end = found.time.iloc[0]
        duration = (t_end - t_start).total_seconds()
        idx_start = df[df.time==t_start].index[0]
        idx_end = df[df.time==t_end].index[0]
        df_running = df[["xn", "yn"]].iloc[idx_start:idx_end+1]
        points = np.array(df_running)
        lengths = np.sqrt(np.sum(np.diff(points, axis=0)**2, axis=1))
        path = lengths.sum()
        diff_x = np.diff(points[:, 0])
        move_l = np.abs(np.sum(diff_x[diff_x > 0]))
        move_r = np.abs(np.sum(diff_x[diff_x < 0]))
        move_x = move_l + move_r
        diff_y = np.diff(points[:, 1])
        move_b = np.abs(np.sum(diff_y[diff_y > 0]))
        move_f = np.abs(np.sum(diff_y[diff_y < 0]))
        move_y = move_b + move_f
        # BUG FIX: the values must follow the declared column order
        # (move_l move_r move_x / move_b move_f move_y). Previously they were
        # appended as move_x, move_l, move_r, move_y, move_b, move_f, which
        # silently wrote every movement statistic into the wrong column.
        analyses.loc[i] = [
            data.user, data.experiment, data.run,
            dist_err, x_err, y_err,
            dist_std, x_std, y_std,
            rms, rms_x, rms_y,
            idx_start, idx_end, t_start, t_end, duration,
            path,
            move_l, move_r, move_x,
            move_b, move_f, move_y,
        ]
    results = pd.concat(records, ignore_index=True)
    analyses["group"] = (analyses.user % 2).astype(int)
    analyses["start"] = [ExperimentType(e % 2 + 1) for e in analyses.user]
    analyses["experiment_int"] = [e.value for e in analyses.experiment_type]
    analyses["experiment"] = [e.name for e in analyses.experiment_type]
    analyses.experiment.replace("Spirit", "SPIRIT", inplace=True)
    results["group"] = (results.user % 2).astype(int)
    results["start"] = [ExperimentType(e % 2 + 1) for e in results.user]
    results["experiment_int"] = [e.value for e in results.experiment_type]
    results["experiment"] = [e.name for e in results.experiment_type]
    results.experiment.replace("Spirit", "SPIRIT", inplace=True)
    # BUG FIX: attribute assignment (results.distance = ...) does not create
    # DataFrame columns; use item assignment so these become real columns.
    results["distance"] = distance(results, TARGET)
    results["dx"] = results.xn - TARGET.x
    results["dy"] = results.yn - TARGET.y
    # One integer per recorded run: increments whenever user, run number or
    # experiment changes between consecutive rows.
    results["total_ordering"] = (
        (results.run.diff(1) != 0)
        | (results.experiment_int.diff(1) != 0)
        | (results.user.diff(1) != 0)
    ).astype('int').cumsum() - 1
    for user in set(analyses.user):
        df = analyses[analyses.user == user].sort_values(
            by="experiment_int", ascending=np.all(analyses[analyses.user==user]
                                                  .start==ExperimentType.Onboard))
        df["order"] = range(1, len(df) + 1)
        for idx in df.index:
            analyses.loc[idx, "order"] = int(df.loc[idx, "order"])
            results.loc[results.total_ordering==idx,
                        "order"] = int(df.loc[idx, "order"])
    for col in ["user", "run", "group", "order"]:
        analyses[col] = analyses[col].astype(int)
    for col in ["arrived", "order"]:
        results[col] = results[col].astype(int)
    print("Loaded files")
    return results, analyses
def load_surveys():
    """Unpickle the raw survey data and derive the analysis DataFrames.

    Returns (users, tlx, surveys): demographics, NASA-TLX scores and
    questionnaire answers, one frame each.
    """
    print("Loading surveys...")
    # pathlib handles open/close; the whole pickle is read in one shot.
    data = pickle.loads(SURVEY_DIR.joinpath("survey_data.pkl").read_bytes())
    users = _load_users(data)
    tlx = _load_tlx(data)
    surveys = _load_surveys(data, tlx)
    print("Loaded surveys")
    return users, tlx, surveys
def _load_users(data):
return pd.DataFrame({"user_id": user.id_, "name": user.name,
"age": user.age, "gender": user.gender,
"teleop": user.teleop, "flying": user.flying}
for user in data)
def _parse_tlx_component(component):
    """Map a TLX component onto TlxResponse.

    ``raw`` carries the unweighted score; ``score`` carries the weighted one.
    """
    return TlxResponse(code=component.code,
                       raw=component.score,
                       weight=component.weight,
                       score=component.weighted_score)
def _load_tlx(data):
    """Build one NASA-TLX row per (user, experiment) from the pickled data."""
    tlx_data = []
    for user in data:
        for experiment in user.experiments:
            d = {"user": user.id_, "experiment_type": experiment.type_}
            for component in experiment.tlx.components.values():
                parsed = _parse_tlx_component(component)
                # Weighted score stored under the bare code; raw (unweighted)
                # score under "<code>_raw".
                d[parsed.code] = parsed.score
                d[f"{parsed.code}_raw"] = parsed.raw
            tlx_data.append(d)
    tlx = pd.DataFrame(tlx_data)
    tlx["group"] = tlx.user % 2
    # Overall workload: sum of the six weighted component scores.
    tlx["tlx"] = (tlx.mental + tlx.physical + tlx.temporal
                  + tlx.performance + tlx.effort + tlx.frustration)
    # assumes every user has exactly two experiments (even row count) -- TODO confirm
    tlx["order"] = [1, 2]*(len(tlx)//2)
    tlx["experiment_int"] = [e.value for e in tlx.experiment_type]
    tlx["experiment"] = [e.name for e in tlx.experiment_type]
    # NOTE(review): chained inplace replace on a column; works on current
    # pandas but breaks under copy-on-write -- consider item assignment.
    tlx.experiment.replace("Spirit", "SPIRIT", inplace=True)
    return tlx
def _load_surveys(data, tlx):
survey_data = []
for user in data:
for experiment in user.experiments:
d = {"user": user.id_, "experiment_type": experiment.type_}
d.update({i.code:i.score
for i in experiment.survey.questions.values()})
survey_data.append(d)
surveys = pd.DataFrame(survey_data)
surveys["group"] = tlx.user % 2
surveys["order"] = [1, 2]*(len(surveys)//2)
surveys["experiment"] = [e.name for e in surveys.experiment_type]
surveys["experiment_int"] = [e.value for e in surveys.experiment_type]
surveys["total"] = (surveys.orientation_understanding
+ surveys.orientation_control
+ surveys.position_understanding
+ surveys.position_control
+ surveys.spacial_understanding
+ surveys.spacial_control)
surveys.experiment.replace("Spirit", "SPIRIT", inplace=True)
return surveys
if __name__ == "__main__":
    # Entry point: build the per-run results and per-experiment analyses
    # frames (survey frames are loaded separately via load_surveys()).
    results, analyses = analyze_data()
    # analyze_differences(analyses, ["duration", "dist_err", "x_err", "y_err",
    #                                "rms_x", "rms_y"])
|
|
import logging
import math
import re
import sys
from .exceptions import PapermillException
from .models import Parameter
logger = logging.getLogger(__name__)
class PapermillTranslators(object):
    """Registry mapping kernel/language names to Translator classes.

    Used in a singleton manner: translators are registered under a name
    and later looked up by kernel name or, failing that, language name.
    """

    def __init__(self):
        # name -> Translator class
        self._translators = {}

    def register(self, language, translator):
        """Register `translator` under the kernel/language name `language`."""
        self._translators[language] = translator

    def find_translator(self, kernel_name, language):
        """Return the translator for `kernel_name`, falling back to `language`.

        Raises PapermillException when neither name is registered.
        """
        for key in (kernel_name, language):
            if key in self._translators:
                return self._translators[key]
        raise PapermillException(
            "No parameter translator functions specified for kernel '{}' or language '{}'".format(
                kernel_name, language
            )
        )
class Translator(object):
    """Base class turning parameter values into target-language source.

    Subclasses override the per-type hooks (translate_int, translate_dict,
    comment, assign, ...); `translate` dispatches on the value's type.
    """

    @classmethod
    def translate_raw_str(cls, val):
        """Render `val` unquoted; reusable by most interpreters."""
        return '{}'.format(val)

    @classmethod
    def translate_escaped_str(cls, str_val):
        """Render `str_val` as a double-quoted, escaped string literal."""
        if isinstance(str_val, str):
            str_val = str_val.encode('unicode_escape')
            # On Python 3 the escaped bytes must come back to text.
            if sys.version_info >= (3, 0):
                str_val = str_val.decode('utf-8')
            str_val = str_val.replace('"', r'\"')
        return '"{}"'.format(str_val)

    @classmethod
    def translate_str(cls, val):
        """Default string translation: escape and quote."""
        return cls.translate_escaped_str(val)

    @classmethod
    def translate_none(cls, val):
        """Default None translation: render raw."""
        return cls.translate_raw_str(val)

    @classmethod
    def translate_int(cls, val):
        """Default integer translation: render raw."""
        return cls.translate_raw_str(val)

    @classmethod
    def translate_float(cls, val):
        """Default float translation: render raw."""
        return cls.translate_raw_str(val)

    @classmethod
    def translate_bool(cls, val):
        """Default boolean translation: lowercase JSON-style literals."""
        return 'true' if val else 'false'

    @classmethod
    def translate_dict(cls, val):
        raise NotImplementedError('dict type translation not implemented for {}'.format(cls))

    @classmethod
    def translate_list(cls, val):
        raise NotImplementedError('list type translation not implemented for {}'.format(cls))

    @classmethod
    def translate(cls, val):
        """Translate each of the standard json/yaml types to appropriate source."""
        if val is None:
            return cls.translate_none(val)
        if isinstance(val, str):
            return cls.translate_str(val)
        # bool must be tested before int: bool is an int subclass.
        if isinstance(val, bool):
            return cls.translate_bool(val)
        if isinstance(val, int):
            return cls.translate_int(val)
        if isinstance(val, float):
            return cls.translate_float(val)
        if isinstance(val, dict):
            return cls.translate_dict(val)
        if isinstance(val, list):
            return cls.translate_list(val)
        # Fall back to an escaped string for anything unrecognized.
        return cls.translate_escaped_str(val)

    @classmethod
    def comment(cls, cmt_str):
        raise NotImplementedError('comment translation not implemented for {}'.format(cls))

    @classmethod
    def assign(cls, name, str_val):
        """Render an assignment statement `name = str_val`."""
        return '{} = {}'.format(name, str_val)

    @classmethod
    def codify(cls, parameters, comment='Parameters'):
        """Render a parameters cell: a comment header plus one assignment per entry."""
        lines = [cls.comment(comment)]
        lines.extend(cls.assign(name, cls.translate(val))
                     for name, val in parameters.items())
        return ''.join('{}\n'.format(line) for line in lines)

    @classmethod
    def inspect(cls, parameters_cell):
        """Inspect the parameters cell to get a Parameter list.

        It must return an empty list if no parameters are found and
        it should ignore inspection errors.

        .. note::
            ``inferred_type_name`` should be "None" if unknown (set it
            to "NoneType" for null value)

        Parameters
        ----------
        parameters_cell : NotebookNode
            Cell tagged _parameters_

        Returns
        -------
        List[Parameter]
            A list of all parameters
        """
        raise NotImplementedError('parameters introspection not implemented for {}'.format(cls))
class PythonTranslator(Translator):
    """Translate parameter values into Python source code."""
    # Pattern to capture parameters within cell input.  Named groups:
    #   target       -- the assigned variable name
    #   annotation   -- optional type annotation (possibly quoted)
    #   value        -- the assigned expression
    #   type_comment -- optional "# type: ..." comment
    #   help         -- trailing comment text, used as the parameter's help
    PARAMETER_PATTERN = re.compile(
        r"^(?P<target>\w[\w_]*)\s*(:\s*[\"']?(?P<annotation>\w[\w_\[\],\s]*)[\"']?\s*)?=\s*(?P<value>.*?)(\s*#\s*(type:\s*(?P<type_comment>[^\s]*)\s*)?(?P<help>.*))?$"  # noqa
    )
    @classmethod
    def translate_float(cls, val):
        # repr() of nan/inf is not valid Python source, so special-case them.
        if math.isfinite(val):
            return cls.translate_raw_str(val)
        elif math.isnan(val):
            return "float('nan')"
        elif val < 0:
            return "float('-inf')"
        else:
            return "float('inf')"
    @classmethod
    def translate_bool(cls, val):
        # Python spells booleans True/False, i.e. their raw str() form.
        return cls.translate_raw_str(val)
    @classmethod
    def translate_dict(cls, val):
        # Render as a dict literal, translating keys and values recursively.
        escaped = ', '.join(
            ["{}: {}".format(cls.translate_str(k), cls.translate(v)) for k, v in val.items()]
        )
        return '{{{}}}'.format(escaped)
    @classmethod
    def translate_list(cls, val):
        # Render as a list literal, translating elements recursively.
        escaped = ', '.join([cls.translate(v) for v in val])
        return '[{}]'.format(escaped)
    @classmethod
    def comment(cls, cmt_str):
        return '# {}'.format(cmt_str).strip()
    @classmethod
    def codify(cls, parameters, comment='Parameters'):
        """Render the parameters cell, then pretty-print it with Black if available."""
        content = super(PythonTranslator, cls).codify(parameters, comment)
        if sys.version_info >= (3, 6):
            try:
                # Put content through the Black Python code formatter
                import black
                fm = black.FileMode(string_normalization=False)
                content = black.format_str(content, mode=fm)
            except ImportError:
                logger.warning("Black is not installed, parameters wont be formatted")
        return content
    @classmethod
    def inspect(cls, parameters_cell):
        """Inspect the parameters cell to get a Parameter list.

        It must return an empty list if no parameters are found and
        it should ignore inspection errors.

        Parameters
        ----------
        parameters_cell : NotebookNode
            Cell tagged _parameters_

        Returns
        -------
        List[Parameter]
            A list of all parameters
        """
        params = []
        src = parameters_cell['source']
        def flatten_accumulator(accumulator):
            """Flatten a multiline variable definition.

            Removes all comments except on the last line -- that one is
            kept so it can be interpreted as the parameter's help text.

            Args:
                accumulator (List[str]): Lines composing the variable definition
            Returns:
                Flattened definition
            """
            flat_string = ""
            for line in accumulator[:-1]:
                if "#" in line:
                    comment_pos = line.index("#")
                    flat_string += line[:comment_pos].strip()
                else:
                    flat_string += line.strip()
            if len(accumulator):
                flat_string += accumulator[-1].strip()
            return flat_string
        # Some common types like dictionaries or lists can be expressed over
        # multiple lines.  To support parsing such cases, the cell lines are
        # grouped between lines actually containing an assignment.  In each
        # group the commented and empty lines are skipped, i.e. the parameter
        # help can only be given as a comment on the last definition line.
        grouped_variable = []
        accumulator = []
        for iline, line in enumerate(src.splitlines()):
            if len(line.strip()) == 0 or line.strip().startswith('#'):
                continue # Skip blank and comment
            # A line containing '=' closes the previous definition group.
            nequal = line.count("=")
            if nequal > 0:
                grouped_variable.append(flatten_accumulator(accumulator))
                accumulator = []
                # More than one '=' cannot be parsed by PARAMETER_PATTERN;
                # warn and skip the line (inspection errors are non-fatal).
                if nequal > 1:
                    logger.warning("Unable to parse line {} '{}'.".format(iline + 1, line))
                    continue
            accumulator.append(line)
        grouped_variable.append(flatten_accumulator(accumulator))
        for definition in grouped_variable:
            if len(definition) == 0:
                continue
            match = re.match(cls.PARAMETER_PATTERN, definition)
            if match is not None:
                attr = match.groupdict()
                if attr["target"] is None: # Fail to get variable name
                    continue
                # Annotation wins over a "# type:" comment; "None" if neither.
                type_name = str(attr["annotation"] or attr["type_comment"] or None)
                params.append(
                    Parameter(
                        name=attr["target"].strip(),
                        inferred_type_name=type_name.strip(),
                        default=str(attr["value"]).strip(),
                        help=str(attr["help"] or "").strip(),
                    )
                )
        return params
class RTranslator(Translator):
    """Translate parameter values into R source code."""

    @classmethod
    def translate_none(cls, val):
        """R's null value."""
        return 'NULL'

    @classmethod
    def translate_bool(cls, val):
        """R spells its boolean literals in upper case."""
        return 'TRUE' if val else 'FALSE'

    @classmethod
    def translate_dict(cls, val):
        """Render a dict as a named R list."""
        entries = ('{} = {}'.format(cls.translate_str(k), cls.translate(v))
                   for k, v in val.items())
        return 'list({})'.format(', '.join(entries))

    @classmethod
    def translate_list(cls, val):
        """Render a list as an unnamed R list."""
        return 'list({})'.format(', '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '# {}'.format(cmt_str).strip()

    @classmethod
    def assign(cls, name, str_val):
        # Leading '_' aren't legal in R variable names -- drop them on injection.
        return '{} = {}'.format(name.lstrip('_'), str_val)
class ScalaTranslator(Translator):
    """Translate parameter values into Scala source code."""

    @classmethod
    def translate_int(cls, val):
        rendered = cls.translate_raw_str(val)
        # Values outside the 32-bit Int range need the Long (L) suffix.
        if -2147483648 <= val <= 2147483647:
            return rendered
        return rendered + "L"

    @classmethod
    def translate_dict(cls, val):
        """Translate dicts to scala Maps."""
        entries = ("{} -> {}".format(cls.translate_str(k), cls.translate(v))
                   for k, v in val.items())
        return 'Map({})'.format(', '.join(entries))

    @classmethod
    def translate_list(cls, val):
        """Translate lists to scala Seqs."""
        return 'Seq({})'.format(', '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '// {}'.format(cmt_str).strip()

    @classmethod
    def assign(cls, name, str_val):
        return 'val {} = {}'.format(name, str_val)
class JuliaTranslator(Translator):
    """Translate parameter values into Julia source code."""

    @classmethod
    def translate_none(cls, val):
        return 'nothing'

    @classmethod
    def translate_dict(cls, val):
        """Render a dict as Dict(k => v, ...)."""
        pairs = ("{} => {}".format(cls.translate_str(k), cls.translate(v))
                 for k, v in val.items())
        return 'Dict({})'.format(', '.join(pairs))

    @classmethod
    def translate_list(cls, val):
        """Render a list as a Julia vector literal."""
        return '[{}]'.format(', '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '# {}'.format(cmt_str).strip()
class MatlabTranslator(Translator):
    """Translate parameter values into MATLAB source code."""

    @staticmethod
    def _escaped(text, quote):
        """Backslash-escape `text` and double embedded `quote` chars, then wrap."""
        if isinstance(text, str):
            text = text.encode('unicode_escape')
            if sys.version_info >= (3, 0):
                text = text.decode('utf-8')
            text = text.replace(quote, quote * 2)
        return '{0}{1}{0}'.format(quote, text)

    @classmethod
    def translate_escaped_str(cls, str_val):
        """Render as a MATLAB double-quoted string."""
        return cls._escaped(str_val, '"')

    @staticmethod
    def __translate_char_array(str_val):
        """Render as a MATLAB single-quoted char array."""
        return MatlabTranslator._escaped(str_val, '\'')

    @classmethod
    def translate_none(cls, val):
        """MATLAB has no null literal; None is mapped to NaN."""
        return 'NaN'

    @classmethod
    def translate_dict(cls, val):
        """Render a dict as containers.Map({keys}, {values})."""
        keys = ', '.join(cls.__translate_char_array(k) for k in val)
        vals = ', '.join(cls.translate(v) for v in val.values())
        return 'containers.Map({{{}}}, {{{}}})'.format(keys, vals)

    @classmethod
    def translate_list(cls, val):
        """Render a list as a MATLAB cell array."""
        return '{{{}}}'.format(', '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '% {}'.format(cmt_str).strip()

    @classmethod
    def codify(cls, parameters, comment='Parameters'):
        """Like the base codify, but MATLAB statements are terminated with ';'."""
        lines = [cls.comment(comment)]
        lines.extend('{};'.format(cls.assign(name, cls.translate(val)))
                     for name, val in parameters.items())
        return '\n'.join(lines) + '\n'
class CSharpTranslator(Translator):
    """Translate parameter values into C# source code."""

    @classmethod
    def translate_none(cls, val):
        # Can't figure out how to do this as nullable
        raise NotImplementedError("Option type not implemented for C#.")

    @classmethod
    def translate_bool(cls, val):
        return 'true' if val else 'false'

    @classmethod
    def translate_int(cls, val):
        rendered = cls.translate_raw_str(val)
        # Values outside the 32-bit int range need the long (L) suffix.
        if -2147483648 <= val <= 2147483647:
            return rendered
        return rendered + "L"

    @classmethod
    def translate_dict(cls, val):
        """Translate dicts to a non-typed Dictionary initializer."""
        kvps = ', '.join(
            "{{ {} , {} }}".format(cls.translate_str(k), cls.translate(v))
            for k, v in val.items()
        )
        return 'new Dictionary<string,Object>{{ {} }}'.format(kvps)

    @classmethod
    def translate_list(cls, val):
        """Translate lists to implicitly-typed arrays."""
        return 'new [] {{ {} }}'.format(', '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '// {}'.format(cmt_str).strip()

    @classmethod
    def assign(cls, name, str_val):
        return 'var {} = {};'.format(name, str_val)
class FSharpTranslator(Translator):
    """Translate parameter values into F# source code."""

    @classmethod
    def translate_none(cls, val):
        return 'None'

    @classmethod
    def translate_bool(cls, val):
        return 'true' if val else 'false'

    @classmethod
    def translate_int(cls, val):
        rendered = cls.translate_raw_str(val)
        # Values outside the 32-bit int range need the long (L) suffix.
        if -2147483648 <= val <= 2147483647:
            return rendered
        return rendered + "L"

    @classmethod
    def translate_dict(cls, val):
        """Render a dict as a list of (key, value) tuples piped to Map.ofList."""
        tuples = '; '.join(
            "({}, {} :> IComparable)".format(cls.translate_str(k), cls.translate(v))
            for k, v in val.items()
        )
        return '[ {} ] |> Map.ofList'.format(tuples)

    @classmethod
    def translate_list(cls, val):
        """Render a list as an F# list literal."""
        return '[ {} ]'.format('; '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '(* {} *)'.format(cmt_str).strip()

    @classmethod
    def assign(cls, name, str_val):
        return 'let {} = {}'.format(name, str_val)
class PowershellTranslator(Translator):
    """Translate parameter values into PowerShell source code."""

    @classmethod
    def translate_escaped_str(cls, str_val):
        """Render as a double-quoted PowerShell string (embedded " backtick-escaped)."""
        if not isinstance(str_val, str):
            return '"{}"'.format(str_val)
        escaped = str_val.encode('unicode_escape')
        if sys.version_info >= (3, 0):
            escaped = escaped.decode('utf-8')
        return '"{}"'.format(escaped.replace('"', '`"'))

    @classmethod
    def translate_float(cls, val):
        # nan/inf have no plain literal form in PowerShell.
        if math.isnan(val):
            return "[double]::NaN"
        if math.isfinite(val):
            return cls.translate_raw_str(val)
        return "[double]::NegativeInfinity" if val < 0 else "[double]::PositiveInfinity"

    @classmethod
    def translate_none(cls, val):
        return '$Null'

    @classmethod
    def translate_bool(cls, val):
        return '$True' if val else '$False'

    @classmethod
    def translate_dict(cls, val):
        """Render a dict as a PowerShell hashtable literal."""
        entries = ("{} = {}".format(cls.translate_str(k), cls.translate(v))
                   for k, v in val.items())
        return '@{{{}}}'.format('\n '.join(entries))

    @classmethod
    def translate_list(cls, val):
        """Render a list as a PowerShell array literal."""
        return '@({})'.format(', '.join(cls.translate(v) for v in val))

    @classmethod
    def comment(cls, cmt_str):
        return '# {}'.format(cmt_str).strip()

    @classmethod
    def assign(cls, name, str_val):
        return '${} = {}'.format(name, str_val)
# Instantiate the shared translator registry and register a Translator
# for each supported kernel name / language.
papermill_translators = PapermillTranslators()
papermill_translators.register("python", PythonTranslator)
papermill_translators.register("R", RTranslator)
papermill_translators.register("scala", ScalaTranslator)
papermill_translators.register("julia", JuliaTranslator)
papermill_translators.register("matlab", MatlabTranslator)
papermill_translators.register(".net-csharp", CSharpTranslator)
papermill_translators.register(".net-fsharp", FSharpTranslator)
papermill_translators.register(".net-powershell", PowershellTranslator)
# Spark magic kernels reuse the translator of their underlying language.
papermill_translators.register("pysparkkernel", PythonTranslator)
papermill_translators.register("sparkkernel", ScalaTranslator)
papermill_translators.register("sparkrkernel", RTranslator)
def translate_parameters(kernel_name, language, parameters, comment='Parameters'):
    """Render `parameters` as an assignment cell for the given kernel/language."""
    translator = papermill_translators.find_translator(kernel_name, language)
    return translator.codify(parameters, comment)
|
|
"""The tests for the REST switch platform."""
import asyncio
from http import HTTPStatus
import aiohttp
from homeassistant.components.rest import DOMAIN
import homeassistant.components.rest.switch as rest
from homeassistant.components.switch import DEVICE_CLASS_SWITCH, DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
CONF_HEADERS,
CONF_NAME,
CONF_PARAMS,
CONF_PLATFORM,
CONF_RESOURCE,
CONTENT_TYPE_JSON,
)
from homeassistant.helpers.template import Template
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
"""Tests for setting up the REST switch platform."""
NAME = "foo"
DEVICE_CLASS = DEVICE_CLASS_SWITCH
METHOD = "post"
RESOURCE = "http://localhost/"
STATE_RESOURCE = RESOURCE
HEADERS = {"Content-type": CONTENT_TYPE_JSON}
AUTH = None
PARAMS = None
async def test_setup_missing_config(hass):
    """Test setup with configuration missing required entries."""
    # Without CONF_RESOURCE the platform setup must be rejected.
    assert not await rest.async_setup_platform(hass, {CONF_PLATFORM: DOMAIN}, None)
async def test_setup_missing_schema(hass):
    """Test setup with resource missing schema."""
    # "localhost" has no http/https scheme, so setup must be rejected.
    assert not await rest.async_setup_platform(
        hass,
        {CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "localhost"},
        None,
    )
async def test_setup_failed_connect(hass, aioclient_mock):
    """Test setup when connection error occurs."""
    aioclient_mock.get("http://localhost", exc=aiohttp.ClientError)
    assert not await rest.async_setup_platform(
        hass,
        {CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "http://localhost"},
        None,
    )
async def test_setup_timeout(hass, aioclient_mock):
    """Test setup when connection timeout occurs."""
    aioclient_mock.get("http://localhost", exc=asyncio.TimeoutError())
    assert not await rest.async_setup_platform(
        hass,
        {CONF_PLATFORM: DOMAIN, CONF_RESOURCE: "http://localhost"},
        None,
    )
async def test_setup_minimum(hass, aioclient_mock):
    """Test setup with minimum configuration."""
    aioclient_mock.get("http://localhost", status=HTTPStatus.OK)
    with assert_setup_component(1, SWITCH_DOMAIN):
        assert await async_setup_component(
            hass,
            SWITCH_DOMAIN,
            {
                SWITCH_DOMAIN: {
                    CONF_PLATFORM: DOMAIN,
                    CONF_RESOURCE: "http://localhost",
                }
            },
        )
    await hass.async_block_till_done()
    # Exactly one verification GET against the configured resource.
    assert aioclient_mock.call_count == 1
async def test_setup_query_params(hass, aioclient_mock):
    """Test setup with query params."""
    aioclient_mock.get("http://localhost/?search=something", status=HTTPStatus.OK)
    with assert_setup_component(1, SWITCH_DOMAIN):
        assert await async_setup_component(
            hass,
            SWITCH_DOMAIN,
            {
                SWITCH_DOMAIN: {
                    CONF_PLATFORM: DOMAIN,
                    CONF_RESOURCE: "http://localhost",
                    CONF_PARAMS: {"search": "something"},
                }
            },
        )
    await hass.async_block_till_done()
    # The configured params must be appended to the verification GET.
    # (Removed a leftover debug print(aioclient_mock).)
    assert aioclient_mock.call_count == 1
async def test_setup(hass, aioclient_mock):
    """Test setup with valid configuration."""
    aioclient_mock.get("http://localhost", status=HTTPStatus.OK)
    # BUG FIX: assert_setup_component is a context manager (see its use in
    # test_setup_minimum); calling it bare after setup verified nothing.
    with assert_setup_component(1, SWITCH_DOMAIN):
        assert await async_setup_component(
            hass,
            SWITCH_DOMAIN,
            {
                SWITCH_DOMAIN: {
                    CONF_PLATFORM: DOMAIN,
                    CONF_NAME: "foo",
                    CONF_RESOURCE: "http://localhost",
                    CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
                    rest.CONF_BODY_ON: "custom on text",
                    rest.CONF_BODY_OFF: "custom off text",
                }
            },
        )
    await hass.async_block_till_done()
    assert aioclient_mock.call_count == 1
async def test_setup_with_state_resource(hass, aioclient_mock):
    """Test setup with a separate state resource."""
    aioclient_mock.get("http://localhost", status=HTTPStatus.NOT_FOUND)
    aioclient_mock.get("http://localhost/state", status=HTTPStatus.OK)
    # BUG FIX: assert_setup_component is a context manager (see its use in
    # test_setup_minimum); calling it bare after setup verified nothing.
    with assert_setup_component(1, SWITCH_DOMAIN):
        assert await async_setup_component(
            hass,
            SWITCH_DOMAIN,
            {
                SWITCH_DOMAIN: {
                    CONF_PLATFORM: DOMAIN,
                    CONF_NAME: "foo",
                    CONF_RESOURCE: "http://localhost",
                    rest.CONF_STATE_RESOURCE: "http://localhost/state",
                    CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
                    rest.CONF_BODY_ON: "custom on text",
                    rest.CONF_BODY_OFF: "custom off text",
                }
            },
        )
    await hass.async_block_till_done()
    assert aioclient_mock.call_count == 1
"""Tests for REST switch platform."""
def _setup_test_switch(hass):
body_on = Template("on", hass)
body_off = Template("off", hass)
switch = rest.RestSwitch(
NAME,
DEVICE_CLASS,
RESOURCE,
STATE_RESOURCE,
METHOD,
HEADERS,
PARAMS,
AUTH,
body_on,
body_off,
None,
10,
True,
)
switch.hass = hass
return switch, body_on, body_off
def test_name(hass):
    """Test the name."""
    switch, body_on, body_off = _setup_test_switch(hass)
    assert switch.name == NAME
def test_device_class(hass):
    """Test the device class."""
    switch, body_on, body_off = _setup_test_switch(hass)
    assert switch.device_class == DEVICE_CLASS
def test_is_on_before_update(hass):
    """Test is_on in initial state."""
    # Before any update or command the switch state is unknown (None).
    switch, body_on, body_off = _setup_test_switch(hass)
    assert switch.is_on is None
async def test_turn_on_success(hass, aioclient_mock):
    """Test turn_on."""
    aioclient_mock.post(RESOURCE, status=HTTPStatus.OK)
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_on()
    # The rendered body_on template must have been POSTed verbatim.
    assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
    assert switch.is_on
async def test_turn_on_status_not_ok(hass, aioclient_mock):
    """Test turn_on when error status returned."""
    aioclient_mock.post(RESOURCE, status=HTTPStatus.INTERNAL_SERVER_ERROR)
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_on()
    assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
    # On a 5xx response the state stays unknown rather than flipping on.
    assert switch.is_on is None
async def test_turn_on_timeout(hass, aioclient_mock):
    """Test turn_on when timeout occurs."""
    # BUG FIX: this test mocked a 500 response (duplicating
    # test_turn_on_status_not_ok) instead of a timeout; raise
    # asyncio.TimeoutError as test_turn_off_timeout does.
    aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_on()
    # State must remain unknown when the request never completes.
    assert switch.is_on is None
async def test_turn_off_success(hass, aioclient_mock):
    """Test turn_off."""
    aioclient_mock.post(RESOURCE, status=HTTPStatus.OK)
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_off()
    # The rendered body_off template must have been POSTed verbatim.
    assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
    assert not switch.is_on
async def test_turn_off_status_not_ok(hass, aioclient_mock):
    """Test turn_off when error status returned."""
    aioclient_mock.post(RESOURCE, status=HTTPStatus.INTERNAL_SERVER_ERROR)
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_off()
    assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
    # On a 5xx response the state stays unknown rather than flipping off.
    assert switch.is_on is None
async def test_turn_off_timeout(hass, aioclient_mock):
    """Test turn_off when timeout occurs."""
    aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
    switch, body_on, body_off = _setup_test_switch(hass)
    # BUG FIX: this test called async_turn_on(); it must exercise turn_off.
    await switch.async_turn_off()
    # State must remain unknown when the request never completes.
    assert switch.is_on is None
async def test_update_when_on(hass, aioclient_mock):
    """Test update when switch is on."""
    switch, body_on, body_off = _setup_test_switch(hass)
    # The state GET returns the body_on text, so the switch reads as on.
    aioclient_mock.get(RESOURCE, text=body_on.template)
    await switch.async_update()
    assert switch.is_on
async def test_update_when_off(hass, aioclient_mock):
    """Test update when switch is off."""
    switch, body_on, body_off = _setup_test_switch(hass)
    aioclient_mock.get(RESOURCE, text=body_off.template)
    await switch.async_update()
    assert not switch.is_on
async def test_update_when_unknown(hass, aioclient_mock):
    """Test update when unknown status returned."""
    # A body matching neither template must leave the state unknown.
    aioclient_mock.get(RESOURCE, text="unknown status")
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_update()
    assert switch.is_on is None
async def test_update_timeout(hass, aioclient_mock):
    """Test update when timeout occurs."""
    aioclient_mock.get(RESOURCE, exc=asyncio.TimeoutError())
    switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_update()
    assert switch.is_on is None
|
|
import numpy as np
import tensorflow as tf
import helpers
# TF1-style (graph mode) seq2seq setup: build the graph, then run it in an
# interactive session below.
tf.reset_default_graph()
sess = tf.InteractiveSession()
#Vocabulary size.
PAD = 0
EOS = 1
vocab_size = 50
input_embedding_size = 20 #character length
encoder_hidden_units = 20 #num neurons
decoder_hidden_units = encoder_hidden_units * 2
#in original paper, they used same number of neurons for both encoder and decoder, but we use twice
#as many so decoded output is different, the target value is the original input in this example
#input placeholders
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
encoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
#randomly initialized embedding matrix that can fit input sequence
#used to convert sequences to vectors (embeddings) for both encoder and decoder of the right size
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
# NOTE(review): this import path moved between TF releases; the commented
# alternative below is the older contrib location.
from tensorflow.python.ops.rnn_cell import LSTMCell, LSTMStateTuple
#from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import LSTMCell, LSTMStateTuple
encoder_cell_fw = LSTMCell(encoder_hidden_units)
encoder_cell_bw = LSTMCell(encoder_hidden_units)
# Bidirectional encoder; time_major=True means inputs are (max_time, batch).
((encoder_fw_outputs,encoder_bw_outputs),(encoder_fw_final_state,encoder_bw_final_state)) = (
    tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell_fw,
                                    cell_bw=encoder_cell_bw,
                                    inputs=encoder_inputs_embedded,
                                    sequence_length=encoder_inputs_length,
                                    dtype=tf.float32, time_major=True)
    )
#Concatenates tensors along one dimension.
##############print("? = ",encoder_bw_outputs)
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
#letters h and c are commonly used to denote "output value" and "cell state".
#http://colah.github.io/posts/2015-08-Understanding-LSTMs/
#Those tensors represent combined internal state of the cell, and should be passed together.
encoder_final_state_c = tf.concat(
    (encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat(
    (encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
#TF Tuple used by LSTM Cells for state_size, zero_state, and output state.
encoder_final_state = LSTMStateTuple(
    c=encoder_final_state_c,
    h=encoder_final_state_h
)
# Decoder is twice as wide because it consumes the concatenated fw+bw state.
decoder_cell = LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_inputs))
decoder_lengths = encoder_inputs_length + 3
# +2 additional steps, +1 leading <EOS> token for decoder inputs
#manually specifying since we are going to implement attention details for the decoder in a sec
#output projection weights
W = tf.Variable(tf.random_uniform([decoder_hidden_units, vocab_size], -1, 1), dtype=tf.float32)
#output projection bias
b = tf.Variable(tf.zeros([vocab_size]), dtype=tf.float32)
#create padded inputs for the decoder from the word embeddings
# Sanity-check the special-token ids the decoder logic relies on.
assert EOS == 1 and PAD == 0
eos_time_slice = tf.ones([batch_size], dtype=tf.int32, name='EOS')
pad_time_slice = tf.zeros([batch_size], dtype=tf.int32, name='PAD')
#retrieves rows of the params tensor. The behavior is similar to using indexing with arrays in numpy
eos_step_embedded = tf.nn.embedding_lookup(embeddings, eos_time_slice)
pad_step_embedded = tf.nn.embedding_lookup(embeddings, pad_time_slice)
def loop_fn_initial():
    """Produce the raw_rnn loop state for time step 0."""
    # No sequence is finished before the first decoder step.
    elements_finished = (0 >= decoder_lengths)
    # Feed the <EOS> embedding as the first decoder input, and seed the
    # decoder with the encoder's final (c, h) state.
    return (
        elements_finished,
        eos_step_embedded,
        encoder_final_state,
        None,  # no previous cell output yet
        None,  # no extra loop state is threaded through
    )
#feed forward the previously generated token as the next-timestep input
def loop_fn_transition(time, previous_output, previous_state, previous_loop_state):
    """raw_rnn callback for steps t >= 1: greedy-feed the previous prediction."""
    def get_next_input():
        # Project the previous cell output through the shared output layer.
        output_logits = tf.add(tf.matmul(previous_output, W), b)
        #Logits simply means that the function operates on the unscaled output of
        #earlier layers and that the relative scale to understand the units is linear.
        #It means, in particular, the sum of the inputs may not equal 1, that the values are not probabilities
        #(you might have an input of 5).
        # Greedy decoding: take the argmax token at the current time step.
        prediction = tf.argmax(output_logits, axis=1)
        # Embed the predicted token so it can be fed as the next input.
        next_input = tf.nn.embedding_lookup(embeddings, prediction)
        return next_input
    elements_finished = (time >= decoder_lengths) # this operation produces boolean tensor of [batch_size]
    # defining if corresponding sequence has ended
    # True only once *every* sequence in the batch has ended.
    finished = tf.reduce_all(elements_finished) # -> boolean scalar
    # Feed <PAD> once all sequences finished, otherwise the greedy prediction.
    input = tf.cond(finished, lambda: pad_step_embedded, get_next_input)
    #set previous to current
    state = previous_state
    output = previous_output
    loop_state = None
    return (elements_finished,
            input,
            state,
            output,
            loop_state)
def loop_fn(time, previous_output, previous_state, previous_loop_state):
    """raw_rnn callback: dispatch to the initial or transition handler."""
    if previous_state is not None:
        return loop_fn_transition(time, previous_output, previous_state, previous_loop_state)
    # time == 0: no output exists yet either.
    assert previous_output is None
    return loop_fn_initial()
#Creates an RNN specified by RNNCell cell and loop function loop_fn.
#This function is a more primitive version of dynamic_rnn that provides more direct access to the
#inputs each iteration. It also provides more control over when to start and finish reading the sequence,
#and what to emit for the output.
#ta = tensor array
decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(decoder_cell, loop_fn)
decoder_outputs = decoder_outputs_ta.stack()
# NOTE(review): bare expression -- a notebook-cell leftover with no effect
# when run as a script.
decoder_outputs
#to convert output to human readable prediction
#we will reshape output tensor
#Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))
#flattened output tensor
decoder_outputs_flat = tf.reshape(decoder_outputs, (-1, decoder_dim))
#pass flattened tensor through the shared output projection
decoder_logits_flat = tf.add(tf.matmul(decoder_outputs_flat, W), b)
#restore the (time, batch, vocab) shape
decoder_logits = tf.reshape(decoder_logits_flat, (decoder_max_steps, decoder_batch_size, vocab_size))
#final prediction
decoder_prediction = tf.argmax(decoder_logits, 2)
#cross entropy loss
#one hot encode the target values so we don't rank just differentiate
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
    logits=decoder_logits,
)
#loss function
loss = tf.reduce_mean(stepwise_cross_entropy)
#train it
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
# NOTE(review): this rebinds the name batch_size (previously a graph tensor
# from tf.unstack above) to a plain Python int; the graph already captured
# the tensor, so this only affects the batch generator below.
batch_size = 100
batches = helpers.random_sequences(length_from=3, length_to=8, vocab_lower=2, vocab_upper=10, batch_size=batch_size)
print("batches:", batches)
print('head of the batch:')
for seq in next(batches)[:10]:
    print(seq)
#############
def next_feed():
    """Pull the next random batch and build the session feed dict."""
    batch = next(batches)
    encoder_inputs_, encoder_input_lengths_ = helpers.batch(batch)
    # Targets are the inputs followed by <EOS> and two <PAD> steps,
    # matching decoder_lengths = encoder_inputs_length + 3.
    padded_targets = [list(sequence) + [EOS] + [PAD] * 2 for sequence in batch]
    decoder_targets_, _ = helpers.batch(padded_targets)
    return {
        encoder_inputs: encoder_inputs_,
        encoder_inputs_length: encoder_input_lengths_,
        decoder_targets: decoder_targets_,
    }
loss_track = []
max_batches = 3001
batches_in_epoch = 1000
import time
try:
    start = time.time()
    for batch in range(max_batches):
        fd = next_feed()
        _, l = sess.run([train_op, loss], fd)
        loss_track.append(l)
        # Periodically report loss plus a few decoded samples.
        if batch == 0 or batch % batches_in_epoch == 0:
            print('batch {}'.format(batch))
            if(batch != 0):
                print("Time used: ", time.time() - start)
            print(' minibatch loss: {}'.format(sess.run(loss, fd)))
            predict_ = sess.run(decoder_prediction, fd)
            # Transpose back to batch-major so each column is one sample.
            for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
                print(' sample {}:'.format(i + 1))
                print(' input > {}'.format(inp))
                print(' predicted > {}'.format(pred))
                if i >= 2:
                    break
            print()
        start = time.time()
except KeyboardInterrupt:
    # Allow Ctrl-C to stop training without losing the process.
    print('training interrupted')
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import re
from oslo_utils import timeutils
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest import clients
from tempest.common import cred_provider
from tempest import config
from tempest import test
CONF = config.CONF
class BaseTrustsV3Test(base.BaseIdentityV3AdminTest):
    """Shared fixture for Keystone v3 OS-TRUST tests.

    Creates a trustor user/project and two roles, builds trusts from the
    trustor to the configured trustee, and validates trust and role data
    returned by the API.
    """
    def setUp(self):
        super(BaseTrustsV3Test, self).setUp()
        # Use alt_username as the trustee
        if not CONF.identity_feature_enabled.trust:
            raise self.skipException("Trusts aren't enabled")
        self.trustee_username = CONF.identity.alt_username
        # Set by create_trust(); tearDown deletes it when non-None.
        self.trust_id = None
    def tearDown(self):
        if self.trust_id:
            # Do the delete in tearDown not addCleanup - we want the test to
            # fail in the event there is a bug which causes undeletable trusts
            self.delete_trust()
        super(BaseTrustsV3Test, self).tearDown()
    def create_trustor_and_roles(self):
        """Create trustor project/user, two roles, and a trustor client."""
        # create a project that trusts will be granted on
        self.trustor_project_name = data_utils.rand_name(name='project')
        project = self.client.create_project(self.trustor_project_name,
                                             domain_id='default')
        self.trustor_project_id = project['id']
        self.assertIsNotNone(self.trustor_project_id)
        # Create a trustor User
        self.trustor_username = data_utils.rand_name('user')
        u_desc = self.trustor_username + 'description'
        u_email = self.trustor_username + '@testmail.xx'
        self.trustor_password = data_utils.rand_name('pass')
        user = self.client.create_user(
            self.trustor_username,
            description=u_desc,
            password=self.trustor_password,
            email=u_email,
            project_id=self.trustor_project_id,
            domain_id='default')
        self.trustor_user_id = user['id']
        # And two roles, one we'll delegate and one we won't
        self.delegated_role = data_utils.rand_name('DelegatedRole')
        self.not_delegated_role = data_utils.rand_name('NotDelegatedRole')
        role = self.client.create_role(self.delegated_role)
        self.delegated_role_id = role['id']
        role = self.client.create_role(self.not_delegated_role)
        self.not_delegated_role_id = role['id']
        # Assign roles to trustor
        self.client.assign_user_role(self.trustor_project_id,
                                     self.trustor_user_id,
                                     self.delegated_role_id)
        self.client.assign_user_role(self.trustor_project_id,
                                     self.trustor_user_id,
                                     self.not_delegated_role_id)
        # Get trustee user ID, use the demo user
        trustee_username = self.non_admin_client.user
        self.trustee_user_id = self.get_user_by_name(trustee_username)['id']
        self.assertIsNotNone(self.trustee_user_id)
        # Initialize a new client with the trustor credentials
        creds = cred_provider.get_credentials(
            identity_version='v3',
            username=self.trustor_username,
            password=self.trustor_password,
            user_domain_id='default',
            tenant_name=self.trustor_project_name,
            project_domain_id='default')
        os = clients.Manager(credentials=creds)
        self.trustor_client = os.identity_v3_client
    def cleanup_user_and_roles(self):
        """Delete the user, project and roles made by create_trustor_and_roles."""
        if self.trustor_user_id:
            self.client.delete_user(self.trustor_user_id)
        if self.trustor_project_id:
            self.client.delete_project(self.trustor_project_id)
        if self.delegated_role_id:
            self.client.delete_role(self.delegated_role_id)
        if self.not_delegated_role_id:
            self.client.delete_role(self.not_delegated_role_id)
    def create_trust(self, impersonate=True, expires=None):
        """Create a trust delegating only self.delegated_role; returns it."""
        trust_create = self.trustor_client.create_trust(
            trustor_user_id=self.trustor_user_id,
            trustee_user_id=self.trustee_user_id,
            project_id=self.trustor_project_id,
            role_names=[self.delegated_role],
            impersonation=impersonate,
            expires_at=expires)
        self.trust_id = trust_create['id']
        return trust_create
    def validate_trust(self, trust, impersonate=True, expires=None,
                       summary=False):
        """Assert a trust dict matches what create_trust requested.

        summary=True skips the role checks (list responses omit roles).
        """
        self.assertIsNotNone(trust['id'])
        self.assertEqual(impersonate, trust['impersonation'])
        if expires is not None:
            # Omit microseconds component of the expiry time
            trust_expires_at = re.sub(r'\.([0-9]){6}', '', trust['expires_at'])
            self.assertEqual(expires, trust_expires_at)
        else:
            self.assertIsNone(trust['expires_at'])
        self.assertEqual(self.trustor_user_id, trust['trustor_user_id'])
        self.assertEqual(self.trustee_user_id, trust['trustee_user_id'])
        self.assertIn('v3/OS-TRUST/trusts', trust['links']['self'])
        self.assertEqual(self.trustor_project_id, trust['project_id'])
        if not summary:
            self.assertEqual(self.delegated_role, trust['roles'][0]['name'])
            self.assertEqual(1, len(trust['roles']))
    def get_trust(self):
        """Fetch the current trust via the trustor client."""
        trust_get = self.trustor_client.get_trust(self.trust_id)
        return trust_get
    def validate_role(self, role):
        """Assert a role dict is the delegated role (and not the other one)."""
        self.assertEqual(self.delegated_role_id, role['id'])
        self.assertEqual(self.delegated_role, role['name'])
        self.assertIn('v3/roles/%s' % self.delegated_role_id,
                      role['links']['self'])
        self.assertNotEqual(self.not_delegated_role_id, role['id'])
        self.assertNotEqual(self.not_delegated_role, role['name'])
        self.assertNotIn('v3/roles/%s' % self.not_delegated_role_id,
                         role['links']['self'])
    def check_trust_roles(self):
        """Verify role list/get/check endpoints expose only the delegated role."""
        # Check we find the delegated role
        roles_get = self.trustor_client.get_trust_roles(
            self.trust_id)
        self.assertEqual(1, len(roles_get))
        self.validate_role(roles_get[0])
        role_get = self.trustor_client.get_trust_role(
            self.trust_id, self.delegated_role_id)
        self.validate_role(role_get)
        role_get = self.trustor_client.check_trust_role(
            self.trust_id, self.delegated_role_id)
        # And that we don't find not_delegated_role
        self.assertRaises(lib_exc.NotFound,
                          self.trustor_client.get_trust_role,
                          self.trust_id,
                          self.not_delegated_role_id)
        self.assertRaises(lib_exc.NotFound,
                          self.trustor_client.check_trust_role,
                          self.trust_id,
                          self.not_delegated_role_id)
    def delete_trust(self):
        """Delete the trust and verify subsequent GETs return 404."""
        self.trustor_client.delete_trust(self.trust_id)
        self.assertRaises(lib_exc.NotFound,
                          self.trustor_client.get_trust,
                          self.trust_id)
        self.trust_id = None
class TrustsV3TestJSON(BaseTrustsV3Test):
    """CRUD-style tests for Keystone v3 trusts (no update API exists)."""
    def setUp(self):
        super(TrustsV3TestJSON, self).setUp()
        self.create_trustor_and_roles()
        self.addCleanup(self.cleanup_user_and_roles)
    @test.idempotent_id('5a0a91a4-baef-4a14-baba-59bf4d7fcace')
    def test_trust_impersonate(self):
        # Test case to check we can create, get and delete a trust
        # updates are not supported for trusts
        trust = self.create_trust()
        self.validate_trust(trust)
        trust_get = self.get_trust()
        self.validate_trust(trust_get)
        self.check_trust_roles()
    @test.idempotent_id('ed2a8779-a7ac-49dc-afd7-30f32f936ed2')
    def test_trust_noimpersonate(self):
        # Test case to check we can create, get and delete a trust
        # with impersonation=False
        trust = self.create_trust(impersonate=False)
        self.validate_trust(trust, impersonate=False)
        trust_get = self.get_trust()
        self.validate_trust(trust_get, impersonate=False)
        self.check_trust_roles()
    @test.idempotent_id('0ed14b66-cefd-4b5c-a964-65759453e292')
    def test_trust_expire(self):
        # Test case to check we can create, get and delete a trust
        # with an expiry specified
        expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)
        # NOTE(ylobankov) In some cases the expiry time may be rounded up
        # because of microseconds. In fact, it depends on database and its
        # version. At least MySQL 5.6.16 does this.
        # For example, when creating a trust, we will set the expiry time of
        # the trust to 2015-02-17T17:34:01.907051Z. However, if we make a GET
        # request on the trust, the response will contain the time rounded up
        # to 2015-02-17T17:34:02.000000Z. That is why we shouldn't set flag
        # "subsecond" to True when we invoke timeutils.isotime(...) to avoid
        # problems with rounding.
        expires_str = timeutils.isotime(at=expires_at)
        trust = self.create_trust(expires=expires_str)
        self.validate_trust(trust, expires=expires_str)
        trust_get = self.get_trust()
        self.validate_trust(trust_get, expires=expires_str)
        self.check_trust_roles()
    @test.idempotent_id('3e48f95d-e660-4fa9-85e0-5a3d85594384')
    def test_trust_expire_invalid(self):
        # Test case to check that an invalid expiry time
        # is rejected with the correct error
        expires_str = 'bad.123Z'
        self.assertRaises(lib_exc.BadRequest,
                          self.create_trust,
                          expires=expires_str)
    @test.idempotent_id('6268b345-87ca-47c0-9ce3-37792b43403a')
    def test_get_trusts_query(self):
        # Listing filtered by trustor should return exactly our trust.
        self.create_trust()
        trusts_get = self.trustor_client.get_trusts(
            trustor_user_id=self.trustor_user_id)
        self.assertEqual(1, len(trusts_get))
        self.validate_trust(trusts_get[0], summary=True)
    @test.attr(type='smoke')
    @test.idempotent_id('4773ebd5-ecbf-4255-b8d8-b63e6f72b65d')
    def test_get_trusts_all(self):
        # An unfiltered (admin) listing should contain our trust.
        self.create_trust()
        trusts_get = self.client.get_trusts()
        trusts = [t for t in trusts_get
                  if t['id'] == self.trust_id]
        self.assertEqual(1, len(trusts))
        self.validate_trust(trusts[0], summary=True)
|
|
import logging
from io import BytesIO
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.resource.subscriptions import SubscriptionClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage.blob import BlockBlobService
from azure.storage.table import TableService
log = logging.getLogger(__name__)
class AzureClient(object):
    """
    Azure client is the wrapper on top of azure python sdk

    All management clients and storage services are created lazily from the
    service-principal credentials in ``config``; most methods are thin
    pass-throughs that operate inside the configured resource group.
    """
    def __init__(self, config):
        # config: dict-like with azure_subscription_id, azure_client_id,
        # azure_secret, azure_tenant and the azure_* settings read by the
        # properties below.
        self._config = config
        self.subscription_id = config.get('azure_subscription_id')
        self._credentials = ServicePrincipalCredentials(
            client_id=config.get('azure_client_id'),
            secret=config.get('azure_secret'),
            tenant=config.get('azure_tenant')
        )
        # Lazily-initialised SDK clients (see the properties below).
        self._resource_client = None
        self._storage_client = None
        self._network_management_client = None
        self._subscription_client = None
        self._compute_client = None
        self._access_key_result = None
        self._block_blob_service = None
        self._table_service = None
        log.debug("azure subscription : %s", self.subscription_id)
    @property
    def access_key_result(self):
        # Cached storage-account key listing; fetched once per client.
        if not self._access_key_result:
            self._access_key_result = self.storage_client.storage_accounts. \
                list_keys(self.resource_group, self.storage_account)
        return self._access_key_result
    @property
    def resource_group(self):
        return self._config.get('azure_resource_group')
    @property
    def storage_account(self):
        return self._config.get('azure_storage_account')
    @property
    def region_name(self):
        return self._config.get('azure_region_name')
    @property
    def public_key_storage_table_name(self):
        return self._config.get('azure_public_key_storage_table_name')
    @property
    def storage_client(self):
        # Lazy StorageManagementClient.
        if not self._storage_client:
            self._storage_client = \
                StorageManagementClient(self._credentials,
                                        self.subscription_id)
        return self._storage_client
    @property
    def subscription_client(self):
        # Lazy SubscriptionClient (no subscription id needed).
        if not self._subscription_client:
            self._subscription_client = SubscriptionClient(self._credentials)
        return self._subscription_client
    @property
    def resource_client(self):
        # Lazy ResourceManagementClient.
        if not self._resource_client:
            self._resource_client = \
                ResourceManagementClient(self._credentials,
                                         self.subscription_id)
        return self._resource_client
    @property
    def compute_client(self):
        # Lazy ComputeManagementClient.
        if not self._compute_client:
            self._compute_client = \
                ComputeManagementClient(self._credentials,
                                        self.subscription_id)
        return self._compute_client
    @property
    def network_management_client(self):
        # Lazy NetworkManagementClient.
        if not self._network_management_client:
            self._network_management_client = NetworkManagementClient(
                self._credentials, self.subscription_id)
        return self._network_management_client
    @property
    def blob_service(self):
        # Blob service authenticated with the first storage-account key.
        if not self._block_blob_service:
            self._block_blob_service = BlockBlobService(
                self.storage_account,
                self.access_key_result.keys[0].value)
        return self._block_blob_service
    @property
    def table_service(self):
        # Table service; also ensures the public-key table exists on first use.
        if not self._table_service:
            self._table_service = TableService(
                self.storage_account,
                self.access_key_result.keys[0].value)
        if not self._table_service. \
                exists(table_name=self.public_key_storage_table_name):
            self._table_service.create_table(
                self.public_key_storage_table_name)
        return self._table_service
    # --- resource groups / storage accounts ---------------------------------
    def get_resource_group(self, name):
        return self.resource_client.resource_groups.get(name)
    def create_resource_group(self, name, parameters):
        return self.resource_client.resource_groups. \
            create_or_update(name, parameters)
    def get_storage_account(self, storage_account):
        return self.storage_client.storage_accounts. \
            get_properties(self.resource_group, storage_account)
    def create_storage_account(self, name, params):
        # Storage account names must be lower case; blocks until created.
        return self.storage_client.storage_accounts. \
            create(self.resource_group, name.lower(), params).result()
    def list_locations(self):
        return self.subscription_client.subscriptions. \
            list_locations(self.subscription_id)
    # --- network security groups and rules ----------------------------------
    def list_security_group(self):
        return self.network_management_client.network_security_groups. \
            list(self.resource_group)
    def create_security_group(self, name, parameters):
        return self.network_management_client.network_security_groups. \
            create_or_update(self.resource_group, name,
                             parameters).result()
    def update_security_group_tags(self, name, tags):
        # NOTE(review): create_or_update with only {'tags': ...} presumably
        # patches tags without clearing other properties — confirm with the
        # installed azure-mgmt-network version.
        return self.network_management_client.network_security_groups. \
            create_or_update(self.resource_group, name,
                             {'tags': tags}).result()
    def create_security_group_rule(self, security_group,
                                   rule_name, parameters):
        return self.network_management_client.security_rules. \
            create_or_update(self.resource_group, security_group,
                             rule_name, parameters).result()
    def delete_security_group_rule(self, name, security_group):
        return self.network_management_client.security_rules. \
            delete(self.resource_group, security_group, name).result()
    def get_security_group(self, name):
        return self.network_management_client.network_security_groups. \
            get(self.resource_group, name)
    def delete_security_group(self, name):
        delete_async = self.network_management_client \
            .network_security_groups. \
            delete(self.resource_group, name)
        delete_async.wait()
    # --- blob containers and blobs ------------------------------------------
    def list_containers(self, prefix=None):
        return self.blob_service.list_containers(prefix=prefix)
    def create_container(self, container_name):
        self.blob_service.create_container(container_name)
        return self.blob_service.get_container_properties(container_name)
    def get_container(self, container_name):
        return self.blob_service.get_container_properties(container_name)
    def delete_container(self, container_name):
        self.blob_service.delete_container(container_name)
    def list_blobs(self, container_name, prefix=None):
        return self.blob_service.list_blobs(container_name, prefix=prefix)
    def get_blob(self, container_name, blob_name):
        return self.blob_service.get_blob_properties(container_name, blob_name)
    def create_blob_from_text(self, container_name, blob_name, text):
        self.blob_service.create_blob_from_text(container_name,
                                                blob_name, text)
    def create_blob_from_file(self, container_name, blob_name, file_path):
        self.blob_service.create_blob_from_path(container_name,
                                                blob_name, file_path)
    def delete_blob(self, container_name, blob_name):
        self.blob_service.delete_blob(container_name, blob_name)
    def get_blob_url(self, container_name, blob_name):
        return self.blob_service.make_blob_url(container_name, blob_name)
    def get_blob_content(self, container_name, blob_name):
        # Returns a BytesIO positioned at end-of-stream (caller should seek(0)).
        out_stream = BytesIO()
        self.blob_service.get_blob_to_stream(container_name,
                                             blob_name, out_stream)
        return out_stream
    # --- managed disks and snapshots ----------------------------------------
    def create_empty_disk(self, disk_name, params):
        # NOTE(review): identical body to create_snapshot_disk below; the
        # distinction (empty vs from-snapshot) lives entirely in `params`.
        return self.compute_client.disks.create_or_update(
            self.resource_group,
            disk_name,
            params,
            raw=True
        )
    def create_snapshot_disk(self, disk_name, params):
        return self.compute_client.disks.create_or_update(
            self.resource_group,
            disk_name,
            params,
            raw=True
        )
    def list_snapshots(self):
        return self.compute_client.snapshots. \
            list_by_resource_group(self.resource_group)
    def update_disk_tags(self, disk_name, tags):
        return self.compute_client.disks.update(
            self.resource_group,
            disk_name,
            {'tags': tags},
            raw=True
        )
    def get_disk(self, disk_name):
        return self.compute_client.disks. \
            get(self.resource_group, disk_name)
    # --- virtual networks / floating IPs ------------------------------------
    def list_networks(self):
        return self.network_management_client.virtual_networks.list(
            self.resource_group)
    def get_network(self, network_name):
        return self.network_management_client.virtual_networks.get(
            self.resource_group, network_name)
    def create_network(self, name, params):
        return self.network_management_client.virtual_networks. \
            create_or_update(self.resource_group,
                             name,
                             parameters=params,
                             raw=True)
    def delete_network(self, network_name):
        return self.network_management_client.virtual_networks. \
            delete(self.resource_group, network_name).wait()
    def create_floating_ip(self, public_ip_name, public_ip_parameters):
        return self.network_management_client.public_ip_addresses. \
            create_or_update(self.resource_group,
                             public_ip_name,
                             public_ip_parameters).result()
    def delete_floating_ip(self, public_ip_address_name):
        return self.network_management_client.public_ip_addresses. \
            delete(self.resource_group,
                   public_ip_address_name).result()
    def list_floating_ips(self):
        return self.network_management_client.public_ip_addresses.list(
            self.resource_group)
    def update_network_tags(self, network_name, tags):
        return self.network_management_client.virtual_networks. \
            create_or_update(self.resource_group,
                             network_name,
                             {
                                 'tags': tags
                             })
    def list_disks(self):
        return self.compute_client.disks. \
            list_by_resource_group(self.resource_group)
    def delete_disk(self, disk_name):
        async_deletion = self.compute_client.disks. \
            delete(self.resource_group, disk_name)
        async_deletion.wait()
    def get_snapshot(self, snapshot_name):
        return self.compute_client.snapshots.get(self.resource_group,
                                                 snapshot_name)
    def create_snapshot(self, snapshot_name, params):
        return self.compute_client.snapshots.create_or_update(
            self.resource_group,
            snapshot_name,
            params,
            raw=True
        )
    def delete_snapshot(self, snapshot_name):
        async_delete = self.compute_client.snapshots. \
            delete(self.resource_group, snapshot_name)
        async_delete.wait()
    def update_snapshot_tags(self, snapshot_name, tags):
        return self.compute_client.snapshots.update(
            self.resource_group,
            snapshot_name,
            {'tags': tags},
            raw=True
        )
    # --- images --------------------------------------------------------------
    def create_image(self, name, params):
        return self.compute_client.images. \
            create_or_update(self.resource_group, name,
                             params, raw=True)
    def delete_image(self, name):
        self.compute_client.images. \
            delete(self.resource_group, name).wait()
    def list_images(self):
        return self.compute_client.images. \
            list_by_resource_group(self.resource_group)
    def get_image(self, image_name):
        return self.compute_client.images. \
            get(self.resource_group, image_name)
    def update_image_tags(self, name, tags):
        return self.compute_client.images. \
            create_or_update(self.resource_group, name,
                             {
                                 'tags': tags
                             }).result()
    def list_instance_types(self):
        return self.compute_client.virtual_machine_sizes. \
            list(self.region_name)
    # --- subnets --------------------------------------------------------------
    def list_subnets(self, network_name):
        return self.network_management_client.subnets. \
            list(self.resource_group, network_name)
    def get_subnet(self, network_name, subnet_name):
        return self.network_management_client.subnets. \
            get(self.resource_group, network_name, subnet_name)
    def create_subnet(self, network_name,
                      subnet_name, params):
        result_create = self.network_management_client \
            .subnets.create_or_update(
                self.resource_group,
                network_name,
                subnet_name,
                params
            )
        subnet_info = result_create.result()
        return subnet_info
    def delete_subnet(self, network_name, subnet_name):
        result_delete = self.network_management_client \
            .subnets.delete(
                self.resource_group,
                network_name,
                subnet_name
            )
        result_delete.wait()
    # --- virtual machines -----------------------------------------------------
    def list_vm(self):
        return self.compute_client.virtual_machines.list(
            self.resource_group
        )
    def restart_vm(self, vm_name):
        return self.compute_client.virtual_machines.restart(
            self.resource_group,
            vm_name
        ).wait()
    def delete_vm(self, vm_name):
        return self.compute_client.virtual_machines.delete(
            self.resource_group,
            vm_name
        ).wait()
    def get_vm(self, vm_name):
        # expand='instanceView' includes runtime state (power state, etc.).
        return self.compute_client.virtual_machines.get(
            self.resource_group,
            vm_name,
            expand='instanceView'
        )
    def create_vm(self, vm_name, params):
        return self.compute_client.virtual_machines. \
            create_or_update(self.resource_group,
                             vm_name, params, raw=True)
    def deallocate_vm(self, vm_name):
        self.compute_client. \
            virtual_machines.deallocate(self.resource_group,
                                        vm_name).wait()
    def generalize_vm(self, vm_name):
        self.compute_client.virtual_machines. \
            generalize(self.resource_group, vm_name)
    def start_vm(self, vm_name):
        self.compute_client.virtual_machines. \
            start(self.resource_group,
                  vm_name).wait()
    def update_vm_tags(self, vm_name, tags):
        self.compute_client.virtual_machines. \
            create_or_update(self.resource_group,
                             vm_name, {'tags': tags})
    # --- network interfaces / public IPs -------------------------------------
    def delete_nic(self, nic_name):
        self.network_management_client. \
            network_interfaces.delete(self.resource_group,
                                      nic_name).wait()
    def get_nic(self, name):
        return self.network_management_client. \
            network_interfaces.get(self.resource_group, name)
    def create_nic(self, nic_name, params):
        async_nic_creation = self.network_management_client. \
            network_interfaces.create_or_update(
                self.resource_group,
                nic_name,
                params
            )
        nic_info = async_nic_creation.result()
        return nic_info
    def get_public_ip(self, name):
        return self.network_management_client. \
            public_ip_addresses.get(self.resource_group, name)
    def delete_public_ip(self, public_ip_name):
        self.network_management_client. \
            public_ip_addresses.delete(self.resource_group,
                                       public_ip_name).wait()
    # --- public keys stored in the table service ------------------------------
    def create_public_key(self, entity):
        return self.table_service. \
            insert_or_replace_entity(self.public_key_storage_table_name,
                                     entity)
    def get_public_key(self, name):
        entities = self.table_service. \
            query_entities(self.public_key_storage_table_name,
                           "Name eq '{0}'".format(name), num_results=1)
        return entities.items[0] if len(entities.items) > 0 else None
    def delete_public_key(self, entity):
        self.table_service.delete_entity(self.public_key_storage_table_name,
                                         entity.PartitionKey, entity.RowKey)
    def list_public_keys(self, partition_key):
        # Pages through the table one entity at a time using continuation
        # markers.  NOTE(review): num_results=1 looks unintentionally small
        # for a listing — confirm whether a larger page size was intended.
        items = []
        next_marker = None
        while True:
            entities = self.table_service. \
                query_entities(self.public_key_storage_table_name,
                               "PartitionKey eq '{0}'".format(partition_key),
                               marker=next_marker, num_results=1)
            items.extend(entities.items)
            next_marker = entities.next_marker
            if not next_marker:
                break
        return items
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-07 17:05:11
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from pyspider.libs import counter, utils
from pyspider.libs.base_handler import BaseHandler
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Project(object):
    '''
    project for scheduler

    Wraps one projectdb record with its per-project task queue and the
    runtime flags the scheduler needs (pause state, pending get_info, etc.).
    '''
    def __init__(self, project_info, ACTIVE_TASKS=100):
        '''
        Build the runtime state for one project.

        project_info: projectdb record (dict with name/group/status/script/...).
        ACTIVE_TASKS: max length of the recently-active-tasks ring buffer.
        '''
        self.paused = False
        # Ring buffer of recently processed tasks (bounded by ACTIVE_TASKS).
        self.active_tasks = deque(maxlen=ACTIVE_TASKS)
        self.task_queue = TaskQueue()
        self.task_loaded = False
        self._send_finished_event = False
        # md5 of the project script; a change triggers a _on_get_info refresh.
        self.md5sum = None
        self._send_on_get_info = False
        self.waiting_get_info = True
        self.update(project_info)
    def update(self, project_info):
        # Refresh cached fields from a (possibly changed) projectdb record.
        self.project_info = project_info
        self.name = project_info['name']
        self.group = project_info['group']
        self.db_status = project_info['status']
        self.updatetime = project_info['updatetime']
        md5sum = utils.md5string(project_info['script'])
        # Script changed (or info never fetched) on an active project:
        # schedule a _on_get_info round trip through the processor.
        if (self.md5sum != md5sum or self.waiting_get_info) and self.active:
            self._send_on_get_info = True
            self.waiting_get_info = True
        self.md5sum = md5sum
        # Inactive projects get a zero rate/burst so nothing is dispatched.
        if self.active:
            self.task_queue.rate = project_info['rate']
            self.task_queue.burst = project_info['burst']
        else:
            self.task_queue.rate = 0
            self.task_queue.burst = 0
    def on_get_info(self, info):
        # Apply runtime info returned by the processor's _on_get_info callback.
        self.waiting_get_info = False
        self.min_tick = info.get('min_tick', 0)
        self.retry_delay = info.get('retry_delay', {})
        self.crawl_config = info.get('crawl_config', {})
    @property
    def active(self):
        # A project is schedulable when RUNNING/DEBUG and not manually paused.
        return self.db_status in ('RUNNING', 'DEBUG') and not self.paused
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
    def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
                 out_queue, data_path='./data', resultdb=None):
        """Wire the scheduler to its databases and queues.

        newtask_queue: incoming new/changed tasks; status_queue: task status
        reports; out_queue: tasks dispatched to the fetcher.  Counter state
        is persisted under data_path.
        """
        self.taskdb = taskdb
        self.projectdb = projectdb
        self.resultdb = resultdb
        self.newtask_queue = newtask_queue
        self.status_queue = status_queue
        self.out_queue = out_queue
        self.data_path = data_path
        # Holds tasks that could not be pushed to a full out_queue.
        self._send_buffer = deque()
        self._quit = False
        self._exceptions = 0
        # name -> Project runtime state
        self.projects = dict()
        self._force_update_project = False
        self._last_update_project = 0
        self._last_tick = int(time.time())
        # Status reports for tasks still marked processing; retried later.
        self._postpone_request = []
        # Event counters bucketed by time window; 1h/1d/all are persisted.
        self._cnt = {
            "5m_time": counter.CounterManager(
                lambda: counter.TimebaseAverageEventCounter(30, 10)),
            "5m": counter.CounterManager(
                lambda: counter.TimebaseAverageWindowCounter(30, 10)),
            "1h": counter.CounterManager(
                lambda: counter.TimebaseAverageWindowCounter(60, 60)),
            "1d": counter.CounterManager(
                lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
            "all": counter.CounterManager(
                lambda: counter.TotalCounter()),
        }
        self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
        self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
        self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
        self._last_dump_cnt = 0
    def _update_projects(self):
        '''Check project update'''
        now = time.time()
        # Skip unless forced or the refresh interval has elapsed.
        if (
            not self._force_update_project
            and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
        ):
            return
        for project in self.projectdb.check_update(self._last_update_project):
            self._update_project(project)
            logger.debug("project: %s updated.", project['name'])
        self._force_update_project = False
        self._last_update_project = now
    # Keys the processor should report back via the _on_get_info task.
    get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config']
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = Project(project, ACTIVE_TASKS=self.ACTIVE_TASKS)
else:
self.projects[project['name']].update(project)
project = self.projects[project['name']]
if project._send_on_get_info:
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
project._send_on_get_info = False
self.on_select_task({
'taskid': '_on_get_info',
'project': project.name,
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': self.get_info_attributes,
},
'process': {
'callback': '_on_get_info',
},
})
# load task queue when project is running and delete task_queue when project is stoped
if project.active:
if not project.task_loaded:
self._load_tasks(project)
project.task_loaded = True
else:
if project.task_loaded:
project.task_queue = TaskQueue()
project.task_loaded = False
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
task_queue = project.task_queue
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project.name, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
task_queue.put(taskid, priority, exetime)
project.task_loaded = True
logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue))
if project not in self._cnt['all']:
self._update_project_cnt(project)
self._cnt['all'].value((project.name, 'pending'), len(project.task_queue))
def _update_project_cnt(self, project_name):
status_count = self.taskdb.status_count(project_name)
self._cnt['all'].value(
(project_name, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project_name, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project_name, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
or project in not in task_queue
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
if project.paused:
logger.error('project %s paused', task['project'])
else:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True
    def insert_task(self, task):
        '''insert task into database (keyed by project + taskid)'''
        return self.taskdb.insert(task['project'], task['taskid'], task)
    def update_task(self, task):
        '''update task in database (keyed by project + taskid)'''
        return self.taskdb.update(task['project'], task['taskid'], task)
    def put_task(self, task):
        '''put task to task queue

        Priority and execution time fall back to default_schedule when the
        task carries no 'schedule' dict.
        '''
        _schedule = task.get('schedule', self.default_schedule)
        self.projects[task['project']].task_queue.put(
            task['taskid'],
            priority=_schedule.get('priority', self.default_schedule['priority']),
            exetime=_schedule.get('exetime', self.default_schedule['exetime'])
        )
    def send_task(self, task, force=True):
        '''
        dispatch task to fetcher

        out queue may have size limit to prevent block, a send_buffer is used
        When force is True a full queue buffers the task for a later retry
        (see _check_select); otherwise Queue.Full propagates to the caller.
        '''
        try:
            self.out_queue.put_nowait(task)
        except Queue.Full:
            if force:
                self._send_buffer.appendleft(task)
            else:
                raise
    def _check_task_done(self):
        '''Check status queue

        Drains the status queue: _on_get_info replies update project runtime
        info; every other valid report goes to on_task_status.  Returns the
        number of regular status reports handled.
        '''
        cnt = 0
        try:
            while True:
                task = self.status_queue.get_nowait()
                # check _on_get_info result here
                if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
                    if task['project'] not in self.projects:
                        continue
                    project = self.projects[task['project']]
                    project.on_get_info(task['track'].get('save') or {})
                    logger.info(
                        '%s on_get_info %r', task['project'], task['track'].get('save', {})
                    )
                    continue
                elif not self.task_verify(task):
                    continue
                self.on_task_status(task)
                cnt += 1
        except Queue.Empty:
            # Queue drained.
            pass
        return cnt
    # Fields loaded from taskdb when merging an incoming request with an
    # existing task record.
    merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
    def _check_request(self):
        '''Check new task queue

        First retries postponed requests whose task is no longer processing,
        then drains up to LOOP_LIMIT new tasks, de-duplicating by taskid
        (force_update lets a duplicate through).  Returns the number of
        tasks handed to on_request.
        '''
        # check _postpone_request first
        todo = []
        for task in self._postpone_request:
            if task['project'] not in self.projects:
                continue
            # Still processing: keep postponing; otherwise handle it now.
            if self.projects[task['project']].task_queue.is_processing(task['taskid']):
                todo.append(task)
            else:
                self.on_request(task)
        self._postpone_request = todo
        tasks = {}
        while len(tasks) < self.LOOP_LIMIT:
            try:
                task = self.newtask_queue.get_nowait()
            except Queue.Empty:
                break
            # The queue may carry a single task or a list of tasks.
            if isinstance(task, list):
                _tasks = task
            else:
                _tasks = (task, )
            for task in _tasks:
                if not self.task_verify(task):
                    continue
                # Already queued for this project: ignore unless forced.
                if task['taskid'] in self.projects[task['project']].task_queue:
                    if not task.get('schedule', {}).get('force_update', False):
                        logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
                        continue
                # Duplicate within this drain: keep the first unless forced.
                if task['taskid'] in tasks:
                    if not task.get('schedule', {}).get('force_update', False):
                        continue
                tasks[task['taskid']] = task
        for task in itervalues(tasks):
            self.on_request(task)
        return len(tasks)
    def _check_cronjob(self):
        """Check projects cronjob tick, return True when a new tick is sent.

        Advances the tick at most once per wall-clock second and emits an
        _on_cronjob task to every active project whose min_tick divides the
        current tick.
        """
        now = time.time()
        self._last_tick = int(self._last_tick)
        if now - self._last_tick < 1:
            return False
        self._last_tick += 1
        for project in itervalues(self.projects):
            if not project.active:
                continue
            # min_tick is unknown until the _on_get_info reply arrives.
            if project.waiting_get_info:
                continue
            if project.min_tick == 0:
                continue
            if self._last_tick % int(project.min_tick) != 0:
                continue
            self.on_select_task({
                'taskid': '_on_cronjob',
                'project': project.name,
                'url': 'data:,_on_cronjob',
                'status': self.taskdb.SUCCESS,
                'fetch': {
                    'save': {
                        'tick': self._last_tick,
                    },
                },
                'process': {
                    'callback': '_on_cronjob',
                },
            })
        return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
    '''Select task to fetch & process'''
    # First flush tasks that previously failed to enter the out queue.
    while self._send_buffer:
        _task = self._send_buffer.pop()
        try:
            # use force=False here to prevent automatic send_buffer append and get exception
            self.send_task(_task, False)
        except Queue.Full:
            self._send_buffer.append(_task)
            break

    if self.out_queue.full():
        return {}

    taskids = []
    cnt = 0
    cnt_dict = dict()
    limit = self.LOOP_LIMIT
    for project in itervalues(self.projects):
        if not project.active:
            continue
        if project.waiting_get_info:
            continue
        if cnt >= limit:
            break

        # task queue
        task_queue = project.task_queue
        task_queue.check_update()
        project_cnt = 0

        # check send_buffer here. when not empty, out_queue may blocked. Not sending tasks
        # Cap each project at limit/10 so one busy project cannot starve the rest.
        while cnt < limit and project_cnt < limit / 10:
            taskid = task_queue.get()
            if not taskid:
                break

            taskids.append((project.name, taskid))
            project_cnt += 1
            cnt += 1

        cnt_dict[project.name] = project_cnt
        if project_cnt:
            project._send_finished_event = True
        # check and send finished event to project
        elif len(task_queue) == 0 and project._send_finished_event:
            project._send_finished_event = False
            self.on_select_task({
                'taskid': 'on_finished',
                'project': project.name,
                'url': 'data:,on_finished',
                'status': self.taskdb.SUCCESS,
                'process': {
                    'callback': 'on_finished',
                },
            })

    # Load the selected task records outside the per-project loop.
    for project, taskid in taskids:
        self._load_put_task(project, taskid)

    return cnt_dict
def _load_put_task(self, project, taskid):
    """Load a task record from taskdb and hand it to on_select_task."""
    try:
        record = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
    except ValueError:
        logger.error('bad task pack %s:%s', project, taskid)
        return
    if record:
        self.on_select_task(record)
def _print_counter_log(self):
    """Log a one-line summary of the 5-minute counters.

    Shows the totals plus the top-2 projects by failures and the
    top-(5 - that) projects by overall activity, so at most 5 projects
    appear in a single log line.
    """
    # print top 5 active counters
    keywords = ('pending', 'success', 'retry', 'failed')
    total_cnt = {}
    project_actives = []
    project_fails = []
    for key in keywords:
        total_cnt[key] = 0
    for project, subcounter in iteritems(self._cnt['5m']):
        actives = 0
        for key in keywords:
            cnt = subcounter.get(key, None)
            if cnt:
                cnt = cnt.sum
                total_cnt[key] += cnt
                actives += cnt

        project_actives.append((actives, project))

        fails = subcounter.get('failed', None)
        if fails:
            project_fails.append((fails.sum, project))

    top_2_fails = sorted(project_fails, reverse=True)[:2]
    # BUGFIX: top_2_fails holds (count, name) tuples, so the previous
    # `x[1] not in top_2_fails` test compared a name against tuples and
    # never matched -- failing projects could be listed twice. Compare
    # against the project *names* instead.
    fail_names = set(name for _, name in top_2_fails)
    top_3_actives = sorted([x for x in project_actives if x[1] not in fail_names],
                           reverse=True)[:5 - len(top_2_fails)]

    log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
               "retry:%(retry)d,failed:%(failed)d" % total_cnt)
    for _, project in itertools.chain(top_3_actives, top_2_fails):
        subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
        log_str += " %s:%d,%d,%d,%d" % (project,
                                        subcounter.get('pending', 0),
                                        subcounter.get('success', 0),
                                        subcounter.get('retry', 0),
                                        subcounter.get('failed', 0))
    logger.info(log_str)
def _dump_cnt(self):
    '''Dump counters to file'''
    # One dump file per persisted counter window, named scheduler.<window>.
    for window in ('1h', '1d', 'all'):
        self._cnt[window].dump(os.path.join(self.data_path, 'scheduler.%s' % window))
def _try_dump_cnt(self):
    '''Dump counters every 60 seconds'''
    now = time.time()
    if now - self._last_dump_cnt <= 60:
        return
    self._last_dump_cnt = now
    self._dump_cnt()
    self._print_counter_log()
def _check_delete(self):
    '''Check project delete'''
    now = time.time()
    # list() because we delete from self.projects while iterating.
    for project in list(itervalues(self.projects)):
        if project.db_status != 'STOP':
            continue
        if now - project.updatetime < self.DELETE_TIME:
            # Grace period: only delete projects stopped longer than DELETE_TIME.
            continue
        if 'delete' not in self.projectdb.split_group(project.group):
            # Deletion must be explicitly requested via the 'delete' group tag.
            continue

        logger.warning("deleting project: %s!", project.name)
        del self.projects[project.name]
        self.taskdb.drop(project.name)
        self.projectdb.drop(project.name)
        if self.resultdb:
            self.resultdb.drop(project.name)
        for each in self._cnt.values():
            del each[project.name]
def __len__(self):
    """Total number of queued tasks across every project."""
    total = 0
    for project in itervalues(self.projects):
        total += len(project.task_queue)
    return total
def quit(self):
    '''Set quit signal'''
    self._quit = True
    # stop xmlrpc server
    if hasattr(self, 'xmlrpc_server'):
        # Schedule the stops on the xmlrpc ioloop's own thread via callbacks.
        self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
        self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
    '''consume queues and feed tasks to fetcher, once'''
    self._update_projects()
    self._check_task_done()
    self._check_request()
    # Drain every pending cron tick before selecting tasks.
    while self._check_cronjob():
        pass
    self._check_select()
    self._check_delete()
    self._try_dump_cnt()
def run(self):
    '''Start scheduler loop'''
    logger.info("loading projects")

    while not self._quit:
        try:
            time.sleep(self.LOOP_INTERVAL)
            self.run_once()
            # A clean iteration resets the consecutive-exception counter.
            self._exceptions = 0
        except KeyboardInterrupt:
            break
        except Exception as e:
            logger.exception(e)
            self._exceptions += 1
            if self._exceptions > self.EXCEPTION_LIMIT:
                # Too many consecutive failures -- bail out instead of
                # spinning forever.
                break
            continue

    logger.info("scheduler exiting...")
    self._dump_cnt()
def trigger_on_start(self, project):
    '''trigger an on_start callback of project'''
    seed_task = {
        "project": project,
        "taskid": "on_start",
        "url": "data:,on_start",
        "process": {
            "callback": "on_start",
        },
    }
    self.newtask_queue.put(seed_task)
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
    '''Start xmlrpc interface'''
    from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication

    application = WSGIXMLRPCApplication()

    application.register_function(self.quit, '_quit')
    application.register_function(self.__len__, 'size')

    def dump_counter(_time, _type):
        # Snapshot of one counter window, e.g. counter('5m', 'sum').
        try:
            return self._cnt[_time].to_dict(_type)
        except:
            logger.exception('')
    application.register_function(dump_counter, 'counter')

    def new_task(task):
        # Enqueue a task supplied over RPC; True when it passed verification.
        if self.task_verify(task):
            self.newtask_queue.put(task)
            return True
        return False
    application.register_function(new_task, 'newtask')

    def send_task(task):
        '''dispatch task to fetcher'''
        self.send_task(task)
        return True
    application.register_function(send_task, 'send_task')

    def update_project():
        # Force a project reload on the next scheduler iteration.
        self._force_update_project = True
    application.register_function(update_project, 'update_project')

    def get_active_tasks(project=None, limit=100):
        # Whitelists keep the RPC payload small and JSON-serializable.
        allowed_keys = set((
            'taskid',
            'project',
            'status',
            'url',
            'lastcrawltime',
            'updatetime',
            'track',
        ))
        track_allowed_keys = set((
            'ok',
            'time',
            'follows',
            'status_code',
        ))

        iters = [iter(x.active_tasks) for k, x in iteritems(self.projects)
                 if x and (k == project if project else True)]
        tasks = [next(x, None) for x in iters]
        result = []

        # Merge the per-project deques, newest (largest timestamp) first,
        # up to `limit` entries.
        while len(result) < limit and tasks and not all(x is None for x in tasks):
            updatetime, task = t = max(t for t in tasks if t)
            i = tasks.index(t)
            tasks[i] = next(iters[i], None)
            for key in list(task):
                if key == 'track':
                    for k in list(task[key].get('fetch', [])):
                        if k not in track_allowed_keys:
                            del task[key]['fetch'][k]
                    for k in list(task[key].get('process', [])):
                        if k not in track_allowed_keys:
                            del task[key]['process'][k]
                if key in allowed_keys:
                    continue
                del task[key]
            result.append(t)
        # fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
        # have no idea why
        return json.loads(json.dumps(result))
    application.register_function(get_active_tasks, 'get_active_tasks')

    import tornado.wsgi
    import tornado.ioloop
    import tornado.httpserver

    # A dedicated ioloop so the RPC server can be stopped from quit().
    container = tornado.wsgi.WSGIContainer(application)
    self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
    self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
    self.xmlrpc_server.listen(port=port, address=bind)
    self.xmlrpc_ioloop.start()
def on_request(self, task):
    """Entry point for an incoming request: route to the new- or
    old-task handler depending on whether taskdb already knows it."""
    project_queue = self.projects[task['project']].task_queue
    if self.INQUEUE_LIMIT and len(project_queue) >= self.INQUEUE_LIMIT:
        logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
        return

    oldtask = self.taskdb.get_task(task['project'], task['taskid'],
                                   fields=self.merge_task_fields)

    if not oldtask:
        return self.on_new_request(task)
    return self.on_old_request(task, oldtask)
def on_new_request(self, task):
    '''Called when a new request is arrived'''
    task['status'] = self.taskdb.ACTIVE
    self.insert_task(task)
    self.put_task(task)

    project = task['project']
    # Bump the 'pending' counter in every counter window.
    for window in ('5m', '1h', '1d', 'all'):
        self._cnt[window].event((project, 'pending'), +1)
    logger.info('new task %(project)s:%(taskid)s %(url)s', task)
    return task
def on_old_request(self, task, old_task):
    '''Called when a crawled task is arrived'''
    now = time.time()

    _schedule = task.get('schedule', self.default_schedule)
    old_schedule = old_task.get('schedule', {})

    if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
        # when a task is in processing, the modify may conflict with the running task.
        # postpone the modify after task finished.
        logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
        self._postpone_request.append(task)
        return

    # Decide whether the task should be re-crawled.
    restart = False
    schedule_age = _schedule.get('age', self.default_schedule['age'])
    if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
        # itag changed -> content version changed.
        restart = True
    elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
        # Last crawl is older than the allowed age.
        restart = True
    elif _schedule.get('force_update'):
        restart = True

    if not restart:
        logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
        return

    if _schedule.get('cancel'):
        # Explicit cancel: mark BAD and drop from the queue.
        logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
        task['status'] = self.taskdb.BAD
        self.update_task(task)
        self.projects[task['project']].task_queue.delete(task['taskid'])
        return task

    task['status'] = self.taskdb.ACTIVE
    self.update_task(task)
    self.put_task(task)

    project = task['project']
    # Move the counters: the task leaves its old terminal state and
    # becomes pending again.
    if old_task['status'] != self.taskdb.ACTIVE:
        self._cnt['5m'].event((project, 'pending'), +1)
        self._cnt['1h'].event((project, 'pending'), +1)
        self._cnt['1d'].event((project, 'pending'), +1)
    if old_task['status'] == self.taskdb.SUCCESS:
        self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
    elif old_task['status'] == self.taskdb.FAILED:
        self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
    logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
    return task
def on_task_status(self, task):
    '''Called when a status pack is arrived'''
    try:
        procesok = task['track']['process']['ok']
        if not self.projects[task['project']].task_queue.done(task['taskid']):
            # CONSISTENCY FIX: use the module-level `logger` (was
            # `logging.error`, which bypasses this module's logger).
            logger.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
            return None
    except KeyError as e:
        logger.error("Bad status pack: %s", e)
        return None

    if procesok:
        ret = self.on_task_done(task)
    else:
        ret = self.on_task_failed(task)

    # Record fetch/process timings into the 5-minute timing counters.
    if task['track']['fetch'].get('time'):
        self._cnt['5m_time'].event((task['project'], 'fetch_time'),
                                   task['track']['fetch']['time'])
    if task['track']['process'].get('time'):
        self._cnt['5m_time'].event((task['project'], 'process_time'),
                                   task['track']['process'].get('time'))
    self.projects[task['project']].active_tasks.appendleft((time.time(), task))
    return ret
def on_task_done(self, task):
    '''Called when a task is done and success, called by `on_task_status`'''
    task['status'] = self.taskdb.SUCCESS
    task['lastcrawltime'] = time.time()

    if 'schedule' in task:
        schedule = task['schedule']
        if schedule.get('auto_recrawl') and 'age' in schedule:
            # auto_recrawl: keep the task ACTIVE and re-queue it one
            # 'age' interval from now.
            task['status'] = self.taskdb.ACTIVE
            schedule['exetime'] = time.time() + schedule.get('age')
            self.put_task(task)
        else:
            del task['schedule']
    self.update_task(task)

    project = task['project']
    for window in ('5m', '1h', '1d'):
        self._cnt[window].event((project, 'success'), +1)
    self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
    logger.info('task done %(project)s:%(taskid)s %(url)s', task)
    return task
def on_task_failed(self, task):
    '''Called when a task is failed, called by `on_task_status`'''

    if 'schedule' not in task:
        # Status packs may arrive without schedule info; recover it from taskdb.
        old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
        if old_task is None:
            # CONSISTENCY FIX: module-level `logger` with lazy args
            # (was `logging.error('...' % task)`).
            logger.error('unknown status pack: %s', task)
            return
        task['schedule'] = old_task.get('schedule', {})

    retries = task['schedule'].get('retries', self.default_schedule['retries'])
    retried = task['schedule'].get('retried', 0)

    project_info = self.projects[task['project']]
    retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
    # Per-attempt delay, falling back to the '' (default) entry.
    next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))

    if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
        # auto_recrawl tasks never give up; retry within the recrawl age.
        next_exetime = min(next_exetime, task['schedule'].get('age'))
    else:
        if retried >= retries:
            next_exetime = -1
        elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
            next_exetime = task['schedule'].get('age')

    if next_exetime < 0:
        # Retries exhausted: mark as FAILED.
        task['status'] = self.taskdb.FAILED
        task['lastcrawltime'] = time.time()
        self.update_task(task)

        project = task['project']
        self._cnt['5m'].event((project, 'failed'), +1)
        self._cnt['1h'].event((project, 'failed'), +1)
        self._cnt['1d'].event((project, 'failed'), +1)
        self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
        # CONSISTENCY FIX: lazy logging args (was eager '%' formatting).
        logger.info('task failed %(project)s:%(taskid)s %(url)s', task)
        return task
    else:
        # Schedule a retry.
        task['schedule']['retried'] = retried + 1
        task['schedule']['exetime'] = time.time() + next_exetime
        task['lastcrawltime'] = time.time()
        self.update_task(task)
        self.put_task(task)

        project = task['project']
        self._cnt['5m'].event((project, 'retry'), +1)
        self._cnt['1h'].event((project, 'retry'), +1)
        self._cnt['1d'].event((project, 'retry'), +1)
        # self._cnt['all'].event((project, 'retry'), +1)
        logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
            retried, retries), task)
        return task
def on_select_task(self, task):
    '''Called when a task is selected to fetch & process'''
    # inject informations about project
    logger.info('select %(project)s:%(taskid)s %(url)s', task)

    project_info = self.projects.get(task['project'])
    assert project_info, 'no such project'

    task['group'] = project_info.group
    task['project_md5sum'] = project_info.md5sum
    task['project_updatetime'] = project_info.updatetime

    # lazy join project.crawl_config
    if getattr(project_info, 'crawl_config', None):
        task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)

    project_info.active_tasks.appendleft((time.time(), task))
    self.send_task(task)
    return task
from tornado import gen
class OneScheduler(Scheduler):
    """
    Scheduler Mixin class for one mode

    overwritten send_task method
    call processor.on_task(fetcher.fetch(task)) instead of consuming queue
    """

    def _check_select(self):
        """
        interactive mode of select tasks

        In non-interactive one mode fall back to the normal scheduler
        selection; otherwise open a python shell letting the user pick
        tasks to crawl by hand.
        """
        if not self.interactive:
            return super(OneScheduler, self)._check_select()

        # waiting for running tasks
        if self.running_task > 0:
            return

        is_crawled = []

        def run(project=None):
            return crawl('on_start', project=project)

        def crawl(url, project=None, **kwargs):
            """
            Crawl given url, same parameters as BaseHandler.crawl

            url - url or taskid, parameters will be used if in taskdb
            project - can be ignored if only one project exists.
            """

            # looking up the project instance
            if project is None:
                if len(self.projects) == 1:
                    project = list(self.projects.keys())[0]
                else:
                    raise LookupError('You need specify the project: %r'
                                      % list(self.projects.keys()))
            project_data = self.processor.project_manager.get(project)
            if not project_data:
                raise LookupError('no such project: %s' % project)

            # get task package
            instance = project_data['instance']
            instance._reset()
            task = instance.crawl(url, **kwargs)
            if isinstance(task, list):
                raise Exception('url list is not allowed in interactive mode')

            # check task in taskdb
            if not kwargs:
                dbtask = self.taskdb.get_task(task['project'], task['taskid'],
                                              fields=self.request_task_fields)
                if not dbtask:
                    dbtask = self.taskdb.get_task(task['project'], task['url'],
                                                  fields=self.request_task_fields)
                if dbtask:
                    task = dbtask

            # select the task
            self.on_select_task(task)
            is_crawled.append(True)

            shell.ask_exit()

        def quit_interactive():
            '''Quit interactive mode'''
            is_crawled.append(True)
            self.interactive = False
            shell.ask_exit()

        def quit_pyspider():
            '''Close pyspider'''
            is_crawled[:] = []
            shell.ask_exit()

        shell = utils.get_python_console()
        shell.interact(
            'pyspider shell - Select task\n'
            'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
            'quit_interactive() - Quit interactive mode\n'
            'quit_pyspider() - Close pyspider'
        )
        # Nothing crawled and not merely switching modes -> shut down.
        if not is_crawled:
            self.ioloop.add_callback(self.ioloop.stop)

    def __getattr__(self, name):
        """patch for crawl(url, callback=self.index_page) API"""
        if self.interactive:
            return name
        raise AttributeError(name)

    def on_task_status(self, task):
        """Ignore not processing error in interactive mode"""
        if not self.interactive:
            # BUGFIX: return here. Previously execution fell through after
            # the super call, so the same status pack was processed (and
            # counted) a second time by the code below.
            return super(OneScheduler, self).on_task_status(task)

        try:
            procesok = task['track']['process']['ok']
        except KeyError as e:
            logger.error("Bad status pack: %s", e)
            return None

        if procesok:
            ret = self.on_task_done(task)
        else:
            ret = self.on_task_failed(task)
        if task['track']['fetch'].get('time'):
            self._cnt['5m_time'].event((task['project'], 'fetch_time'),
                                       task['track']['fetch']['time'])
        if task['track']['process'].get('time'):
            self._cnt['5m_time'].event((task['project'], 'process_time'),
                                       task['track']['process'].get('time'))
        self.projects[task['project']].active_tasks.appendleft((time.time(), task))
        return ret

    def init_one(self, ioloop, fetcher, processor,
                 result_worker=None, interactive=False):
        """Wire the in-process fetcher/processor/result_worker together."""
        self.ioloop = ioloop
        self.fetcher = fetcher
        self.processor = processor
        self.result_worker = result_worker
        self.interactive = interactive
        self.running_task = 0

    @gen.coroutine
    def do_task(self, task):
        """Fetch and process one task inline, then drain the processor's
        message and result queues."""
        self.running_task += 1
        result = yield gen.Task(self.fetcher.fetch, task)
        type, task, response = result.args
        self.processor.on_task(task, response)
        # do with message
        while not self.processor.inqueue.empty():
            _task, _response = self.processor.inqueue.get()
            self.processor.on_task(_task, _response)
        # do with results
        while not self.processor.result_queue.empty():
            _task, _result = self.processor.result_queue.get()
            if self.result_worker:
                self.result_worker.on_result(_task, _result)
        self.running_task -= 1

    def send_task(self, task, force=True):
        """Run the task inline instead of pushing it to an out queue."""
        if self.fetcher.http_client.free_size() <= 0:
            if force:
                self._send_buffer.appendleft(task)
            else:
                # NOTE(review): attribute is `outqueue` here but `out_queue`
                # elsewhere in the scheduler -- confirm which one is actually
                # set in one mode.
                raise self.outqueue.Full
        self.ioloop.add_future(self.do_task(task), lambda x: x.result())

    def run(self):
        import tornado.ioloop
        tornado.ioloop.PeriodicCallback(self.run_once, 100,
                                        io_loop=self.ioloop).start()
        self.ioloop.start()

    def quit(self):
        self.ioloop.stop()
        logger.info("scheduler exiting...")
import random
import threading
class ThreadBaseScheduler(Scheduler):
    """Scheduler variant that runs db-touching callbacks on a pool of worker
    threads, giving each thread its own copy of the database connections."""

    def __init__(self, threads=4, *args, **kwargs):
        self.threads = threads
        # Thread-local storage for per-thread db connection copies.
        self.local = threading.local()

        super(ThreadBaseScheduler, self).__init__(*args, **kwargs)

        # Keep the original (main-thread) connections; the properties below
        # hand out thread-local copies of them.
        self._taskdb = self.taskdb
        self._projectdb = self.projectdb
        self._resultdb = self.resultdb

        self.thread_objs = []
        self.thread_queues = []
        self._start_threads()
        assert len(self.thread_queues) > 0

    @property
    def taskdb(self):
        # Lazily clone the main connection for the calling thread.
        if not hasattr(self.local, 'taskdb'):
            self.taskdb = self._taskdb.copy()
        return self.local.taskdb

    @taskdb.setter
    def taskdb(self, taskdb):
        self.local.taskdb = taskdb

    @property
    def projectdb(self):
        if not hasattr(self.local, 'projectdb'):
            self.projectdb = self._projectdb.copy()
        return self.local.projectdb

    @projectdb.setter
    def projectdb(self, projectdb):
        self.local.projectdb = projectdb

    @property
    def resultdb(self):
        if not hasattr(self.local, 'resultdb'):
            self.resultdb = self._resultdb.copy()
        return self.local.resultdb

    @resultdb.setter
    def resultdb(self, resultdb):
        self.local.resultdb = resultdb

    def _start_threads(self):
        # One daemon worker plus one work queue per configured thread.
        for i in range(self.threads):
            queue = Queue.Queue()
            thread = threading.Thread(target=self._thread_worker, args=(queue, ))
            thread.daemon = True
            thread.start()
            self.thread_objs.append(thread)
            self.thread_queues.append(queue)

    def _thread_worker(self, queue):
        # Worker loop: execute queued (method, args, kwargs) forever.
        while True:
            method, args, kwargs = queue.get()
            try:
                method(*args, **kwargs)
            except Exception as e:
                logger.exception(e)

    def _run_in_thread(self, method, *args, **kwargs):
        """Queue `method(*args, **kwargs)` onto a worker thread.

        _i: optional sharding key -- the same value always maps to the same
            thread, serializing operations on the same taskid.
        _block: when True, wait until every worker queue is drained.
        """
        i = kwargs.pop('_i', None)
        block = kwargs.pop('_block', False)

        if i is None:
            # No sharding key: prefer an idle (empty-queue) thread; when all
            # are busy, either wait (_block) or pick one at random.
            while True:
                for queue in self.thread_queues:
                    if queue.empty():
                        break
                else:
                    if block:
                        time.sleep(0.1)
                        continue
                    else:
                        queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
                break
        else:
            queue = self.thread_queues[i % len(self.thread_queues)]

        queue.put((method, args, kwargs))

        if block:
            self._wait_thread()

    def _wait_thread(self):
        # Busy-wait until all worker queues are empty.
        while True:
            if all(queue.empty() for queue in self.thread_queues):
                break
            time.sleep(0.1)

    def _update_project(self, project):
        self._run_in_thread(Scheduler._update_project, self, project)

    def on_task_status(self, task):
        # Shard by taskid so status packs for one task stay ordered.
        i = hash(task['taskid'])
        self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)

    def on_request(self, task):
        # Shard by taskid so requests for one task stay ordered.
        i = hash(task['taskid'])
        self._run_in_thread(Scheduler.on_request, self, task, _i=i)

    def _load_put_task(self, project, taskid):
        i = hash(taskid)
        self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)

    def run_once(self):
        super(ThreadBaseScheduler, self).run_once()
        self._wait_thread()
|
|
import os
import binascii
import py
from pyshark.packet.common import Pickleable
class LayerField(object):
    """
    Holds all data about a field of a layer, both its actual value and its name and nice representation.
    """

    # Note: We use this object with slots and not just a dict because
    # it's much more memory-efficient (cuts about a third of the memory).
    __slots__ = ['name', 'showname', 'raw_value', 'show', 'hide', 'pos', 'size', 'unmaskedvalue']

    def __init__(self, name=None, showname=None, value=None, show=None, hide=None, pos=None, size=None, unmaskedvalue=None):
        self.name = name
        self.showname = showname
        self.raw_value = value
        self.show = show
        self.pos = pos
        self.size = size
        self.unmaskedvalue = unmaskedvalue

        # 'hide' arrives from the XML as a string ('yes'/'no'); normalize to bool.
        if hide and hide == 'yes':
            self.hide = True
        else:
            self.hide = False

    def __repr__(self):
        return '<LayerField %s: %s>' % (self.name, self.get_default_value())

    def get_default_value(self):
        """
        Gets the best 'value' string this field has.

        Preference order: show (nice display), raw value, showname.
        """
        val = self.show
        if not val:
            val = self.raw_value
        if not val:
            val = self.showname
        return val

    @property
    def showname_value(self):
        """
        For fields which do not contain a normal value, we attempt to take their value from the showname.
        """
        # BUGFIX: split only on the *first* ': ' so values which themselves
        # contain ': ' (e.g. 'User-Agent: Mozilla/5.0 (X11: ...)') are kept whole.
        if self.showname and ': ' in self.showname:
            return self.showname.split(': ', 1)[1]

    @property
    def showname_key(self):
        """The label part of the showname (everything before the first ': ')."""
        if self.showname and ': ' in self.showname:
            return self.showname.split(': ', 1)[0]

    def __getstate__(self):
        # Slots-based objects have no __dict__; pickle the slots explicitly.
        ret = {}
        for slot in self.__slots__:
            ret[slot] = getattr(self, slot)
        return ret

    def __setstate__(self, data):
        # items() instead of iteritems() so this also works on Python 3.
        for key, val in data.items():
            setattr(self, key, val)

    @property
    def binary_value(self):
        """
        Returns the raw value of this field (as a binary string)
        """
        return binascii.unhexlify(self.raw_value)

    @property
    def int_value(self):
        """
        Returns the raw value of this field (as an integer).
        """
        return int(self.raw_value, 16)
class LayerFieldsContainer(str, Pickleable):
    """
    A str subclass wrapping one or more fields that share a name.

    The string value -- and any attribute access such as showname or
    raw_value -- comes from the main (first) field stored in the container.
    """

    def __new__(cls, main_field, *args, **kwargs):
        obj = str.__new__(cls, main_field.get_default_value(), *args, **kwargs)
        obj.fields = [main_field]
        return obj

    def add_field(self, field):
        """Append an alternate field carrying the same name."""
        self.fields.append(field)

    def __getattr__(self, item):
        # Delegate unknown attributes to the main field.
        return getattr(self.main_field, item)

    @property
    def main_field(self):
        """The first (primary) field of this container."""
        return self.fields[0]

    @property
    def alternate_fields(self):
        """
        Return the alternate values of this field containers (non-main ones).
        """
        return self.fields[1:]

    @property
    def all_fields(self):
        """
        Returns all fields in a list, the main field followed by the alternate fields.
        """
        return self.fields
class Layer(Pickleable):
    """
    An object representing a Packet layer.
    """
    DATA_LAYER = 'data'

    def __init__(self, xml_obj=None, raw_mode=False):
        self.raw_mode = raw_mode

        self._layer_name = xml_obj.attrib['name']
        self._all_fields = {}

        # We copy over all the fields from the XML object
        # Note: we don't read lazily from the XML because the lxml objects are very memory-inefficient
        # so we'd rather not save them.
        for field in xml_obj.findall('.//field'):
            attributes = dict(field.attrib)
            field_obj = LayerField(**attributes)
            if attributes['name'] in self._all_fields:
                # Field name already exists, add this field to the container.
                self._all_fields[attributes['name']].add_field(field_obj)
            else:
                self._all_fields[attributes['name']] = LayerFieldsContainer(field_obj)

    def __getattr__(self, item):
        val = self.get_field(item)
        if val is None:
            # Include the attribute name so the error is debuggable
            # (was a bare AttributeError()).
            raise AttributeError(item)
        if self.raw_mode:
            return val.raw_value
        return val

    def __dir__(self):
        # list() around keys() so this also works on Python 3, where keys()
        # is a view and cannot be concatenated to a list.
        return dir(type(self)) + list(self.__dict__.keys()) + self.field_names

    def get_field(self, name):
        """
        Gets the XML field object of the given name.
        """
        for field_name, field in self._all_fields.items():
            if self._sanitize_field_name(name) == self._sanitize_field_name(field_name):
                return field

    def get_field_value(self, name, raw=False):
        """
        Tries getting the value of the given field.
        Tries it in the following order: show (standard nice display), value (raw value), showname (extended nice display).

        :param name: The name of the field
        :param raw: Only return raw value
        :return: str of value
        """
        field = self.get_field(name)
        if field is None:
            return

        if raw:
            return field.raw_value

        return field

    @property
    def _field_prefix(self):
        """
        Prefix to field names in the XML.
        """
        if self.layer_name == 'geninfo':
            return ''
        return self.layer_name + '.'

    @property
    def field_names(self):
        """
        Gets all XML field names of this layer.
        :return: list of strings
        """
        return [self._sanitize_field_name(field_name)
                for field_name in self._all_fields]

    @property
    def layer_name(self):
        # tshark wraps trailing data in a fake layer; present it as 'data'.
        if self._layer_name == 'fake-field-wrapper':
            return self.DATA_LAYER
        return self._layer_name

    def _sanitize_field_name(self, field_name):
        """
        Sanitizes an XML field name (since it might have characters which would make it inaccessible as a python attribute).
        """
        field_name = field_name.replace(self._field_prefix, '')
        return field_name.replace('.', '_').replace('-', '_').lower()

    def __repr__(self):
        return '<%s Layer>' % self.layer_name.upper()

    def __str__(self):
        if self.layer_name == self.DATA_LAYER:
            return 'DATA'

        s = 'Layer %s:' % self.layer_name.upper() + os.linesep
        for field_line in self._get_all_field_lines():
            s += field_line
        return s

    def pretty_print(self):
        """Print the layer with terminal colors (requires the `py` library)."""
        tw = py.io.TerminalWriter()
        if self.layer_name == self.DATA_LAYER:
            tw.write('DATA')
            return

        tw.write('Layer %s:' % self.layer_name.upper() + os.linesep, yellow=True, bold=True)
        for field_line in self._get_all_field_lines():
            if ':' in field_line:
                field_name, field_line = field_line.split(':', 1)
                tw.write(field_name + ':', green=True, bold=True)
            tw.write(field_line, bold=True)

    def _get_all_fields_with_alternates(self):
        """All field containers plus their alternate (same-name) fields."""
        # list() so this works with Python 3 dict views as well; on Python 2
        # values() already returned a list, so behavior is unchanged.
        all_fields = list(self._all_fields.values())
        all_fields += sum([field.alternate_fields for field in all_fields], [])
        return all_fields

    def _get_all_field_lines(self):
        """
        Returns all lines that represent the fields of the layer (both their names and values).
        """
        for field in self._get_all_fields_with_alternates():
            if field.hide:
                continue
            if field.showname:
                field_repr = field.showname
            elif field.show:
                field_repr = field.show
            else:
                continue
            yield '\t' + field_repr + os.linesep

    def get_field_by_showname(self, showname):
        """
        Gets a field by its "showname"
        (the name that appears in Wireshark's detailed display i.e. in 'User-Agent: Mozilla...', 'User-Agent' is the
        showname)

        Returns None if not found.
        """
        for field in self._get_all_fields_with_alternates():
            if field.showname_key == showname:
                # Return it if "XXX: whatever == XXX"
                return field
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
import collections
import json
import logging
import Queue as queue
import threading
import grpc
from concurrent import futures
import apache_beam as beam
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.transforms.window import GlobalWindows
from apache_beam.runners.api import beam_fn_api_pb2
from apache_beam.runners.portability import maptask_executor_runner
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import sdk_worker
# This module is experimental. No backwards-compatibility guarantees.
def streaming_rpc_handler(cls, method_name):
    """Un-inverts the flow of control between the runner and the sdk harness."""

    class StreamingRpcHandler(cls):

        # Sentinel pushed onto the queue to terminate the response stream.
        _DONE = object()

        def __init__(self):
            self._push_queue = queue.Queue()
            self._pull_queue = queue.Queue()
            # Replace the named grpc servicer method with run() below.
            setattr(self, method_name, self.run)
            self._read_thread = threading.Thread(target=self._read)

        def run(self, iterator, context):
            self._inputs = iterator
            # Note: We only support one client for now.
            self._read_thread.start()
            # Yield responses queued via push() until the _DONE sentinel.
            while True:
                to_push = self._push_queue.get()
                if to_push is self._DONE:
                    return
                yield to_push

        def _read(self):
            # Drain the client's request stream into the pull queue.
            for data in self._inputs:
                self._pull_queue.put(data)

        def push(self, item):
            self._push_queue.put(item)

        def pull(self, timeout=None):
            return self._pull_queue.get(timeout=timeout)

        def empty(self):
            return self._pull_queue.empty()

        def done(self):
            # Terminate the response stream and wait for the reader thread.
            self.push(self._DONE)
            self._read_thread.join()

    return StreamingRpcHandler()
class OldeSourceSplittableDoFn(beam.DoFn):
    """A DoFn that reads and emits an entire source.
    """

    # TODO(robertwb): Make this a full SDF with progress splitting, etc.
    def process(self, source):
        if not isinstance(source, iobase.SourceBundle):
            # Dataflow native source
            with source.reader() as reader:
                for value in reader:
                    yield value
            return
        range_tracker = source.source.get_range_tracker(
            source.start_position, source.stop_position)
        for value in source.source.read(range_tracker):
            yield value
# See DataflowRunner._pardo_fn_data
# Pre-pickled pardo_fn_data tuple for OldeSourceSplittableDoFn:
# (fn, args, kwargs, side_inputs, windowing).
OLDE_SOURCE_SPLITTABLE_DOFN_DATA = pickler.dumps(
    (OldeSourceSplittableDoFn(), (), {}, [],
     beam.transforms.core.Windowing(GlobalWindows())))
class FnApiRunner(maptask_executor_runner.MapTaskExecutorRunner):
def __init__(self):
    super(FnApiRunner, self).__init__()
    # Counter backing _next_uid(); starts at -1 so the first uid is '0'.
    self._last_uid = -1
def has_metrics_support(self):
    """The Fn API runner does not support metrics yet."""
    return False
def _next_uid(self):
    """Return a fresh unique id as a decimal string."""
    new_uid = self._last_uid + 1
    self._last_uid = new_uid
    return str(new_uid)
def _map_task_registration(self, map_task, state_handler,
data_operation_spec):
input_data = {}
runner_sinks = {}
transforms = []
transform_index_to_id = {}
# Maps coders to new coder objects and references.
coders = {}
def coder_id(coder):
if coder not in coders:
coders[coder] = beam_fn_api_pb2.Coder(
function_spec=sdk_worker.pack_function_spec_data(
json.dumps(coder.as_cloud_object()),
sdk_worker.PYTHON_CODER_URN, id=self._next_uid()))
return coders[coder].function_spec.id
def output_tags(op):
return getattr(op, 'output_tags', ['out'])
def as_target(op_input):
input_op_index, input_output_index = op_input
input_op = map_task[input_op_index][1]
return {
'ignored_input_tag':
beam_fn_api_pb2.Target.List(target=[
beam_fn_api_pb2.Target(
primitive_transform_reference=transform_index_to_id[
input_op_index],
name=output_tags(input_op)[input_output_index])
])
}
def outputs(op):
return {
tag: beam_fn_api_pb2.PCollection(coder_reference=coder_id(coder))
for tag, coder in zip(output_tags(op), op.output_coders)
}
for op_ix, (stage_name, operation) in enumerate(map_task):
transform_id = transform_index_to_id[op_ix] = self._next_uid()
if isinstance(operation, operation_specs.WorkerInMemoryWrite):
# Write this data back to the runner.
fn = beam_fn_api_pb2.FunctionSpec(urn=sdk_worker.DATA_OUTPUT_URN,
id=self._next_uid())
if data_operation_spec:
fn.data.Pack(data_operation_spec)
inputs = as_target(operation.input)
side_inputs = {}
runner_sinks[(transform_id, 'out')] = operation
elif isinstance(operation, operation_specs.WorkerRead):
# A Read is either translated to a direct injection of windowed values
# into the sdk worker, or an injection of the source object into the
# sdk worker as data followed by an SDF that reads that source.
if (isinstance(operation.source.source,
worker_runner_base.InMemorySource)
and isinstance(operation.source.source.default_output_coder(),
WindowedValueCoder)):
output_stream = create_OutputStream()
element_coder = (
operation.source.source.default_output_coder().get_impl())
# Re-encode the elements in the nested context and
# concatenate them together
for element in operation.source.source.read(None):
element_coder.encode_to_stream(element, output_stream, True)
target_name = self._next_uid()
input_data[(transform_id, target_name)] = output_stream.get()
fn = beam_fn_api_pb2.FunctionSpec(urn=sdk_worker.DATA_INPUT_URN,
id=self._next_uid())
if data_operation_spec:
fn.data.Pack(data_operation_spec)
inputs = {target_name: beam_fn_api_pb2.Target.List()}
side_inputs = {}
else:
# Read the source object from the runner.
source_coder = beam.coders.DillCoder()
input_transform_id = self._next_uid()
output_stream = create_OutputStream()
source_coder.get_impl().encode_to_stream(
GlobalWindows.windowed_value(operation.source),
output_stream,
True)
target_name = self._next_uid()
input_data[(input_transform_id, target_name)] = output_stream.get()
input_ptransform = beam_fn_api_pb2.PrimitiveTransform(
id=input_transform_id,
function_spec=beam_fn_api_pb2.FunctionSpec(
urn=sdk_worker.DATA_INPUT_URN,
id=self._next_uid()),
# TODO(robertwb): Possible name collision.
step_name=stage_name + '/inject_source',
inputs={target_name: beam_fn_api_pb2.Target.List()},
outputs={
'out':
beam_fn_api_pb2.PCollection(
coder_reference=coder_id(source_coder))
})
if data_operation_spec:
input_ptransform.function_spec.data.Pack(data_operation_spec)
transforms.append(input_ptransform)
# Read the elements out of the source.
fn = sdk_worker.pack_function_spec_data(
OLDE_SOURCE_SPLITTABLE_DOFN_DATA,
sdk_worker.PYTHON_DOFN_URN,
id=self._next_uid())
inputs = {
'ignored_input_tag':
beam_fn_api_pb2.Target.List(target=[
beam_fn_api_pb2.Target(
primitive_transform_reference=input_transform_id,
name='out')
])
}
side_inputs = {}
elif isinstance(operation, operation_specs.WorkerDoFn):
fn = sdk_worker.pack_function_spec_data(
operation.serialized_fn,
sdk_worker.PYTHON_DOFN_URN,
id=self._next_uid())
inputs = as_target(operation.input)
# Store the contents of each side input for state access.
for si in operation.side_inputs:
assert isinstance(si.source, iobase.BoundedSource)
element_coder = si.source.default_output_coder()
view_id = self._next_uid()
# TODO(robertwb): Actually flesh out the ViewFn API.
side_inputs[si.tag] = beam_fn_api_pb2.SideInput(
view_fn=sdk_worker.serialize_and_pack_py_fn(
element_coder, urn=sdk_worker.PYTHON_ITERABLE_VIEWFN_URN,
id=view_id))
# Re-encode the elements in the nested context and
# concatenate them together
output_stream = create_OutputStream()
for element in si.source.read(
si.source.get_range_tracker(None, None)):
element_coder.get_impl().encode_to_stream(
element, output_stream, True)
elements_data = output_stream.get()
state_key = beam_fn_api_pb2.StateKey(function_spec_reference=view_id)
state_handler.Clear(state_key)
state_handler.Append(
beam_fn_api_pb2.SimpleStateAppendRequest(
state_key=state_key, data=[elements_data]))
elif isinstance(operation, operation_specs.WorkerFlatten):
fn = sdk_worker.pack_function_spec_data(
operation.serialized_fn,
sdk_worker.IDENTITY_DOFN_URN,
id=self._next_uid())
inputs = {
'ignored_input_tag':
beam_fn_api_pb2.Target.List(target=[
beam_fn_api_pb2.Target(
primitive_transform_reference=transform_index_to_id[
input_op_index],
name=output_tags(map_task[input_op_index][1])[
input_output_index])
for input_op_index, input_output_index in operation.inputs
])
}
side_inputs = {}
else:
raise TypeError(operation)
ptransform = beam_fn_api_pb2.PrimitiveTransform(
id=transform_id,
function_spec=fn,
step_name=stage_name,
inputs=inputs,
side_inputs=side_inputs,
outputs=outputs(operation))
transforms.append(ptransform)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(), coders=coders.values(),
primitive_transform=transforms)
return beam_fn_api_pb2.InstructionRequest(
instruction_id=self._next_uid(),
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[process_bundle_descriptor
])), runner_sinks, input_data
def _run_map_task(
self, map_task, control_handler, state_handler, data_plane_handler,
data_operation_spec):
registration, sinks, input_data = self._map_task_registration(
map_task, state_handler, data_operation_spec)
control_handler.push(registration)
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=self._next_uid(),
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=registration.register.
process_bundle_descriptor[0].id))
for (transform_id, name), elements in input_data.items():
data_out = data_plane_handler.output_stream(
process_bundle.instruction_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
data_out.write(elements)
data_out.close()
control_handler.push(process_bundle)
while True:
result = control_handler.pull()
if result.instruction_id == process_bundle.instruction_id:
if result.error:
raise RuntimeError(result.error)
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in sinks.items()]
for output in data_plane_handler.input_elements(
process_bundle.instruction_id, expected_targets):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple not in sinks:
# Unconsumed output.
continue
sink_op = sinks[target_tuple]
coder = sink_op.output_coders[0]
input_stream = create_InputStream(output.data)
elements = []
while input_stream.size() > 0:
elements.append(coder.get_impl().decode_from_stream(
input_stream, True))
if not sink_op.write_windowed_values:
elements = [e.value for e in elements]
for e in elements:
sink_op.output_buffer.append(e)
return
def execute_map_tasks(self, ordered_map_tasks, direct=True):
if direct:
controller = FnApiRunner.DirectController()
else:
controller = FnApiRunner.GrpcController()
try:
for _, map_task in ordered_map_tasks:
logging.info('Running %s', map_task)
self._run_map_task(
map_task, controller.control_handler, controller.state_handler,
controller.data_plane_handler, controller.data_operation_spec())
finally:
controller.close()
class SimpleState(object): # TODO(robertwb): Inherit from GRPC servicer.
def __init__(self):
self._all = collections.defaultdict(list)
def Get(self, state_key):
return beam_fn_api_pb2.Elements.Data(
data=''.join(self._all[self._to_key(state_key)]))
def Append(self, append_request):
self._all[self._to_key(append_request.state_key)].extend(
append_request.data)
def Clear(self, state_key):
try:
del self._all[self._to_key(state_key)]
except KeyError:
pass
@staticmethod
def _to_key(state_key):
return (state_key.function_spec_reference, state_key.window,
state_key.key)
class DirectController(object):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self):
self._responses = []
self.state_handler = FnApiRunner.SimpleState()
self.control_handler = self
self.data_plane_handler = data_plane.InMemoryDataChannel()
self.worker = sdk_worker.SdkWorker(
self.state_handler, data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()))
def push(self, request):
logging.info('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.info('CONTROL RESPONSE %s', response)
self._responses.append(response)
def pull(self):
return self._responses.pop(0)
def done(self):
pass
def close(self):
pass
def data_operation_spec(self):
return None
class GrpcController(object):
"""An grpc based controller for fn API control, state and data planes."""
def __init__(self):
self.state_handler = FnApiRunner.SimpleState()
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.control_handler = streaming_rpc_handler(
beam_fn_api_pb2.BeamFnControlServicer, 'Control')
beam_fn_api_pb2.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
self.data_server.start()
self.control_server.start()
self.worker = sdk_worker.SdkHarness(
grpc.insecure_channel('localhost:%s' % self.control_port))
self.worker_thread = threading.Thread(target=self.worker.run)
logging.info('starting worker')
self.worker_thread.start()
def data_operation_spec(self):
url = 'localhost:%s' % self.data_port
remote_grpc_port = beam_fn_api_pb2.RemoteGrpcPort()
remote_grpc_port.api_service_descriptor.url = url
return remote_grpc_port
def close(self):
self.control_handler.done()
self.worker_thread.join()
self.data_plane_handler.close()
self.control_server.stop(5).wait()
self.data_server.stop(5).wait()
|
|
"""
Implementation of double ended queue.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Iterable
class Deque:
    """
    Deque data structure.

    Operations
    ----------
    append(val: Any) -> None
    appendleft(val: Any) -> None
    extend(iter: Iterable) -> None
    extendleft(iter: Iterable) -> None
    pop() -> Any
    popleft() -> Any

    Observers
    ---------
    is_empty() -> bool

    Attributes
    ----------
    _front: _Node
        front of the deque a.k.a. the first element

    _back: _Node
        back of the element a.k.a. the last element

    _len: int
        the number of nodes
    """

    __slots__ = ["_front", "_back", "_len"]

    @dataclass
    class _Node:
        """
        Representation of a node.
        Contains a value and a pointer to the next node as well as to the previous one.
        """

        val: Any = None
        next: Deque._Node | None = None
        prev: Deque._Node | None = None

    class _Iterator:
        """
        Helper class for iteration. Will be used to implement iteration.

        Attributes
        ----------
        _cur: _Node
            the current node of the iteration.
        """

        __slots__ = ["_cur"]

        def __init__(self, cur: Deque._Node | None) -> None:
            self._cur = cur

        def __iter__(self) -> Deque._Iterator:
            """
            >>> our_deque = Deque([1, 2, 3])
            >>> iterator = iter(our_deque)
            """
            return self

        def __next__(self) -> Any:
            """
            >>> our_deque = Deque([1, 2, 3])
            >>> iterator = iter(our_deque)
            >>> next(iterator)
            1
            >>> next(iterator)
            2
            >>> next(iterator)
            3
            """
            if self._cur is None:
                # finished iterating
                raise StopIteration
            val = self._cur.val
            self._cur = self._cur.next

            return val

    def __init__(self, iterable: Iterable[Any] | None = None) -> None:
        self._front: Any = None
        self._back: Any = None
        self._len: int = 0

        if iterable is not None:
            # append every value to the deque
            for val in iterable:
                self.append(val)

    def append(self, val: Any) -> None:
        """
        Adds val to the end of the deque.
        Time complexity: O(1)

        >>> our_deque_1 = Deque([1, 2, 3])
        >>> our_deque_1.append(4)
        >>> our_deque_1
        [1, 2, 3, 4]
        >>> our_deque_2 = Deque('ab')
        >>> our_deque_2.append('c')
        >>> our_deque_2
        ['a', 'b', 'c']
        >>> from collections import deque
        >>> deque_collections_1 = deque([1, 2, 3])
        >>> deque_collections_1.append(4)
        >>> deque_collections_1
        deque([1, 2, 3, 4])
        >>> deque_collections_2 = deque('ab')
        >>> deque_collections_2.append('c')
        >>> deque_collections_2
        deque(['a', 'b', 'c'])
        >>> list(our_deque_1) == list(deque_collections_1)
        True
        >>> list(our_deque_2) == list(deque_collections_2)
        True
        """
        node = self._Node(val, None, None)
        if self.is_empty():
            # front = back
            self._front = self._back = node
            self._len = 1
        else:
            # connect nodes
            self._back.next = node
            node.prev = self._back
            self._back = node  # assign new back to the new node

            self._len += 1

            # make sure there were no errors
            assert not self.is_empty(), "Error on appending value."

    def appendleft(self, val: Any) -> None:
        """
        Adds val to the beginning of the deque.
        Time complexity: O(1)

        >>> our_deque_1 = Deque([2, 3])
        >>> our_deque_1.appendleft(1)
        >>> our_deque_1
        [1, 2, 3]
        >>> our_deque_2 = Deque('bc')
        >>> our_deque_2.appendleft('a')
        >>> our_deque_2
        ['a', 'b', 'c']
        >>> from collections import deque
        >>> deque_collections_1 = deque([2, 3])
        >>> deque_collections_1.appendleft(1)
        >>> deque_collections_1
        deque([1, 2, 3])
        >>> deque_collections_2 = deque('bc')
        >>> deque_collections_2.appendleft('a')
        >>> deque_collections_2
        deque(['a', 'b', 'c'])
        >>> list(our_deque_1) == list(deque_collections_1)
        True
        >>> list(our_deque_2) == list(deque_collections_2)
        True
        """
        node = self._Node(val, None, None)
        if self.is_empty():
            # front = back
            self._front = self._back = node
            self._len = 1
        else:
            # connect nodes
            node.next = self._front
            self._front.prev = node
            self._front = node  # assign new front to the new node

            self._len += 1

            # make sure there were no errors
            assert not self.is_empty(), "Error on appending value."

    def extend(self, iter: Iterable[Any]) -> None:
        """
        Appends every value of iter to the end of the deque.
        Time complexity: O(n)

        >>> our_deque_1 = Deque([1, 2, 3])
        >>> our_deque_1.extend([4, 5])
        >>> our_deque_1
        [1, 2, 3, 4, 5]
        >>> our_deque_2 = Deque('ab')
        >>> our_deque_2.extend('cd')
        >>> our_deque_2
        ['a', 'b', 'c', 'd']
        >>> from collections import deque
        >>> deque_collections_1 = deque([1, 2, 3])
        >>> deque_collections_1.extend([4, 5])
        >>> deque_collections_1
        deque([1, 2, 3, 4, 5])
        >>> deque_collections_2 = deque('ab')
        >>> deque_collections_2.extend('cd')
        >>> deque_collections_2
        deque(['a', 'b', 'c', 'd'])
        >>> list(our_deque_1) == list(deque_collections_1)
        True
        >>> list(our_deque_2) == list(deque_collections_2)
        True
        """
        for val in iter:
            self.append(val)

    def extendleft(self, iter: Iterable[Any]) -> None:
        """
        Appends every value of iter to the beginning of the deque.
        Time complexity: O(n)

        >>> our_deque_1 = Deque([1, 2, 3])
        >>> our_deque_1.extendleft([0, -1])
        >>> our_deque_1
        [-1, 0, 1, 2, 3]
        >>> our_deque_2 = Deque('cd')
        >>> our_deque_2.extendleft('ba')
        >>> our_deque_2
        ['a', 'b', 'c', 'd']
        >>> from collections import deque
        >>> deque_collections_1 = deque([1, 2, 3])
        >>> deque_collections_1.extendleft([0, -1])
        >>> deque_collections_1
        deque([-1, 0, 1, 2, 3])
        >>> deque_collections_2 = deque('cd')
        >>> deque_collections_2.extendleft('ba')
        >>> deque_collections_2
        deque(['a', 'b', 'c', 'd'])
        >>> list(our_deque_1) == list(deque_collections_1)
        True
        >>> list(our_deque_2) == list(deque_collections_2)
        True
        """
        for val in iter:
            self.appendleft(val)

    def pop(self) -> Any:
        """
        Removes the last element of the deque and returns it.
        Time complexity: O(1)

        @returns topop.val: the value of the node to pop.

        >>> our_deque = Deque([1, 2, 3, 15182])
        >>> our_popped = our_deque.pop()
        >>> our_popped
        15182
        >>> our_deque
        [1, 2, 3]
        >>> Deque([1]).pop()
        1
        >>> from collections import deque
        >>> deque_collections = deque([1, 2, 3, 15182])
        >>> collections_popped = deque_collections.pop()
        >>> collections_popped
        15182
        >>> deque_collections
        deque([1, 2, 3])
        >>> list(our_deque) == list(deque_collections)
        True
        >>> our_popped == collections_popped
        True
        """
        # make sure the deque has elements to pop
        assert not self.is_empty(), "Deque is empty."

        topop = self._back
        self._back = self._back.prev  # set new back
        if self._back is None:
            # The deque is now empty: clear the dangling front reference
            # too.  (Previously this path dereferenced None and crashed.)
            self._front = None
        else:
            # drop the last node - python will deallocate memory automatically
            self._back.next = None

        self._len -= 1

        return topop.val

    def popleft(self) -> Any:
        """
        Removes the first element of the deque and returns it.
        Time complexity: O(1)

        @returns topop.val: the value of the node to pop.

        >>> our_deque = Deque([15182, 1, 2, 3])
        >>> our_popped = our_deque.popleft()
        >>> our_popped
        15182
        >>> our_deque
        [1, 2, 3]
        >>> Deque([1]).popleft()
        1
        >>> from collections import deque
        >>> deque_collections = deque([15182, 1, 2, 3])
        >>> collections_popped = deque_collections.popleft()
        >>> collections_popped
        15182
        >>> deque_collections
        deque([1, 2, 3])
        >>> list(our_deque) == list(deque_collections)
        True
        >>> our_popped == collections_popped
        True
        """
        # make sure the deque has elements to pop
        assert not self.is_empty(), "Deque is empty."

        topop = self._front
        self._front = self._front.next  # set new front and drop the first node
        if self._front is None:
            # The deque is now empty: clear the dangling back reference
            # too.  (Previously this path dereferenced None and crashed.)
            self._back = None
        else:
            self._front.prev = None

        self._len -= 1

        return topop.val

    def is_empty(self) -> bool:
        """
        Checks if the deque is empty.
        Time complexity: O(1)

        >>> our_deque = Deque([1, 2, 3])
        >>> our_deque.is_empty()
        False
        >>> our_empty_deque = Deque()
        >>> our_empty_deque.is_empty()
        True
        >>> from collections import deque
        >>> empty_deque_collections = deque()
        >>> list(our_empty_deque) == list(empty_deque_collections)
        True
        """
        return self._front is None

    def __len__(self) -> int:
        """
        Implements len() function. Returns the length of the deque.
        Time complexity: O(1)

        >>> our_deque = Deque([1, 2, 3])
        >>> len(our_deque)
        3
        >>> our_empty_deque = Deque()
        >>> len(our_empty_deque)
        0
        >>> from collections import deque
        >>> deque_collections = deque([1, 2, 3])
        >>> len(deque_collections)
        3
        >>> empty_deque_collections = deque()
        >>> len(empty_deque_collections)
        0
        >>> len(our_empty_deque) == len(empty_deque_collections)
        True
        """
        return self._len

    def __eq__(self, other: object) -> bool:
        """
        Implements "==" operator. Returns if *self* is equal to *other*.
        Time complexity: O(n)

        >>> our_deque_1 = Deque([1, 2, 3])
        >>> our_deque_2 = Deque([1, 2, 3])
        >>> our_deque_1 == our_deque_2
        True
        >>> our_deque_3 = Deque([1, 2])
        >>> our_deque_1 == our_deque_3
        False
        >>> from collections import deque
        >>> deque_collections_1 = deque([1, 2, 3])
        >>> deque_collections_2 = deque([1, 2, 3])
        >>> deque_collections_1 == deque_collections_2
        True
        >>> deque_collections_3 = deque([1, 2])
        >>> deque_collections_1 == deque_collections_3
        False
        >>> (our_deque_1 == our_deque_2) == (deque_collections_1 == deque_collections_2)
        True
        >>> (our_deque_1 == our_deque_3) == (deque_collections_1 == deque_collections_3)
        True
        """
        if not isinstance(other, Deque):
            return NotImplemented
        me = self._front
        oth = other._front

        # if the length of the deques are not the same, they are not equal
        if len(self) != len(other):
            return False

        while me is not None and oth is not None:
            # compare every value
            if me.val != oth.val:
                return False
            me = me.next
            oth = oth.next

        return True

    def __iter__(self) -> Deque._Iterator:
        """
        Implements iteration.
        Time complexity: O(1)

        >>> our_deque = Deque([1, 2, 3])
        >>> for v in our_deque:
        ...     print(v)
        1
        2
        3
        >>> from collections import deque
        >>> deque_collections = deque([1, 2, 3])
        >>> for v in deque_collections:
        ...     print(v)
        1
        2
        3
        """
        return Deque._Iterator(self._front)

    def __repr__(self) -> str:
        """
        Implements representation of the deque.
        Represents it as a list, with its values between '[' and ']'.
        Time complexity: O(n)

        >>> our_deque = Deque([1, 2, 3])
        >>> our_deque
        [1, 2, 3]
        """
        values_list = []
        aux = self._front
        while aux is not None:
            # append the values in a list to display
            values_list.append(aux.val)
            aux = aux.next

        return "[" + ", ".join(repr(val) for val in values_list) + "]"
if __name__ == "__main__":
    # Run the doctest examples embedded in the Deque docstrings.
    import doctest

    doctest.testmod()
|
|
#!/usr/bin/env python
#
# Helper for automating the creation of a package by looking at you
# current directory and asking the user questions.
#
# Available as either a stand-alone file or callable from the distutils2
# package:
#
# python -m distutils2.mkcfg
# or:
# python mkcfg.py
#
# Written by Sean Reifschneider <jafo@tummy.com>
#
# Original TODO list:
# Look for a license file and automatically add the category.
# When a .c file is found during the walk, can we add it as an extension?
# Ask if there is a maintainer different that the author
# Ask for the platform (can we detect this via "import win32" or something?)
# Ask for the dependencies.
# Ask for the Requires-Dist
# Ask for the Provides-Dist
# Ask for a description
# Detect scripts (not sure how. #! outside of package?)
import os
import sys
import glob
import re
import shutil
from ConfigParser import RawConfigParser
from textwrap import dedent
#try:
# from hashlib import md5
#except ImportError:
# from distutils2._backport.hashlib import md5
from hashlib import md5
# importing this with an underscore as it should be replaced by the
# dict form or another structures for all purposes
#from distutils2._trove import all_classifiers as _CLASSIFIERS_LIST
from _trove import all_classifiers as _CLASSIFIERS_LIST
#from distutils2._backport import sysconfig
import sysconfig
# Name of the configuration file the wizard generates.
_FILENAME = 'setup.cfg'

# Interactive help shown by ask() when the user answers '?' to a prompt,
# keyed by the logical name of the question.
_helptext = {
    'name': '''
The name of the program to be packaged, usually a single word composed
of lower-case characters such as "python", "sqlalchemy", or "CherryPy".
''',
    'version': '''
Version number of the software, typically 2 or 3 numbers separated by dots
such as "1.00", "0.6", or "3.02.01". "0.1.0" is recommended for initial
development.
''',
    'summary': '''
A one-line summary of what this project is or does, typically a sentence 80
characters or less in length.
''',
    'author': '''
The full name of the author (typically you).
''',
    'author_email': '''
E-mail address of the project author (typically you).
''',
    'do_classifier': '''
Trove classifiers are optional identifiers that allow you to specify the
intended audience by saying things like "Beta software with a text UI
for Linux under the PSF license. However, this can be a somewhat involved
process.
''',
    'packages': '''
You can provide a package name contained in your project.
''',
    'modules': '''
You can provide a python module contained in your project.
''',
    'extra_files': '''
You can provide extra files/dirs contained in your project.
It has to follow the template syntax. XXX add help here.
''',
    'home_page': '''
The home page for the project, typically starting with "http://".
''',
    'trove_license': '''
Optionally you can specify a license. Type a string that identifies a common
license, and then you can select a list of license specifiers.
''',
    'trove_generic': '''
Optionally, you can set other trove identifiers for things such as the
human language, programming language, user interface, etc...
''',
    'setup.py found': '''
The setup.py script will be executed to retrieve the metadata.
A wizard will be run if you answer "n",
''',
}
# XXX everything needs docstrings and tests (both low-level tests of various
# methods and functional tests of running the script)
def ask_yn(question, default=None, helptext=None):
    """Ask a yes/no question until a valid answer is given.

    Returns 'y' or 'n' (the lowercased first letter of the answer).
    Python 2 module: uses print statements.
    """
    question += ' (y/n)'
    while True:
        answer = ask(question, default, helptext, required=True)
        if answer and answer[0].lower() in 'yn':
            return answer[0].lower()

        print '\nERROR: You must select "Y" or "N".\n'
def ask(question, default=None, helptext=None, required=True,
        lengthy=False, multiline=False):
    """Prompt on stdin and return the stripped answer.

    Behavior:
      * '?' prints ``helptext`` and re-prompts;
      * an empty answer returns ``default`` when one is given, and
        re-prompts (with an error banner) when ``required`` and no default;
      * ``lengthy``/``multiline`` only change the prompt layout.
    """
    prompt = '%s: ' % (question,)
    if default:
        prompt = '%s [%s]: ' % (question, default)
    if default and len(question) + len(default) > 70:
        # Long question + default: move the default onto its own line.
        prompt = '%s\n [%s]: ' % (question, default)
    if lengthy or multiline:
        prompt += '\n > '

    if not helptext:
        helptext = 'No additional help available.'

    helptext = helptext.strip("\n")

    while True:
        sys.stdout.write(prompt)
        sys.stdout.flush()

        line = sys.stdin.readline().strip()
        if line == '?':
            print '=' * 70
            print helptext
            print '=' * 70
            continue
        if default and not line:
            return default
        if not line and required:
            print '*' * 70
            print 'This value cannot be empty.'
            print '==========================='
            if helptext:
                print helptext
            print '*' * 70
            continue
        return line
def _build_classifiers_dict(classifiers):
    """Build a nested dict (a trie) from 'A :: B :: C' classifier strings.

    Each ' :: '-separated segment becomes a dict key; leaves are empty
    dicts.  E.g. ['A :: B', 'A :: C'] -> {'A': {'B': {}, 'C': {}}}.
    """
    d = {}
    for key in classifiers:
        subDict = d
        for subkey in key.split(' :: '):
            # setdefault replaces the non-idiomatic
            # "if not subkey in subDict: subDict[subkey] = {}" test.
            subDict = subDict.setdefault(subkey, {})
    return d
# Trie of every known Trove classifier, keyed by ' :: ' path segments.
CLASSIFIERS = _build_classifiers_dict(_CLASSIFIERS_LIST)
def _build_licences(classifiers):
    """Collect the 'License :: ' classifiers from *classifiers*.

    Returns a list of (index, name) pairs where *index* is the position
    of the classifier in the input sequence and *name* is the lowercased
    last ' :: ' segment of the classifier string.
    """
    return [
        (position, entry.split(' :: ')[-1].lower())
        for position, entry in enumerate(classifiers)
        if entry.startswith('License :: ')
    ]
# (index, lowercased name) pairs for every 'License :: ' Trove classifier.
LICENCES = _build_licences(_CLASSIFIERS_LIST)
class MainProgram(object):
def __init__(self):
self.configparser = None
self.classifiers = set([])
self.data = {}
self.data['classifier'] = self.classifiers
self.data['packages'] = []
self.data['modules'] = []
self.data['platform'] = []
self.data['resources'] = []
self.data['extra_files'] = []
self.data['scripts'] = []
self.load_config_file()
def lookup_option(self, key):
if not self.configparser.has_option('DEFAULT', key):
return None
return self.configparser.get('DEFAULT', key)
def load_config_file(self):
self.configparser = RawConfigParser()
# TODO replace with section in distutils config file
#XXX freedesktop
self.configparser.read(os.path.expanduser('~/.mkcfg'))
self.data['author'] = self.lookup_option('author')
self.data['author_email'] = self.lookup_option('author_email')
def update_config_file(self):
valuesDifferent = False
# FIXME looking only for those two fields seems wrong
for compareKey in ('author', 'author_email'):
if self.lookup_option(compareKey) != self.data[compareKey]:
valuesDifferent = True
self.configparser.set('DEFAULT', compareKey,
self.data[compareKey])
if not valuesDifferent:
return
#XXX freedesktop
fp = open(os.path.expanduser('~/.mkcfgpy'), 'w')
try:
self.configparser.write(fp)
finally:
fp.close()
def load_existing_setup_script(self, parent_dir=None):
""" Generate a setup.cfg from an existing setup.py.
It only exports the distutils metadata (setuptools specific metadata
is not actually supported).
"""
if parent_dir is None:
setuppath = 'setup.py'
else:
setuppath = parent_dir + '/setup.py'
os.chdir(parent_dir)
if not os.path.exists(setuppath):
return
#else:
# ans = ask_yn(('A legacy setup.py has been found.\n'
# 'Would you like to convert it to a setup.cfg ?'),
# 'y',
# _helptext['setup.py found'])
# if ans != 'y':
# return
data = self.data
def setup(**attrs):
"""Mock the setup(**attrs) in order to retrive metadata."""
# use the distutils v1 processings to correctly parse metadata.
#XXX we could also use the setuptools distibution ???
from distutils.dist import Distribution
dist = Distribution(attrs)
dist.parse_config_files()
# 1. retrieves metadata that are quite similar PEP314<->PEP345
labels = (('name',) * 2,
('version',) * 2,
('author',) * 2,
('author_email',) * 2,
('maintainer',) * 2,
('maintainer_email',) * 2,
('description', 'summary'),
('long_description', 'description'),
('url', 'home_page'),
('platforms', 'platform'))
if sys.version[:3] >= '2.5':
labels += (('provides', 'provides-dist'),
('obsoletes', 'obsoletes-dist'),
('requires', 'requires-dist'),)
get = lambda lab: getattr(dist.metadata, lab.replace('-', '_'))
data.update((new, get(old)) for (old, new) in labels if get(old))
# 2. retrieves data that requires special processings.
data['classifier'].update(dist.get_classifiers() or [])
data['scripts'].extend(dist.scripts or [])
data['packages'].extend(dist.packages or [])
data['modules'].extend(dist.py_modules or [])
# 2.1 data_files -> resources.
if dist.data_files:
if len(dist.data_files) < 2 or \
isinstance(dist.data_files[1], str):
dist.data_files = [('', dist.data_files)]
# add tokens in the destination paths
vars = {'distribution.name': data['name']}
path_tokens = sysconfig.get_paths(vars=vars).items()
# sort tokens to use the longest one first
# TODO chain two sorted with key arguments, remove cmp
path_tokens.sort(cmp=lambda x, y: cmp(len(y), len(x)),
key=lambda x: x[1])
for dest, srcs in (dist.data_files or []):
dest = os.path.join(sys.prefix, dest)
for tok, path in path_tokens:
if dest.startswith(path):
dest = ('{%s}' % tok) + dest[len(path):]
files = [('/ '.join(src.rsplit('/', 1)), dest)
for src in srcs]
data['resources'].extend(files)
continue
# 2.2 package_data -> extra_files
package_dirs = dist.package_dir or {}
for package, extras in dist.package_data.iteritems() or []:
package_dir = package_dirs.get(package, package)
files = [os.path.join(package_dir, f) for f in extras]
data['extra_files'].extend(files)
# Use README file if its content is the desciption
if "description" in data:
ref = md5(re.sub('\s', '', self.data['description']).lower())
ref = ref.digest()
for readme in glob.glob('README*'):
fp = open(readme)
try:
contents = fp.read()
finally:
fp.close()
val = md5(re.sub('\s', '', contents.lower())).digest()
if val == ref:
del data['description']
data['description-file'] = readme
break
# apply monkey patch to distutils (v1) and setuptools (if needed)
# (abord the feature if distutils v1 has been killed)
try:
import distutils.core as DC
DC.setup # ensure distutils v1
except (ImportError, AttributeError):
return
saved_setups = [(DC, DC.setup)]
DC.setup = setup
try:
import setuptools
saved_setups.append((setuptools, setuptools.setup))
setuptools.setup = setup
except (ImportError, AttributeError):
pass
# get metadata by executing the setup.py with the patched setup(...)
success = False # for python < 2.4
try:
pyenv = globals().copy()
execfile(setuppath, pyenv)
success = True
finally: # revert monkey patches
for patched_module, original_setup in saved_setups:
patched_module.setup = original_setup
if not self.data:
raise ValueError('Unable to load metadata from setup.py')
return success
def inspect_file(self, path):
fp = open(path, 'r')
try:
for _ in xrange(10):
line = fp.readline()
m = re.match(r'^#!.*python((?P<major>\d)(\.\d+)?)?$', line)
if m:
if m.group('major') == '3':
self.classifiers.add(
'Programming Language :: Python :: 3')
else:
self.classifiers.add(
'Programming Language :: Python :: 2')
finally:
fp.close()
def inspect_directory(self):
dirName = os.path.basename(os.getcwd())
self.data['name'] = dirName
m = re.match(r'(.*)-(\d.+)', dirName)
if m:
self.data['name'] = m.group(1)
self.data['version'] = m.group(2)
def query_user(self):
self.data['name'] = ask('Project name', self.data['name'],
_helptext['name'])
self.data['version'] = ask('Current version number',
self.data.get('version'), _helptext['version'])
self.data['summary'] = ask('Package summary',
self.data.get('summary'), _helptext['summary'],
lengthy=True)
self.data['author'] = ask('Author name',
self.data.get('author'), _helptext['author'])
self.data['author_email'] = ask('Author e-mail address',
self.data.get('author_email'), _helptext['author_email'])
self.data['home_page'] = ask('Project Home Page',
self.data.get('home_page'), _helptext['home_page'],
required=False)
if ask_yn('Do you want me to automatically build the file list '
'with everything I can find in the current directory ? '
'If you say no, you will have to define them manually.') == 'y':
self._find_files()
else:
while ask_yn('Do you want to add a single module ?'
' (you will be able to add full packages next)',
helptext=_helptext['modules']) == 'y':
self._set_multi('Module name', 'modules')
while ask_yn('Do you want to add a package ?',
helptext=_helptext['packages']) == 'y':
self._set_multi('Package name', 'packages')
while ask_yn('Do you want to add an extra file ?',
helptext=_helptext['extra_files']) == 'y':
self._set_multi('Extra file/dir name', 'extra_files')
if ask_yn('Do you want to set Trove classifiers?',
helptext=_helptext['do_classifier']) == 'y':
self.set_classifier()
def _find_files(self):
    """Scan the current directory and fill in packages/modules/extra_files.

    Python packages and modules are detected by walking the tree; every
    other file is recorded as a plain "extra" file.
    """
    # we are looking for python modules and packages,
    # other stuff are added as regular files
    pkgs = self.data['packages']
    modules = self.data['modules']
    extra_files = self.data['extra_files']

    def is_package(path):
        # a directory is a package iff it contains an __init__.py
        return os.path.exists(os.path.join(path, '__init__.py'))

    curdir = os.getcwd()
    scanned = []
    # path prefixes / suffixes (relative to curdir) that are never collected
    _pref = ['lib', 'include', 'dist', 'build', '.', '~']
    _suf = ['.pyc']

    def to_skip(path):
        # NOTE(review): for directory entries this receives a bare name,
        # so relative() slices len(curdir)+1 chars off the name itself;
        # looks unintended -- verify against the callers below.
        path = relative(path)
        for pref in _pref:
            if path.startswith(pref):
                return True
        for suf in _suf:
            if path.endswith(suf):
                return True
        return False

    def relative(path):
        # drop the curdir prefix plus the following path separator
        return path[len(curdir) + 1:]

    def dotted(path):
        # turn a path under curdir into a dotted module/package name
        res = relative(path).replace(os.path.sep, '.')
        if res.endswith('.py'):
            res = res[:-len('.py')]
        return res

    # first pass : packages
    for root, dirs, files in os.walk(curdir):
        if to_skip(root):
            continue
        for dir_ in sorted(dirs):
            if to_skip(dir_):
                continue
            fullpath = os.path.join(root, dir_)
            dotted_name = dotted(fullpath)
            if is_package(fullpath) and dotted_name not in pkgs:
                pkgs.append(dotted_name)
                scanned.append(fullpath)

    # modules and extra files
    for root, dirs, files in os.walk(curdir):
        if to_skip(root):
            continue
        # anything under an already-collected package is skipped
        if True in [root.startswith(path) for path in scanned]:
            continue
        for file in sorted(files):
            fullpath = os.path.join(root, file)
            if to_skip(fullpath):
                continue
            # single module ?
            if os.path.splitext(file)[-1] == '.py':
                modules.append(dotted(fullpath))
            else:
                extra_files.append(relative(fullpath))
def _set_multi(self, question, name):
    """Prompt for one value and append it to ``self.data[name]`` if new."""
    answer = ask(question, helptext=_helptext[name]).strip()
    values = self.data[name]
    if answer not in values:
        values.append(answer)
def set_classifier(self):
    """Interactively collect Trove classifiers: status, license, then the rest."""
    self.set_devel_status(self.classifiers)
    self.set_license(self.classifiers)
    self.set_other_classifier(self.classifiers)
def set_other_classifier(self, classifiers):
    """Optionally walk the full Trove tree for additional classifiers."""
    if ask_yn('Do you want to set other trove identifiers', 'n',
              _helptext['trove_generic']) != 'y':
        return
    # start the recursive walk from the root of the classifier tree
    self.walk_classifiers(classifiers, [CLASSIFIERS], '')
def walk_classifiers(self, classifiers, trovepath, desc):
    """Recursively offer every node of the Trove tree for selection.

    :param classifiers: set collecting the chosen classifier strings
    :param trovepath: stack of nested trove dicts; the last is current
    :param desc: ' :: '-joined path down to the current node
    """
    trove = trovepath[-1]

    if not trove:
        return

    for key in sorted(trove):
        if len(trove[key]) == 0:
            # leaf node: offer it directly (desc[4:] drops the leading ' :: ')
            if ask_yn('Add "%s"' % desc[4:] + ' :: ' + key, 'n') == 'y':
                classifiers.add(desc[4:] + ' :: ' + key)
            continue

        if ask_yn('Do you want to set items under\n "%s" (%d sub-items)'
                  % (key, len(trove[key])), 'n',
                  _helptext['trove_generic']) == 'y':
            self.walk_classifiers(classifiers, trovepath + [trove[key]],
                                  desc + ' :: ' + key)
def set_license(self, classifiers):
    """Ask which license the project uses and add its Trove classifier.

    Re-prompts until the user aborts with an empty answer or picks a
    valid entry from the list of licenses matching their words.
    """
    while True:
        license = ask('What license do you use',
                      helptext=_helptext['trove_license'], required=False)
        if not license:
            return

        # collect every license whose entry contains one of the words
        license_words = license.lower().split(' ')
        found_list = []

        for index, licence in LICENCES:
            for word in license_words:
                if word in licence:
                    found_list.append(index)
                    break

        if len(found_list) == 0:
            print('ERROR: Could not find a matching license for "%s"' %
                  license)
            continue

        question = 'Matching licenses:\n\n'

        for index, list_index in enumerate(found_list):
            question += ' %s) %s\n' % (index + 1,
                                       _CLASSIFIERS_LIST[list_index])

        question += ('\nType the number of the license you wish to use or '
                     '? to try again:')

        choice = ask(question, required=False)

        if choice == '?':
            continue
        if choice == '':
            return

        try:
            index = found_list[int(choice) - 1]
        except (ValueError, IndexError):
            # BUG FIX: previously only ValueError was caught and control
            # fell through to classifiers.add() with a stale or undefined
            # 'index' (and an out-of-range number raised IndexError
            # uncaught); now any invalid selection re-prompts.
            print("ERROR: Invalid selection, type a number from the list "
                  "above.")
            continue

        classifiers.add(_CLASSIFIERS_LIST[index])
        return
def set_devel_status(self, classifiers):
    """Prompt for the project's development-status Trove classifier.

    Loops until the user enters a valid choice or nothing at all.
    """
    while True:
        choice = ask(dedent('''\
            Please select the project status:
            1 - Planning
            2 - Pre-Alpha
            3 - Alpha
            4 - Beta
            5 - Production/Stable
            6 - Mature
            7 - Inactive
            Status'''), required=False)

        if choice:
            try:
                choice = int(choice) - 1
                # NOTE(review): an input of '0' yields index -1 and thus
                # silently selects 'Inactive' -- confirm if intended.
                key = ['Development Status :: 1 - Planning',
                       'Development Status :: 2 - Pre-Alpha',
                       'Development Status :: 3 - Alpha',
                       'Development Status :: 4 - Beta',
                       'Development Status :: 5 - Production/Stable',
                       'Development Status :: 6 - Mature',
                       'Development Status :: 7 - Inactive'][choice]
                classifiers.add(key)
                return
            except (IndexError, ValueError):
                print ("ERROR: Invalid selection, type a single digit "
                       "number.")
def _dotted_packages(self, data):
packages = sorted(data)
modified_pkgs = []
for pkg in packages:
pkg = pkg.lstrip('./')
pkg = pkg.replace('/', '.')
modified_pkgs.append(pkg)
return modified_pkgs
def write_setup_script(self, parent_dir=None):
    """Write the collected metadata to the setup config file.

    Backs up an existing file to '<name>.old' first and refuses to run
    if such a backup already exists.

    NOTE(review): this function uses Python-2-only syntax (the octal
    literal 0644 and a 'print' statement at the end).
    """
    if parent_dir is not None:
        _filename = parent_dir + '/' + _FILENAME
    else:
        _filename = _FILENAME
    if os.path.exists(_filename):
        if os.path.exists('%s.old' % _filename):
            print("ERROR: %(name)s.old backup exists, please check that "
                  "current %(name)s is correct and remove %(name)s.old" %
                  {'name': _filename})
            return
        shutil.move(_filename, '%s.old' % _filename)

    fp = open(_filename, 'w')
    try:
        fp.write('[metadata]\n')
        # simple string entries
        for name in ('name', 'version', 'summary', 'download_url'):
            fp.write('%s = %s\n' % (name, self.data.get(name, 'UNKNOWN')))
        # optional string entries
        if 'keywords' in self.data and self.data['keywords']:
            fp.write('keywords = %s\n' % ' '.join(self.data['keywords']))
        for name in ('home_page', 'author', 'author_email',
                     'maintainer', 'maintainer_email', 'description-file'):
            if name in self.data and self.data[name]:
                fp.write('%s = %s\n' % (name, self.data[name]))
        if 'description' in self.data:
            # continuation lines of a multi-line value are marked with ' |'
            fp.write(
                'description = %s\n'
                % '\n |'.join(self.data['description'].split('\n')))
        # multiple use string entries
        for name in ('platform', 'supported-platform', 'classifier',
                     'requires-dist', 'provides-dist', 'obsoletes-dist',
                     'requires-external'):
            if not(name in self.data and self.data[name]):
                continue
            fp.write('%s = ' % name)
            fp.write(''.join(' %s\n' % val
                             for val in self.data[name]).lstrip())
        fp.write('\n[files]\n')
        for name in ('packages', 'modules', 'scripts',
                     'package_data', 'extra_files'):
            if not(name in self.data and self.data[name]):
                continue
            fp.write('%s = %s\n'
                     % (name, '\n '.join(self.data[name]).strip()))
        fp.write('\nresources =\n')
        for src, dest in self.data['resources']:
            fp.write(' %s = %s\n' % (src, dest))
        fp.write('\n')
    finally:
        fp.close()

    # Python 2: octal literal and print statement
    os.chmod(_filename, 0644)
    print 'Wrote "%s".' % _filename
def main():
    """Main entry point."""
    program = MainProgram()
    # uncomment when implemented
    if not program.load_existing_setup_script():
        program.inspect_directory()
        program.query_user()
        program.update_config_file()
    program.write_setup_script()
    # distutils2.util.cfg_to_args()


if __name__ == '__main__':
    main()
|
|
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
#from selenium.webdriver.common.action_chains import ActionChains
import time
import datetime
import csv
import random
import sys
import urllib2
import socket
#from metricsCollect import metricsCollect
#------------------------------------------------------------
#--- Get Interactive Input for number of loops to execute ---
#numLoops = int(sys.argv[1])
# argv[1] = seconds to run; convert to an absolute wall-clock end time
timeToRun=int(sys.argv[1])
endTime=int(time.time()+timeToRun)
#--- Browser definition for Grid usage ----------
browser = sys.argv[2]
#--- SeGrid Hub designation --------------------
hub = sys.argv[3]
instID = sys.argv[4]
# accumulator of statsd metric lines built up during a run, flushed via UDP
l=[]
#statsDHost='ec2-54-80-6-76.compute-1.amazonaws.com'
statsDHost='statsd.elsst.com'
"""
Define UDP connection to send data to statsD
"""
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
## statsd host & port
addr=(statsDHost,8125)
#--- Read List of PIIs -----------------
# Try the deployed location first, fall back to the working directory.
# NOTE(review): 'rb' file mode for csv.reader is Python 2 style.
PII=[]
try:
    csvRd = csv.reader(open('/home/ubuntu/PIIs_250k.csv','rb'))
    #csvRd = csv.reader(open('/home/ubuntu/PIIs_30k.csv','rb'))
    piiCount = 29000
except:
    csvRd = csv.reader(open('./PIIs_250k.csv','rb'))
    piiCount = 29000
for j in csvRd:
    PII.append(j)
#--- Read List of Journals -----------------
JRNL=[]
try:
    csvRd = csv.reader(open('/home/ubuntu/Journals.csv','rb'))
except:
    csvRd = csv.reader(open('./Journals.csv','rb'))
for j in csvRd:
    JRNL.append(j)
#--- Read List of Search Terms -----------------
SRCH=[]
try:
    csvRd = csv.reader(open('/home/ubuntu/SDSrchTerms.csv','rb'))
except:
    csvRd = csv.reader(open('./SDSrchTerms.csv','rb'))
for j in csvRd:
    SRCH.append(j)
#---------------------------------------
# Function to gracefully exit the browser
# after incrementing loop variables
#-----------
def egress():
    """Quit the global webdriver, swallowing URL errors raised on teardown."""
    try:
        driver.quit()
    # except WindowsError:
    #     print ("****WindowsError - pass? ****")
    #     pass
    except urllib2.URLError:
        # print ("----URLError - pass? ----")
        pass
#------------------------------------------------------
# Function to send error details for tracking
#------------------------------------------------------
def errorReport(hName,titlN,msg):
    """Build an error-counter line and print a diagnostic.

    NOTE(review): 'stats' is a *local* name here (no 'global'), so the
    counter string assembled in either branch is discarded when the
    function returns -- only the print has a lasting effect. The first
    bare 'stats' reference raises UnboundLocalError, which the bare
    except converts into the fresh-assignment branch.
    """
    # l.append('sd.Selenium.error.'+base+'.'+titlN+':1|c\n')
    try:
        stats
        stats+='sd.Selenium.error.'+base+'.'+titlN+':1|c\n'
    except:
        stats='sd.Selenium.error.'+base+'.'+titlN+':1|c\n'
    try:
        print('error - '+msg+' '+titlN+' '+driver.title)
    except:
        print('error - '+msg+' '+titlN)
#------------------------------------------------------
# Function to send error details for tracking
#------------------------------------------------------
def newBrowser(base):
    """Record a browser (re)start counter for statsd and log it.

    :param base: host shortname used as the metric-path prefix

    BUG FIX: the old body did ``stats += ...`` on a local name that was
    never initialized, so every call raised UnboundLocalError (callers
    silenced it with try/except and the print never ran). Append the
    counter to the module-level metric list ``l`` instead, as the
    commented-out original line intended.
    """
    l.append('sd.Selenium.'+base+'.newBrowser:1|c\n')
    print('new Browser - '+base)
#------------------------------------------------------
# Gather Performance data to send
#------------------------------------------------------
def metricsCollect(dtitl,d,base):
    """Sample Navigation Timing metrics from webdriver ``d`` into global ``l``.

    :param dtitl: page label used in the statsd metric path
    :param d: selenium webdriver queried via execute_script
    :param base: host shortname prefix for the metric path
    :return: the module-level metric list ``l``

    Any scripting failure is swallowed by the outer bare except.
    """
    # mets=''
    # metrics=['responseStart','responseEnd','domInteractive','loadEventEnd','domContentLoadedEventEnd']
    # statsd metric name -> performance.timing attribute to sample
    metrics={'ttfb':'responseStart','html':'responseEnd','pgi':'domInteractive','pgl':'loadEventEnd','startRender':'domContentLoadedEventEnd'}
    # print(dtitl+' - trying metricsCollect')
    try:
        # print('try some script execute')
        navS = d.execute_script('return performance.timing.navigationStart')
        # print('navS: '+str(navS))
        # print('try getting other metrics')
        for i in metrics:
            # values are reported relative to navigationStart, in ms
            compVal=int(d.execute_script('return performance.timing.'+metrics[i])-navS)
            if(compVal>0):
                l.append('sd.Selenium.'+base+'.'+dtitl+'.'+str(i)+':'+str(compVal)+'|ms\n')
        if (dtitl.find('Content_Delivery') != -1):
            try:
                # print('try return prs.abs_end')
                pcrT=d.execute_script("return prs.abs_end")
            except:
                pcrT=0
        elif(dtitl.find('Category_Home') != -1):
            try:
                prs=d.execute_script('return prs')
                prsT=[]
                prsT.append(prs['pcr'])
                prsT.append(prs['pcr_nav'])
                # take the larger of the two candidate timestamps
                pcrT=sorted(prsT)[1]
            except:
                pcrT=0
        else:
            # print('found a different page! - '+dtitl)
            try:
                # NOTE(review): bare execute_script (missing 'd.') raises
                # NameError, so this path always falls through to pcrT=0
                pcrT=execute_script("return prs['pcr']")
            except:
                try:
                    prs=execute_script('return prs')
                    pcrT=prs['pcr']
                except:
                    pcrT=0
        if pcrT > navS:
            l.append('sd.Selenium.'+base+'.'+dtitl+'.pcr:'+str(int(pcrT-navS))+'|ms\n')
        # print l
        # print UDPSock.sendto(mets,addr)
        # print('l '+l)
    except:
        # print('scripts no workie')
        pass
    return l
#------------------------------------------------------
# Function to execute a request or page interaction
#   handles associated error conditions
#   Makes call to collect page timing
#-------------
def getPage(resource):
    """Classify the page just loaded and collect its timing metrics.

    Relies on globals: driver, base, titl.

    NOTE(review): ``resource`` is the *result* of the driver call, which
    was already executed at the call site; referencing it below is a
    no-op. The bare name ``exit`` is likewise a no-op expression, not a
    call -- execution simply falls out of the branch.
    """
    try:
        #driver.get("http://"+baseURL)
        resource
        if 'Unable to process' in driver.title:
            # print 'Error - Unable to process, wait 60 seconds'
            errorReport(base,titl,'Unable to Process')
            time.sleep(60)
            exit
        elif 'ScienceDirect Error' in driver.title:
            dt = datetime.datetime.now()
            dTm = str(dt.strftime("%Y/%m/%d %H:%M:%S%Z"))
            # print 'SD-00x Error'+dTm
            errorReport(base,titl,'SD-00x')
            time.sleep(1)
            exit
        elif 'Error' in driver.title:
            # print 'Error, wait 60 seconds'
            time.sleep(10)
            exit
        else:
            # page looks healthy: give it a beat, then harvest timings
            # l.append('sd.Selenium.'+base+'.'+titl+'.pass:1|c\n')
            time.sleep(.5)
            # print('trying metricsCollect')
            try:
                # print('try to append to stats')
                metricsCollect(titl,driver,base)
                # print(testHolder)
                # stats +=''.join(testHolder)
                # print(stats)
            except:
                # print('no append to stats, create instead')
                # stats=''.join(metricsCollect(titl,driver,base))
                pass
    except urllib2.URLError:
        # print 'URLError'
        errorReport(base,titl,'URLError')
        pass
    except:
        # print (titl+' fail')
        errorReport(base,titl,'Other')
        pass
#=============================================================
#-------------------------------------------------------------
#  Script Begins Here
#-------------------------------------------------------------
#=============================================================
#--- Define static Article Value for looping
idx=0
# NOTE(review): Python 2 script ('print' statements below). The name
# 'loop' incremented near the end of the loop body is never initialized,
# so reaching that line raises NameError (silenced by the outer except).
while endTime > time.time():
    """
    Define capabilities of remote webdriver
    Specifically: assign browser type
    """
    try:
        # stats=''
        # print('loading browser')
        driver=webdriver.Remote("http://"+hub+":4200/wd/hub",desired_capabilities={"browserName": browser})
        #driver=webdriver.Chrome()
        # print('wait for it...')
        # print datetime.datetime.now()
        time.sleep(.25)
        # Initialize array for holding metrics to send to graphite
        # l = []
        #-------------------------------------------------
        # Define baseURL for following transactions
        #-------------------------------------------------
        baseIDX=int(random.random()*300)
        if (baseIDX%3==0):
            baseURL = 'cdc311-www.sciencedirect.com'
            base='cdc311'
        if (baseIDX%3==1):
            baseURL = 'cdc314-www.sciencedirect.com'
            base='cdc314'
        if (baseIDX%3==2):
            baseURL = 'cdc318-www.sciencedirect.com'
            base='cdc318'
        # NOTE(review): the two lines below override the random selection
        # above, pinning every iteration to cdc311 -- confirm if intended.
        baseURL = 'cdc311-www.sciencedirect.com'
        base='cdc311'
        try:
            newBrowser(base)
        except:
            pass
        #-------------------------------------------------
        # Load Home Page & Authenticate x% of iterations
        #-------------------------------------------------
        login = int(random.random()*100)
        if (login%100 < 50):
            #--- Request Home Page ----------------------------------------
            titl='Home_Page'
            getPage(driver.get("http://"+baseURL))
            #--- Find Login Form & Fill in data ---------------------------
            try:
                driver.find_element_by_id("loginPlusScript").click()
                driver.find_element_by_id('username').send_keys('Webmetrics')
                driver.find_element_by_id('password').send_keys('Scidir_test')
                #--- Submit the form based on element ID ----------------
                titl='U/P Auth to Home Page'
                driver.find_element_by_name("arrow").click()
                #--- If choose Org screen displayed, select appropriate value
                if 'Choose Organization' in driver.title:
                    titl='Choose Org to Home Page'
                    try:
                        driver.find_element_by_id('1').click()
                        driver.find_element_by_class_name('button').click()
                        #metricsCollect(titl)
                    except:
                        pass
            except:
                egress()
                exit
        #-------------------------------------------------
        # Add looping structure to minimize browser churn
        #-------------------------------------------------
        browserLoop=2
        while(browserLoop > 0):
            #-------------------------------------------------
            # View Article(s) with scrolling where possible
            #   View multiple articles in same session 33%
            #-------------------------------------------------
            artLoop = 5
            """
            if (login%3==0):
                artLoop=8
            else:
                artLoop=4
            """
            # print ('artLoop: '+str(artLoop))
            #Comment out for sequential evaluation of articles
            #idx = int(random.random()*499000)
            while artLoop > 0:
                #--- Define Random Value ---------------
                idx = int(random.random()*piiCount)
                idxPii=idx
                # print('articleIDX:'+str(idx))
                Pii=str(PII[idxPii]).strip('[\']')
                titl = 'Content_Delivery'
                #sStart = time.time()
                try:
                    print('try to get: '+"http://"+baseURL+"/science/article/pii/"+Pii)
                    getPage(driver.get("http://"+baseURL+"/science/article/pii/"+Pii))
                except urllib2.URLError:
                    time.sleep(.25)
                    pass
                try:
                    dtitl=driver.title[:50]
                    # print(dtitl[:50])
                except:
                    egress()
                    exit
                """
                if artLoop > 0:
                    artLoop = artLoop-1
                    idx = idx+1
                """
                try:
                    #if (login%6 == 0):
                    if (artLoop%5 < 2):
                    # if (artLoop%5 < 6):
                        titl='Search_Results'
                        SrIdx = int(random.random()*100)%100
                        # print('trying search')
                        srString=str(SRCH[SrIdx]).strip('[\']').decode('string_escape')
                        # print srString
                        try:
                            dtitl=driver.title#[:50]
                            # print 'dtitl: '+dtitl
                            # Article Page Search
                            s=driver.find_element_by_css_selector('input#quickSearch')
                            s.send_keys(srString)
                            getPage(driver.find_element_by_css_selector('input.submit').click())
                            # # Other Pages
                            # s=d.find_element_by_id("qs_all")
                            # >>> s.send_keys('berries')
                            # >>> d.find_element_by_id("submit_search").click()
                        except:
                            # print ('Search form not found '+baseURL)
                            time.sleep(.5)
                            pass
                    #if (login%6 > 4):
                    if (artLoop%5 > 2):
                        #--- Load Browse List - "Category List" -------------
                        titl='Category_List'
                        # print('trying browse')
                        getPage(driver.get("http://"+baseURL+"/science/journals"))
                        #--- Load Journal Home Pages - "Category Home" ------
                        jrnLoop = 2
                        while jrnLoop > 0:
                            titl='Category_Home'
                            idx=idx+jrnLoop
                            jIdx=idx%120
                            # print('trying journal')
                            getPage(driver.get("http://"+baseURL+"/science/journal/"+str(JRNL[jIdx]).strip('[\']')))
                            jrnLoop=jrnLoop-1
                except:
                    egress()
                    exit
                if artLoop > 0:
                    artLoop = artLoop-1
                    idx = idx+1
            browserLoop=browserLoop-1
            # print(browserLoop)
        # flush all collected metric lines to statsd in one UDP datagram
        print 'join statsDdata'
        statsDdata=''.join(l)
        print('here is statsDdata')
        print(statsDdata)
        try:
            print('try to send UDP message')
            print UDPSock.sendto(statsDdata,addr)
        except:
            print('UDP send failed')
            pass
        l=[]
        loop = loop+1
        idx=idx+1
        egress()
    except:
        # print('loading browser failed')
        # print time.time()
        # print titl
        errorReport(base,titl,'Start Browser Fail')
        #print(statsDdata)
        time.sleep(5)
        pass
|
|
"""JSON (de)serialization framework.
The framework presented here is somewhat based on `Go's "json" package`_
(especially the ``omitempty`` functionality).
.. _`Go's "json" package`: http://golang.org/pkg/encoding/json/
"""
import abc
import binascii
import logging
import OpenSSL
import six
from josepy import b64, errors, interfaces, util
logger = logging.getLogger(__name__)
class Field(object):
    """A single field of a JSON object.

    Designed to be used together with :class:`JSONObjectWithFields`.

    ``encoder``/``decoder`` are one-argument callables returning the
    serialized/deserialized value; on failure they raise
    :class:`~josepy.errors.SerializationError` /
    :class:`~josepy.errors.DeserializationError`. A ``decoder`` is
    expected to perform *partial* deserialization only.

    :ivar str json_name: Key under which the field appears in JSON.
    :ivar default: Value used when the field is absent from the JSON.
    :ivar bool omitempty: When ``True``, an empty value is dropped from
        the serialized object and ``default`` is substituted on
        deserialization; when ``False`` the field is mandatory both
        ways.
    """
    __slots__ = ('json_name', 'default', 'omitempty', 'fdec', 'fenc')

    def __init__(self, json_name, default=None, omitempty=False,
                 decoder=None, encoder=None):
        # pylint: disable=too-many-arguments
        self.json_name = json_name
        self.default = default
        self.omitempty = omitempty
        if decoder is None:
            decoder = self.default_decoder
        if encoder is None:
            encoder = self.default_encoder
        self.fdec = decoder
        self.fenc = encoder

    @classmethod
    def _empty(cls, value):
        """Report whether *value* counts as "empty" for this field.

        Booleans are never empty; subclasses may broaden the notion for
        more exotic data types.
        """
        if isinstance(value, bool):
            return False
        return not value

    def omit(self, value):
        """Should *value* be left out of the serialized output?"""
        return self.omitempty and self._empty(value)

    def _update_params(self, **kwargs):
        # Clone this field, overriding selected constructor arguments.
        params = {'json_name': self.json_name,
                  'default': self.default,
                  'omitempty': self.omitempty,
                  'decoder': self.fdec,
                  'encoder': self.fenc}
        params.update(kwargs)
        return type(self)(**params)  # pylint: disable=star-args

    def decoder(self, fdec):
        """Return a copy of this field using *fdec* as its decoder."""
        return self._update_params(decoder=fdec)

    def encoder(self, fenc):
        """Return a copy of this field using *fenc* as its encoder."""
        return self._update_params(encoder=fenc)

    def decode(self, value):
        """Run the field's decoder over *value*."""
        return self.fdec(value)

    def encode(self, value):
        """Run the field's encoder over *value*."""
        return self.fenc(value)

    @classmethod
    def default_decoder(cls, value):
        """Recursively freeze *value* into immutable containers.

        Lists become tuples and dicts become
        :class:`josepy.util.frozendict`; scalars pass through untouched.
        """
        if isinstance(value, list):
            return tuple(cls.default_decoder(item) for item in value)
        if isinstance(value, dict):
            frozen = dict((cls.default_decoder(k), cls.default_decoder(v))
                          for k, v in six.iteritems(value))
            return util.frozendict(frozen)
        return value  # integer or string

    @classmethod
    def default_encoder(cls, value):
        """Identity encoder: partial serialization needs no conversion."""
        return value
class JSONObjectWithFieldsMeta(abc.ABCMeta):
    """Metaclass for :class:`JSONObjectWithFields` and its subclasses.

    It makes sure that, for any class ``cls`` with ``__metaclass__``
    set to ``JSONObjectWithFieldsMeta``:

    1. All fields (attributes of type :class:`Field`) in the class
       definition are moved to the ``cls._fields`` dictionary, where
       keys are field attribute names and values are fields themselves.

    2. ``cls.__slots__`` is extended by all field attribute names
       (i.e. not :attr:`Field.json_name`). Original ``cls.__slots__``
       are stored in ``cls._orig_slots``.

    In a consequence, for a field attribute name ``some_field``,
    ``cls.some_field`` will be a slot descriptor and not an instance
    of :class:`Field`. For example::

        some_field = Field('someField', default=())

        class Foo(object):
            __metaclass__ = JSONObjectWithFieldsMeta
            __slots__ = ('baz',)
            some_field = some_field

        assert Foo.__slots__ == ('some_field', 'baz')
        assert Foo._orig_slots == ()
        assert Foo.some_field is not Field
        assert Foo._fields.keys() == ['some_field']
        assert Foo._fields['some_field'] is some_field

    As an implementation note, this metaclass inherits from
    :class:`abc.ABCMeta` (and not the usual :class:`type`) to mitigate
    the metaclass conflict (:class:`ImmutableMap` and
    :class:`JSONDeSerializable`, parents of :class:`JSONObjectWithFields`,
    use :class:`abc.ABCMeta` as its metaclass).
    """

    def __new__(mcs, name, bases, dikt):
        fields = {}
        # inherit field declarations from every base class first
        for base in bases:
            fields.update(getattr(base, '_fields', {}))
        # Do not reorder, this class might override fields from base classes!
        for key, value in tuple(six.iteritems(dikt)):
            # not six.iterkeys() (in-place edit!)
            if isinstance(value, Field):
                fields[key] = dikt.pop(key)
        dikt['_orig_slots'] = dikt.get('__slots__', ())
        # every field name becomes a slot so instances stay dict-less
        dikt['__slots__'] = tuple(
            list(dikt['_orig_slots']) + list(six.iterkeys(fields)))
        dikt['_fields'] = fields
        return abc.ABCMeta.__new__(mcs, name, bases, dikt)
@six.add_metaclass(JSONObjectWithFieldsMeta)
class JSONObjectWithFields(util.ImmutableMap, interfaces.JSONDeSerializable):
    # pylint: disable=too-few-public-methods
    """JSON object with fields.

    Example::

        class Foo(JSONObjectWithFields):
            bar = Field('Bar')
            empty = Field('Empty', omitempty=True)

            @bar.encoder
            def bar(value):
                return value + 'bar'

            @bar.decoder
            def bar(value):
                if not value.endswith('bar'):
                    raise errors.DeserializationError('No bar suffix!')
                return value[:-3]

        assert Foo(bar='baz').to_partial_json() == {'Bar': 'bazbar'}
        assert Foo.from_json({'Bar': 'bazbar'}) == Foo(bar='baz')
        assert (Foo.from_json({'Bar': 'bazbar', 'Empty': '!'})
                == Foo(bar='baz', empty='!'))
        assert Foo(bar='baz').bar == 'baz'
    """

    @classmethod
    def _defaults(cls):
        """Get default fields values."""
        return dict([(slot, field.default) for slot, field
                     in six.iteritems(cls._fields)])

    def __init__(self, **kwargs):
        # pylint: disable=star-args
        # fill in declared defaults first, explicit kwargs win
        super(JSONObjectWithFields, self).__init__(
            **(dict(self._defaults(), **kwargs)))

    def encode(self, name):
        """Encode a single field.

        :param str name: Name of the field to be encoded.

        :raises errors.SerializationError: if field cannot be serialized
        :raises errors.Error: if field could not be found
        """
        try:
            field = self._fields[name]
        except KeyError:
            raise errors.Error("Field not found: {0}".format(name))
        return field.encode(getattr(self, name))

    def fields_to_partial_json(self):
        """Serialize fields to JSON."""
        jobj = {}
        omitted = set()
        for slot, field in six.iteritems(self._fields):
            value = getattr(self, slot)
            if field.omit(value):
                # tracked but not serialized
                omitted.add((slot, value))
            else:
                try:
                    jobj[field.json_name] = field.encode(value)
                except errors.SerializationError as error:
                    raise errors.SerializationError(
                        'Could not encode {0} ({1}): {2}'.format(
                            slot, value, error))
        return jobj

    def to_partial_json(self):
        return self.fields_to_partial_json()

    @classmethod
    def _check_required(cls, jobj):
        # collect every missing mandatory field and report them together
        missing = set()
        for _, field in six.iteritems(cls._fields):
            if not field.omitempty and field.json_name not in jobj:
                missing.add(field.json_name)
        if missing:
            raise errors.DeserializationError(
                'The following fields are required: {0}'.format(
                    ','.join(missing)))

    @classmethod
    def fields_from_json(cls, jobj):
        """Deserialize fields from JSON."""
        cls._check_required(jobj)
        fields = {}
        for slot, field in six.iteritems(cls._fields):
            if field.json_name not in jobj and field.omitempty:
                # absent-but-optional: substitute the declared default
                fields[slot] = field.default
            else:
                value = jobj[field.json_name]
                try:
                    fields[slot] = field.decode(value)
                except errors.DeserializationError as error:
                    raise errors.DeserializationError(
                        'Could not decode {0!r} ({1!r}): {2}'.format(
                            slot, value, error))
        return fields

    @classmethod
    def from_json(cls, jobj):
        return cls(**cls.fields_from_json(jobj))
def encode_b64jose(data):
    """Encode JOSE Base-64 field.

    :param bytes data:
    :rtype: `unicode`
    """
    # b64encode produces ASCII characters only
    return b64.b64encode(data).decode('ascii')
def decode_b64jose(data, size=None, minimum=False):
    """Decode JOSE Base-64 field.

    :param unicode data:
    :param int size: Required length (after decoding).
    :param bool minimum: If ``True``, then `size` will be treated as
        minimum required length, as opposed to exact equality.

    :rtype: bytes
    :raises josepy.errors.DeserializationError: on invalid base64 input
        or when the decoded payload violates the size constraint.
    """
    # invalid base64 raises binascii.Error on Python 3, TypeError on Python 2
    error_cls = TypeError if six.PY2 else binascii.Error
    try:
        decoded = b64.b64decode(data.encode())
    except error_cls as error:
        raise errors.DeserializationError(error)
    if size is not None and ((not minimum and len(decoded) != size) or
                             (minimum and len(decoded) < size)):
        raise errors.DeserializationError(
            "Expected at least or exactly {0} bytes".format(size))
    return decoded
def encode_hex16(value):
    """Hex-encode a byte string into text.

    :param bytes value:
    :rtype: unicode
    """
    hexed = binascii.hexlify(value)
    return hexed.decode()
def decode_hex16(value, size=None, minimum=False):
    """Decode hexlified field.

    :param unicode value:
    :param int size: Required length (after decoding).
    :param bool minimum: If ``True``, then `size` will be treated as
        minimum required length, as opposed to exact equality.

    :rtype: bytes
    :raises josepy.errors.DeserializationError: on invalid hex input or
        when the decoded payload violates the size constraint.
    """
    value = value.encode()
    # each decoded byte corresponds to two hex characters
    if size is not None and ((not minimum and len(value) != size * 2) or
                             (minimum and len(value) < size * 2)):
        # CONSISTENCY FIX: carry an explanatory message like
        # decode_b64jose does, instead of an empty DeserializationError.
        raise errors.DeserializationError(
            "Expected at least or exactly {0} bytes".format(size))
    # invalid hex raises binascii.Error on Python 3, TypeError on Python 2
    error_cls = TypeError if six.PY2 else binascii.Error
    try:
        return binascii.unhexlify(value)
    except error_cls as error:
        raise errors.DeserializationError(error)
def encode_cert(cert):
    """Encode certificate as JOSE Base-64 DER.

    :type cert: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
    :rtype: unicode
    """
    # dump the DER form of the wrapped X509, then base64 it
    return encode_b64jose(OpenSSL.crypto.dump_certificate(
        OpenSSL.crypto.FILETYPE_ASN1, cert.wrapped))
def decode_cert(b64der):
    """Decode JOSE Base-64 DER-encoded certificate.

    :param unicode b64der:
    :rtype: `OpenSSL.crypto.X509` wrapped in `.ComparableX509`
    :raises josepy.errors.DeserializationError: if the DER cannot be parsed
    """
    try:
        return util.ComparableX509(OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
    except OpenSSL.crypto.Error as error:
        raise errors.DeserializationError(error)
def encode_csr(csr):
    """Encode CSR as JOSE Base-64 DER.

    :type csr: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
    :rtype: unicode
    """
    # dump the DER form of the wrapped X509Req, then base64 it
    return encode_b64jose(OpenSSL.crypto.dump_certificate_request(
        OpenSSL.crypto.FILETYPE_ASN1, csr.wrapped))
def decode_csr(b64der):
    """Decode JOSE Base-64 DER-encoded CSR.

    :param unicode b64der:
    :rtype: `OpenSSL.crypto.X509Req` wrapped in `.ComparableX509`
    :raises josepy.errors.DeserializationError: if the DER cannot be parsed
    """
    try:
        return util.ComparableX509(OpenSSL.crypto.load_certificate_request(
            OpenSSL.crypto.FILETYPE_ASN1, decode_b64jose(b64der)))
    except OpenSSL.crypto.Error as error:
        raise errors.DeserializationError(error)
class TypedJSONObjectWithFields(JSONObjectWithFields):
    """JSON object with type."""

    # Type of the object. Subclasses must override.
    typ = NotImplemented
    """Type of the object. Subclasses must override."""

    type_field_name = "type"
    """Field name used to distinguish different object types.

    Subclasses will probably have to override this.
    """

    # Registry mapping type strings -> concrete classes; populated by
    # register(). Subclasses must override with a real dict.
    TYPES = NotImplemented
    """Types registered for JSON deserialization"""

    @classmethod
    def register(cls, type_cls, typ=None):
        """Register class for JSON deserialization."""
        typ = type_cls.typ if typ is None else typ
        cls.TYPES[typ] = type_cls
        # returned unchanged so this can be used as a class decorator
        return type_cls

    @classmethod
    def get_type_cls(cls, jobj):
        """Get the registered class for ``jobj``."""
        if cls in six.itervalues(cls.TYPES):
            if cls.type_field_name not in jobj:
                raise errors.DeserializationError(
                    "Missing type field ({0})".format(cls.type_field_name))
            # cls is already registered type_cls, force to use it
            # so that, e.g Revocation.from_json(jobj) fails if
            # jobj["type"] != "revocation".
            return cls

        if not isinstance(jobj, dict):
            raise errors.DeserializationError(
                "{0} is not a dictionary object".format(jobj))
        try:
            typ = jobj[cls.type_field_name]
        except KeyError:
            raise errors.DeserializationError("missing type field")

        try:
            return cls.TYPES[typ]
        except KeyError:
            raise errors.UnrecognizedTypeError(typ, jobj)

    def to_partial_json(self):
        """Get JSON serializable object.

        :returns: Serializable JSON object representing ACME typed object.
            :meth:`validate` will almost certainly not work, due to reasons
            explained in :class:`josepy.interfaces.IJSONSerializable`.
        :rtype: dict
        """
        jobj = self.fields_to_partial_json()
        jobj[self.type_field_name] = self.typ
        return jobj

    @classmethod
    def from_json(cls, jobj):
        """Deserialize ACME object from valid JSON object.

        :raises josepy.errors.UnrecognizedTypeError: if type
            of the ACME object has not been registered.
        """
        # make sure subclasses don't cause infinite recursive from_json calls
        type_cls = cls.get_type_cls(jobj)
        return type_cls(**type_cls.fields_from_json(jobj))
|
|
"""
Created on Apr 18, 2017
@author: Christopher Bruns
"""
import os
import numpy
from OpenGL import GL
from OpenGL.GL.shaders import compileShader, compileProgram
from OpenGL.arrays import vbo
from openvr.glframework.glmatrix import identity, pack, rotate_y, scale
from openvr.glframework import shader_string
class TriangleActor(object):
    """Draws a single RGB-colored triangle with a small GLSL 4.50 pipeline.

    Lifecycle: construct, then init_gl() with a current GL context,
    display_gl() per frame, dispose_gl() to release resources.
    """

    def __init__(self):
        self.vao = None
        # hard-code shader parameter location index
        self.mvp_location = 0
        self.program = None
        # Create triangle geometry: corner 2D location and colors
        self.vertices = vbo.VBO(numpy.array([
            [-0.6, -0.4, 1.0, 0.0, 0.0],  # x, y, r, g, b
            [0.6, -0.4, 0.0, 1.0, 0.0],
            [0.0, 0.6, 0.0, 0.0, 1.0],
        ], dtype='float32'))

    def init_gl(self):
        """Create VAO, vertex attribute bindings, and the shader program."""
        # Create vertex array object, apparently required for modern OpenGL
        self.vao = GL.glGenVertexArrays(1)
        GL.glBindVertexArray(self.vao)
        self.vertices.bind()
        # hard-code shader parameter location indices
        vpos_location = 0
        vcol_location = 1
        GL.glEnableVertexAttribArray(vpos_location)
        float_size = self.vertices.dtype.itemsize  # 4 bytes per float32
        # interleaved layout: 2 position floats then 3 color floats per vertex
        GL.glVertexAttribPointer(vpos_location, 2, GL.GL_FLOAT, False,
                                 float_size * 5, self.vertices + float_size * 0)
        GL.glEnableVertexAttribArray(vcol_location)
        GL.glVertexAttribPointer(vcol_location, 3, GL.GL_FLOAT, False,
                                 float_size * 5, self.vertices + float_size * 2)
        # Create GLSL shader program
        vertex_shader = compileShader(
            """#version 450 core
            #line 50
            layout(location = %d) uniform mat4 MVP = mat4(1);
            layout(location = %d) in vec2 vPos;
            layout(location = %d) in vec3 vCol;
            out vec3 color;
            void main()
            {
                gl_Position = MVP * vec4(vPos, 0.0, 1.0);
                color = vCol;
            }
            """ % (self.mvp_location, vpos_location, vcol_location),
            GL.GL_VERTEX_SHADER)
        fragment_shader = compileShader(
            """#version 450 core
            #line 68
            in vec3 color;
            out vec4 fragColor;
            void main()
            {
                fragColor = vec4(color, 1);
            }
            """,
            GL.GL_FRAGMENT_SHADER)
        self.program = compileProgram(vertex_shader, fragment_shader)

    def display_gl(self, model_view, projection):
        """Render the triangle with the given model-view and projection."""
        GL.glBindVertexArray(self.vao)
        GL.glUseProgram(self.program)
        # NOTE(review): multiplication order assumes matrices laid out for
        # model_view * projection composition -- verify against pack().
        mvp = numpy.matrix(model_view) * projection
        GL.glUniformMatrix4fv(self.mvp_location, 1, False, pack(mvp))
        GL.glDrawArrays(GL.GL_TRIANGLES, 0, 3)

    def dispose_gl(self):
        """Release the VAO, vertex buffer and shader program (if created)."""
        if self.vao:
            GL.glDeleteVertexArrays(1, [self.vao, ])
            self.vertices.delete()
            GL.glDeleteProgram(self.program)
class ObjActor(object):
    """Renders a mesh parsed from a Wavefront OBJ stream.

    Reads 'v' (position), 'vn' (normal) and 'f' (face) records into an
    interleaved position/normal vertex buffer plus a 16-bit element index
    buffer, then draws with a shader that colors fragments by normal.
    Only triangular faces render correctly (see todo below).
    """
    def __init__(self, obj_stream):
        # obj_stream: any iterable of OBJ text lines (e.g. an open file)
        self.model_matrix = identity()
        self.vao = None
        self.shader = None
        self.vertexes = list()
        vertex_normals = list()
        self.normal_for_vertex = dict()
        self.faces = list()
        fh = obj_stream
        for line in fh:
            if line.startswith('#'):
                # e.g. "# Blender v2.65 (sub 0) OBJ File"
                continue # ignore comments
            elif line.startswith('o '):
                # e.g. "o teapot.005"
                continue # ignore object names
            elif line.startswith('v '):
                # e.g. "v -0.498530 0.712498 -0.039883"
                vec3 = [float(x) for x in line.split()[1:4]]
                self.vertexes.append(vec3)
            elif line.startswith('vn '):
                # e.g. "vn -0.901883 0.415418 0.118168"
                vec3 = [float(x) for x in line.split()[1:4]]
                vertex_normals.append(vec3)
            elif line.startswith('s '):
                continue # ignore whatever "s" is
                # print(line)
            elif line.startswith('f '):
                # e.g. "f 1//1 2//2 3//3": take vertex and normal indices
                face = list()
                for c in line.split()[1:]:
                    v, n = [int(x) for x in c.split('/')[0:3:2]]
                    # OBJ indices are 1-based; convert to 0-based
                    face.append(v - 1) # vertex index
                    self.normal_for_vertex[v - 1] = vertex_normals[n - 1]
                self.faces.append(face)
                # print(line)
                # print(face)
            else:
                # unrecognized record type: report it and stop parsing
                print(line)
                break
        # Interleave [px py pz nx ny nz] per vertex and flatten face indices.
        self.vbo = list()
        ibo = list()
        for i in range(len(self.vertexes)):
            v = self.vertexes[i]
            n = self.normal_for_vertex[i]
            self.vbo.append(v)
            self.vbo.append(n)
        for f in self.faces:
            for v in f:
                ibo.append(v)
        # todo: only works for single triangle faces at the moment...
        self.element_count = len(ibo)
        self.vbo = numpy.array(self.vbo, 'float32')
        ibo = numpy.array(ibo, 'int16')
        self.vbo = vbo.VBO(self.vbo)
        self.ibo = vbo.VBO(ibo, target=GL.GL_ELEMENT_ARRAY_BUFFER)
    def init_gl(self):
        """Allocate GL resources: VAO, buffer bindings, and the shader pair."""
        self.vao = GL.glGenVertexArrays(1)
        GL.glBindVertexArray(self.vao)
        self.ibo.bind()
        self.vbo.bind()
        GL.glEnableVertexAttribArray(0) # vertex location
        float_size = self.vbo.dtype.itemsize # 4 bytes per float32
        # stride is 6 floats (3 position + 3 normal) per vertex record
        GL.glVertexAttribPointer(0, 3, GL.GL_FLOAT, False,
                                 6 * float_size, self.vbo + 0 * float_size)
        GL.glEnableVertexAttribArray(1) # vertex normal
        GL.glVertexAttribPointer(1, 3, GL.GL_FLOAT, False,
                                 6 * float_size, self.vbo + 3 * float_size)
        vertex_shader = compileShader(
            shader_string("""
            layout(location = 0) in vec3 in_Position;
            layout(location = 1) in vec3 in_Normal;
            layout(location = 0) uniform mat4 projection = mat4(1);
            layout(location = 1) uniform mat4 model_view = mat4(1);
            out vec3 normal;
            void main()
            {
                gl_Position = projection * model_view * vec4(in_Position, 1.0);
                mat4 normal_matrix = transpose(inverse(model_view));
                normal = normalize((normal_matrix * vec4(in_Normal, 0)).xyz);
            }
            """),
            GL.GL_VERTEX_SHADER)
        fragment_shader = compileShader(
            shader_string(""" in vec3 normal;
            out vec4 fragColor;
            vec4 color_by_normal(in vec3 n) {
                return vec4(0.5 * (normalize(n) + vec3(1)), 1);
            }
            void main()
            {
                fragColor = color_by_normal(normal);
            }
            """),
            GL.GL_FRAGMENT_SHADER)
        self.shader = compileProgram(vertex_shader, fragment_shader)
        GL.glEnable(GL.GL_DEPTH_TEST)
    def display_gl(self, model_view, projection):
        """Draw the mesh given 4x4 model-view and projection matrices."""
        GL.glBindVertexArray(self.vao)
        GL.glUseProgram(self.shader)
        # NOTE(review): model_matrix is applied on the left (row-vector
        # convention) - confirm against the host app's matrix helpers.
        m = self.model_matrix * model_view
        GL.glUniformMatrix4fv(0, 1, False, pack(projection))
        GL.glUniformMatrix4fv(1, 1, False, pack(m))
        GL.glDrawElements(GL.GL_TRIANGLES, self.element_count, GL.GL_UNSIGNED_SHORT, None)
    def dispose_gl(self):
        """Release the GL resources created in init_gl()."""
        if self.vao:
            GL.glDeleteVertexArrays(1, [self.vao, ])
            self.ibo.delete()
            self.vbo.delete()
            GL.glDeleteProgram(self.shader)
        self.vao = None
self.vao = None
class TeapotActor(ObjActor):
    """ObjActor preloaded with the wt_teapot.obj mesh shipped next to this file."""
    def __init__(self):
        here = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(here, 'wt_teapot.obj')) as obj_file:
            super(TeapotActor, self).__init__(obj_stream=obj_file)
if __name__ == "__main__":
    from openvr.glframework.glfw_app import GlfwVrApp
    import glfw
    # Demo: render a spinning teapot in VR until the GLFW window closes.
    teapot = TeapotActor()
    s = 0.2 # size of teapot in meters
    with GlfwVrApp(actors=[teapot, ]) as app:
        while not glfw.window_should_close(app.window):
            # scale teapot to original Melitta model aspect ratio
            teapot.model_matrix = scale(s, s*4/3, s) * rotate_y(glfw.get_time())
            app.render_scene()
|
|
from django.contrib.gis.db.models.fields import BaseSpatialField
from django.contrib.gis.measure import Distance
from django.db import NotSupportedError
from django.db.models.expressions import Expression
from django.db.models.lookups import Lookup, Transform
from django.db.models.sql.query import Query
from django.utils.regex_helper import _lazy_re_compile
class RasterBandTransform(Transform):
    """Transform that carries a raster band selection without emitting SQL.

    The band index it records is consumed later by
    GISLookup.process_band_indices(); the SQL is simply the compiled lhs.
    """
    def as_sql(self, compiler, connection):
        lhs_sql, params = compiler.compile(self.lhs)
        return lhs_sql, params
class GISLookup(Lookup):
    """
    Base class for all spatial lookups. The rhs may be a bare geometry value
    or a tuple/list of (geometry, extra params...); the extras carry raster
    band indices, distances, or directives such as 'spheroid'.
    """
    sql_template = None
    transform_func = None
    distance = False
    band_rhs = None  # 1-based rhs raster band index, set by process_band_indices()
    band_lhs = None  # 1-based lhs raster band index, set by process_band_indices()
    def __init__(self, lhs, rhs):
        # Peel the geometry off the front; the rest become rhs_params.
        rhs, *self.rhs_params = rhs if isinstance(rhs, (list, tuple)) else [rhs]
        super().__init__(lhs, rhs)
        self.template_params = {}
        self.process_rhs_params()
    def process_rhs_params(self):
        # Validate the number of extra rhs params and pull out band indices.
        if self.rhs_params:
            # Check if a band index was passed in the query argument.
            if len(self.rhs_params) == (2 if self.lookup_name == 'relate' else 1):
                self.process_band_indices()
            elif len(self.rhs_params) > 1:
                raise ValueError('Tuple too long for lookup %s.' % self.lookup_name)
        elif isinstance(self.lhs, RasterBandTransform):
            self.process_band_indices(only_lhs=True)
    def process_band_indices(self, only_lhs=False):
        """
        Extract the lhs band index from the band transform class and the rhs
        band index from the input tuple.
        """
        # PostGIS band indices are 1-based, so the band index needs to be
        # increased to be consistent with the GDALRaster band indices.
        if only_lhs:
            self.band_rhs = 1
            self.band_lhs = self.lhs.band_index + 1
            return
        if isinstance(self.lhs, RasterBandTransform):
            self.band_lhs = self.lhs.band_index + 1
        else:
            self.band_lhs = 1
        # The first remaining rhs param is the band; the rest stay in rhs_params.
        self.band_rhs, *self.rhs_params = self.rhs_params
    def get_db_prep_lookup(self, value, connection):
        # get_db_prep_lookup is called by process_rhs from super class
        return ('%s', [connection.ops.Adapter(value)])
    def process_rhs(self, compiler, connection):
        if isinstance(self.rhs, Query):
            # If rhs is some Query, don't touch it.
            return super().process_rhs(compiler, connection)
        if isinstance(self.rhs, Expression):
            self.rhs = self.rhs.resolve_expression(compiler.query)
        rhs, rhs_params = super().process_rhs(compiler, connection)
        # Wrap the placeholder with any backend-specific geometry adaptation.
        placeholder = connection.ops.get_geom_placeholder(self.lhs.output_field, self.rhs, compiler)
        return placeholder % rhs, rhs_params
    def get_rhs_op(self, connection, rhs):
        # Unlike BuiltinLookup, the GIS get_rhs_op() implementation should return
        # an object (SpatialOperator) with an as_sql() method to allow for more
        # complex computations (where the lhs part can be mixed in).
        return connection.ops.gis_operators[self.lookup_name]
    def as_sql(self, compiler, connection):
        lhs_sql, sql_params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        sql_params.extend(rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql, 'value': '%s', **self.template_params}
        rhs_op = self.get_rhs_op(connection, rhs_sql)
        # Delegate final SQL assembly to the backend's SpatialOperator.
        return rhs_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
@BaseSpatialField.register_lookup
class OverlapsLeftLookup(GISLookup):
    """
    The 'overlaps_left' operator returns true if A's bounding box overlaps or is to the
    left of B's bounding box.
    """
    lookup_name = 'overlaps_left'
@BaseSpatialField.register_lookup
class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
    right of B's bounding box.
    """
    lookup_name = 'overlaps_right'
@BaseSpatialField.register_lookup
class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
    B's bounding box.
    """
    lookup_name = 'overlaps_below'
@BaseSpatialField.register_lookup
class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
    B's bounding box.
    """
    lookup_name = 'overlaps_above'
@BaseSpatialField.register_lookup
class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the left
    of B's bounding box.
    """
    lookup_name = 'left'
@BaseSpatialField.register_lookup
class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the right
    of B's bounding box.
    """
    lookup_name = 'right'
@BaseSpatialField.register_lookup
class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly below B's
    bounding box.
    """
    lookup_name = 'strictly_below'
@BaseSpatialField.register_lookup
class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly above B's
    bounding box.
    """
    lookup_name = 'strictly_above'
@BaseSpatialField.register_lookup
class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'
# 'exact' is registered as an alias for 'same_as' on spatial fields.
BaseSpatialField.register_lookup(SameAsLookup, 'exact')
@BaseSpatialField.register_lookup
class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely
    contains B's bounding box.
    """
    lookup_name = 'bbcontains'
@BaseSpatialField.register_lookup
class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
    """
    lookup_name = 'bboverlaps'
@BaseSpatialField.register_lookup
class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely contained
    by B's bounding box.
    """
    lookup_name = 'contained'
# ------------------
# Geometry functions
# ------------------
# SQL for each of these comes from the backend's gis_operators entry for the
# lookup_name (see GISLookup.get_rhs_op).
@BaseSpatialField.register_lookup
class ContainsLookup(GISLookup):
    """Match geometries that spatially contain the given geometry."""
    lookup_name = 'contains'
@BaseSpatialField.register_lookup
class ContainsProperlyLookup(GISLookup):
    """Match geometries that contain the given geometry without boundary contact."""
    lookup_name = 'contains_properly'
@BaseSpatialField.register_lookup
class CoveredByLookup(GISLookup):
    """Match geometries with no point outside the given geometry."""
    lookup_name = 'coveredby'
@BaseSpatialField.register_lookup
class CoversLookup(GISLookup):
    """Match geometries such that no point of the given geometry lies outside them."""
    lookup_name = 'covers'
@BaseSpatialField.register_lookup
class CrossesLookup(GISLookup):
    """Match geometries that spatially cross the given geometry."""
    lookup_name = 'crosses'
@BaseSpatialField.register_lookup
class DisjointLookup(GISLookup):
    """Match geometries sharing no point with the given geometry."""
    lookup_name = 'disjoint'
@BaseSpatialField.register_lookup
class EqualsLookup(GISLookup):
    """Match geometries spatially equal to the given geometry."""
    lookup_name = 'equals'
@BaseSpatialField.register_lookup
class IntersectsLookup(GISLookup):
    """Match geometries sharing any portion of space with the given geometry."""
    lookup_name = 'intersects'
@BaseSpatialField.register_lookup
class OverlapsLookup(GISLookup):
    """Match geometries that overlap the given geometry."""
    lookup_name = 'overlaps'
@BaseSpatialField.register_lookup
class RelateLookup(GISLookup):
    """
    The 'relate' lookup: compare two geometries using a DE-9IM intersection
    matrix pattern passed as the second rhs tuple element.
    """
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    # A valid pattern is exactly 9 characters drawn from {0, 1, 2, T, F, *}.
    pattern_regex = _lazy_re_compile(r'^[012TF\*]{9}$')
    def process_rhs(self, compiler, connection):
        # Check the pattern argument
        pattern = self.rhs_params[0]
        backend_op = connection.ops.gis_operators[self.lookup_name]
        if hasattr(backend_op, 'check_relate_argument'):
            # A backend may supply its own pattern validation hook.
            backend_op.check_relate_argument(pattern)
        elif not isinstance(pattern, str) or not self.pattern_regex.match(pattern):
            raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        sql, params = super().process_rhs(compiler, connection)
        # The pattern fills the trailing %s in sql_template.
        return sql, params + [pattern]
@BaseSpatialField.register_lookup
class TouchesLookup(GISLookup):
    """Match geometries that touch the given geometry."""
    lookup_name = 'touches'
@BaseSpatialField.register_lookup
class WithinLookup(GISLookup):
    """Match geometries spatially within the given geometry."""
    lookup_name = 'within'
class DistanceLookupBase(GISLookup):
    """
    Shared behavior for distance lookups: validates the rhs tuple
    (geometry, distance[, band index][, 'spheroid']) and resolves the
    distance parameter into SQL.
    """
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %(value)s'
    def process_rhs_params(self):
        # rhs_params excludes the geometry itself, so the 1-3 extras here
        # correspond to the 2-4 element tuples named in the messages.
        if not 1 <= len(self.rhs_params) <= 3:
            raise ValueError("2, 3, or 4-element tuple required for '%s' lookup." % self.lookup_name)
        elif len(self.rhs_params) == 3 and self.rhs_params[2] != 'spheroid':
            raise ValueError("For 4-element tuples the last argument must be the 'spheroid' directive.")
        # Check if the second parameter is a band index.
        if len(self.rhs_params) > 1 and self.rhs_params[1] != 'spheroid':
            self.process_band_indices()
    def process_distance(self, compiler, connection):
        # Return (sql, params) for the distance, compiling it when it is an
        # expression, otherwise letting the backend convert the raw value.
        dist_param = self.rhs_params[0]
        return (
            compiler.compile(dist_param.resolve_expression(compiler.query))
            if hasattr(dist_param, 'resolve_expression') else
            ('%s', connection.ops.get_distance(self.lhs.output_field, self.rhs_params, self.lookup_name))
        )
@BaseSpatialField.register_lookup
class DWithinLookup(DistanceLookupBase):
    """
    The 'dwithin' lookup: match geometries within a given distance of the
    rhs geometry via the backend's dwithin function.
    """
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %(value)s)'
    def process_distance(self, compiler, connection):
        dist_param = self.rhs_params[0]
        if (
            not connection.features.supports_dwithin_distance_expr and
            hasattr(dist_param, 'resolve_expression') and
            not isinstance(dist_param, Distance)
        ):
            # Expression distances require backend support; Distance values
            # are handled by the base implementation.
            raise NotSupportedError(
                'This backend does not support expressions for specifying '
                'distance in the dwithin lookup.'
            )
        return super().process_distance(compiler, connection)
    def process_rhs(self, compiler, connection):
        dist_sql, dist_params = self.process_distance(compiler, connection)
        # The distance SQL fills the 'value' slot of sql_template.
        self.template_params['value'] = dist_sql
        rhs_sql, params = super().process_rhs(compiler, connection)
        return rhs_sql, params + dist_params
class DistanceLookupFromFunction(DistanceLookupBase):
    """
    Distance comparison rendered as ``<distance expr> <op> <distance>``
    using the backend's distance expression rather than a SQL operator.
    """
    def as_sql(self, compiler, connection):
        # 'spheroid' may only appear as the trailing extra rhs parameter.
        spheroid = (len(self.rhs_params) == 2 and self.rhs_params[-1] == 'spheroid') or None
        distance_expr = connection.ops.distance_expr_for_lookup(self.lhs, self.rhs, spheroid=spheroid)
        sql, params = compiler.compile(distance_expr.resolve_expression(compiler.query))
        dist_sql, dist_params = self.process_distance(compiler, connection)
        return (
            '%(func)s %(op)s %(dist)s' % {'func': sql, 'op': self.op, 'dist': dist_sql},
            params + dist_params,
        )
@BaseSpatialField.register_lookup
class DistanceGTLookup(DistanceLookupFromFunction):
    """Match rows farther than the given distance from the rhs geometry."""
    lookup_name = 'distance_gt'
    op = '>'
@BaseSpatialField.register_lookup
class DistanceGTELookup(DistanceLookupFromFunction):
    """Match rows at least the given distance from the rhs geometry."""
    lookup_name = 'distance_gte'
    op = '>='
@BaseSpatialField.register_lookup
class DistanceLTLookup(DistanceLookupFromFunction):
    """Match rows nearer than the given distance to the rhs geometry."""
    lookup_name = 'distance_lt'
    op = '<'
@BaseSpatialField.register_lookup
class DistanceLTELookup(DistanceLookupFromFunction):
    """Match rows at most the given distance from the rhs geometry."""
    lookup_name = 'distance_lte'
    op = '<='
|
|
#this is the base class for tester objects
import logging
import os
import subprocess
import sys
from time import clock
from time import perf_counter

from result import Result
# Logger used to report per-test progress to the student; INFO and above are
# emitted through a stream handler (stderr by default).
studentLogger = logging.getLogger('errorLogger.progressLogger.studentLogger')
studentLogger.setLevel(logging.INFO)
h = logging.StreamHandler()
h.setLevel(logging.INFO)
studentLogger.addHandler(h)
class Tester(object):
    """
    Tester

    abstract class representing a testing interface: runs an executable
    against every input file in a test directory and compares its output
    against the matching file in a solution directory.
    """
    __name__ = 'Tester' #the name of this class
    INPUT_STDIN = 1 #input is expected to come via standard input
    INPUT_CMDLINE = 2 #input is expected to come via the command line
    OUTPUT_STDOUT = 3 #output of executable will be to the standard output
    OUTPUT_FILE = 4 #executable's output will be a file
    _PROGRAM_COMPLETED = 5 #the program completed the test
    _PROGRAM_CRASHED = 6 #the program crashed during a test
    _PROGRAM_TIMED_OUT = 7 #the program timed out during a test
    @staticmethod
    def str2InputType(typename):
        """converts the string name of the input type to the internal type
        @typename: the name of the type. either
            stdin
            cmdline
        @raises ValueError: for an unrecognized type name
        """
        if(typename.lower() == 'stdin'):
            return Tester.INPUT_STDIN
        elif(typename.lower() == 'cmdline'):
            return Tester.INPUT_CMDLINE
        else:
            raise ValueError('Unknown input type ' + typename)
    @staticmethod
    def str2OutputType(typename):
        """converts the string name of the output type to the internal type
        @typename: the name of the type. either
            stdout
            file
        @raises ValueError: for an unrecognized type name
        """
        if(typename.lower() == 'stdout'):
            return Tester.OUTPUT_STDOUT
        elif(typename.lower() == 'file'):
            return Tester.OUTPUT_FILE
        else:
            #bug fix: this error previously said 'input type'
            raise ValueError('Unknown output type ' + typename)
    def __init__(self, executable,
                 usingCmdArgs, usingStdin, outputType,
                 inDir, solDir, scratchDir,
                 maxRunTime = 5, cmdArgs = None, lines2skip = 0):
        """
        @executable: the name of the executable to be run
        @usingCmdArgs: Are command line arguments being used?
        @usingStdin: Will there be input from the standard input?
        @outputType: how are outputs generated: Either
            OUTPUT_STDOUT: for when the solution is sent to standard out or
            OUTPUT_FILE: for when the solution is sent to a file
        @inDir: the name of the directory containing the inputs to be used for testing
            the naming convention for the tests contained within is testname-test.filetype
        @solDir: the name of the directory containing the solutions
            the naming convention for the solutions contained within is testname-sol.filetype
        @scratchDir: directory to write scratch files in, or None for no scratch file
        @maxRunTime: the maximum number of seconds to run the program or
            None to allow the program to run until completion (if it does not terminate the program will hang)
        @cmdArgs: a list of additional command line arguments to the executable
        @lines2skip: number of lines of output program and solution file to skip
        """
        self.executable = executable
        self.usingCmdArgs = usingCmdArgs
        self.usingStdin = usingStdin
        self.outputType = outputType
        self.inDir = inDir
        self.solDir = solDir
        self.scratchDir = scratchDir
        self.maxRunTime = maxRunTime
        self.lines2skip = lines2skip
        if cmdArgs is None:
            self.cmdArgs = []
        else:
            #copy so a caller mutating its list cannot change our arguments
            self.cmdArgs = cmdArgs.copy()
        self.testFiles = [self.inDir + os.sep +
                          test for test in os.listdir(inDir) if not test.startswith('.')] #get the tests in the test directory
        self.testFiles.sort() #make the tests sorted
        if scratchDir is None:
            self.userOut = None
        else:
            self.userOut = scratchDir + os.sep + 'userOut.txt' #file to temporarily store the user's output
        self.startTime = 0 #when did the test begin running
        self.endTime = 0 #when did the test end running
        self.results = [] #the results of the testing
    def _runOne(self, inFileName, outFileName = None):
        """run self.executable using the inputs contained in inFileName
        @inFileName: the name of the file containing the inputs
        @outFileName: the name of the file to write the program's stdout to if the solution is contained in the stdout
        @returns: the success status of running the program
        """
        additionalArgs = [] #command line arguments read from the test file
        with open(inFileName) as infile:
            if(self.usingCmdArgs): #using command line arguments
                num_args = int(infile.readline()) #the first line contains the number of command line arguments
                for i in range(num_args): #read the command arguments in
                    additionalArgs.append(infile.readline().strip())
                #remaining lines in the file are considered input to be given
                #via standard input
            #determine how outputs will be generated
            outfile = None
            if(self.outputType == Tester.OUTPUT_STDOUT): #outputting to stdout
                outfile = open(outFileName,'w') #open a file to hold the results
            elif(self.outputType == Tester.OUTPUT_FILE): #outputting to a file
                raise NotImplementedError #nothing we can really do as of now
            else:
                raise NotImplementedError
            try:
                #this clears out python's buffer so that the program run through subprocess
                #actually gets input. Another fix if this stops working is to open the file in unbuffered mode
                #http://stackoverflow.com/questions/22417010/subprocess-popen-stdin-read-file
                infile.seek(infile.tell())
                studentLogger.info('Preparing to test %s on %s', self.executable, os.path.basename(inFileName))
                #start the clocks. bug fix: time.clock measured CPU time on
                #Unix (so the wait on the child showed ~0 seconds) and was
                #removed in Python 3.8; perf_counter is monotonic wall time.
                self.endTime = perf_counter() #initialized so a timeout still yields a sane interval
                self.startTime = perf_counter()
                #run the program
                with subprocess.Popen([self.executable] + self.cmdArgs + additionalArgs,
                                      stdin = infile,
                                      stdout = outfile,
                                      stderr = subprocess.PIPE,
                                      universal_newlines = True) as program:
                    try:
                        program.wait(timeout = self.maxRunTime) #wait for the program to finish
                        self.endTime = perf_counter() #program completed
                        err = '\t'.join(program.stderr.readlines()) #always have to read the pipes
                        if(program.returncode != 0):
                            studentLogger.warning('%s %s crashed for the following reasons:\n\t%s\n',
                                                  self.executable, ' '.join(self.cmdArgs), err)
                            return Tester._PROGRAM_CRASHED
                        else:
                            return Tester._PROGRAM_COMPLETED
                    except subprocess.TimeoutExpired:
                        #bug fix: executable and argument list were swapped in this message
                        studentLogger.warning('%s %s timed out', self.executable, ' '.join(self.cmdArgs))
                        program.kill()
                        return Tester._PROGRAM_TIMED_OUT
            finally:
                #bug fix: the stdout capture file was previously never closed
                if outfile is not None:
                    outfile.close()
    #end _runOne
    def testOne(self, inFile, solFile):
        """
        run the executable using inFile as the inputs
        and checking the output against solFile
        @inFile: the name of the file containing the inputs
        @solFile: the name of the file containing the solution
        @returns: a Result
        """
        progStatus = self._runOne(inFile, self.userOut) #run the program
        testName = os.path.basename(inFile) #the name of the test
        if(progStatus == Tester._PROGRAM_CRASHED):
            return Result(testName, False, 'Crashed')
        elif(progStatus == Tester._PROGRAM_TIMED_OUT):
            return Result(testName, False, 'Timed Out')
        else: #program completed successfully
            if(self.outputType == Tester.OUTPUT_STDOUT):
                with open(self.userOut) as answer:
                    (correct, out, sol) = self._checkSolution(answer, solFile)
                    if(correct):
                        studentLogger.info('%s %s passed test %s',
                                           self.executable, ' '.join(self.cmdArgs),
                                           os.path.basename(inFile))
                    else:
                        #find and report the first word where output and solution diverge
                        first_diff = ''
                        i = 0
                        for (i,(o,s)) in enumerate(zip(out,sol)):
                            if o != s:
                                first_diff = 'First mismatch at word %d\nout = %s but sol = %s' % (i,o,s)
                                break
                        studentLogger.info('%s %s failed test %s. Program output: %s \n Solution: %s \n%s\n\n',
                                           self.executable, ' '.join(self.cmdArgs),
                                           os.path.basename(inFile), out, sol, first_diff)
                    return Result(testName, correct, self.endTime - self.startTime )
            else: #haven't done anything where solutions are contained in files
                raise NotImplementedError
    #end testOne
    def generateSolutions(self):
        """generates all the solutions by running the executable on every test"""
        for test in self.testFiles:
            outfileName = test.replace('-test', '-sol')
            outfileName = self.solDir + os.sep + os.path.basename(outfileName)
            self._runOne(test,outfileName)
    #end generateSolutions
    def testAll(self):
        """
        Test all the tests; afterwards the Results are available via
        getResults(). Each Result records (testName, correct, time taken),
        with the time expressed in seconds.
        """
        self.results = [] #clear old results if any exist
        for test in self.testFiles:
            #get the name of the file containing the solution to this test
            sol = test.replace('-test', '-sol') #replace test with sol
            sol = self.solDir + os.sep + os.path.basename(sol) #prepend solution directory name and remove test directory path
            self.results.append(self.testOne(test, sol))
        #bug fix: guard the None case (no scratch dir) before removing
        if self.userOut is not None:
            try:
                os.remove(self.userOut) #clean up the scratch output file
            except FileNotFoundError:
                pass
    #end testAll
    def getResults(self):
        """get the results of the testing
        @returns: a copy of the Result list from the last testAll()
        """
        return self.results.copy()
    #end getResults
    def getNumTests(self):
        """get the number of tests that are to be performed"""
        return len(self.testFiles)
    #end getNumTests
    def getNumCorrect(self):
        """
        get the number of answers that were correct
        should only be called after testAll is run
        @returns: the number of tests that were correct
        """
        numCorrect = 0
        for res in self.results:
            if res.correct:
                numCorrect += 1
        return numCorrect
    #end getNumCorrect
    def getPercentCorrect(self):
        """get the percentage of right answers
        should only be called after testAll is run
        @returns: the fraction (0.0-1.0) of tests that were correct
        """
        numCorrect = self.getNumCorrect()
        numTests = float(self.getNumTests())
        return numCorrect / numTests
    #end getPercentCorrect
    def getMissedTests(self):
        """
        get a list of the test names that were missed
        should only be called after testAll is run
        @returns: a list of the test names that were missed
        """
        return [res.testName for res in self.results if not res.correct]
    #end getMissedTests
    def getPassedTests(self):
        """
        get a list of the test names that were passed correctly
        should only be called after testAll is run
        @returns: a list of the test names that were passed correctly
        """
        return [res.testName for res in self.results if res.correct]
    #getPassedTests
    def getTestNames(self):
        """
        get the names of the tests to be run
        can be called before testAll is run
        @returns: a list containing the test names
        """
        return [test.rsplit(os.sep, 1)[-1] for test in self.testFiles]
    def _checkSolution(self, progOut, solutionFileName):
        """
        checkSolution
        @progOut: the opened file containing the student's answer
        @solutionFileName: the name of the file containing the solution
        @returns a tuple containing
            (correct, program out, solution)
        """
        progOut.flush() #make sure the file is up to date
        progOut.seek(0) #go back to the beginning of the file
        try:
            sol = []
            with open(solutionFileName, 'r') as solFil: #open the solution file
                for (_1,_2,_3) in zip(range(self.lines2skip), progOut, solFil): #somehow placing zip as the first argument fixes a bug
                    pass #skip the leading lines of input
                sol = [] #the solution
                for line in solFil: #make it so that white space does not matter
                    sol += line.strip().split()
                out = [] # the programs output
                for line in progOut: #make it so that white space will not be an issue
                    out += line.strip().split()
                if(out != sol): #output does not match solution
                    return (False, out, sol)
                return (True, out, sol) #if everything is correct and all lines in the solution file are used the student got it right
        except UnicodeDecodeError:
            return (False, 'NonUnicode Character. This means your print statement is printing something crazy.', sol)
    # end _checkSolution
#end class Tester
if __name__ =='__main__':
    # smoke test: grade ./triMatMult.out against the local test/solution dirs
    python = sys.executable  # NOTE(review): unused binding
    t = Tester('./triMatMult.out', True, False, Tester.OUTPUT_STDOUT,
               'Tests/tfiles', 'Solutions', '.')
    t.testAll()
    for res in t.getResults():
        print(res)
|
|
#
# Promise package for Python 2.7+. Node has such a wonderful way to handle server side. With all of the debates
# on what to do with Python around GIL, I decided to support both GIL via threads and also multicore in the same
# API structure.
#
# Threads version (default) uses GIL and therefore all your data is thread safe. It just runs.
#
# multicore uses multiprocessing. Effectively, the script is forked to another core and the process is monitored.
# When the function finishes, one can return data but only if it is JSON serializable. TBD: Other means to
# share results from a process can be added.
#
# This is a good usable start!
#
#
# greg@brightappsllc.com - Copyright (c) 2016
#
# To Be opensourced
#
from threading import Thread
from threading import Event
import time
from multiprocessing import Process, Queue
import json
version = "0.4"
# Our deferred object that can be held by your main code
class Deferred(object):
    """Holds the eventual outcome of an asynchronous operation.

    A deferred starts unsettled; resolve() or reject() records the value or
    reason and signals the internal event so waiters wake up.
    """
    def __init__(self):
        self._event = Event()
        self._rejected = False
        self._result = None
    def resolve(self, value):
        """Settle successfully with *value* and wake any waiters."""
        self._result = value
        self._rejected = False
        self._event.set()
    def reject(self, reason):
        """Settle as failed with *reason* and wake any waiters."""
        self._result = reason
        self._rejected = True
        self._event.set()
    def promise(self):
        """Return an Intent (promise) bound to this deferred."""
        return Intent(self)
# A Promise is an Intent to do something. The Intent utilizes an IntentTask that does the actual calling
# of your function. The Intent either resolves with a successful promise or rejected if it can't finish what
# it promised
class Intent(object):
    """A Promise is an Intent to do something: it wraps a Deferred (usually an
    IntentTask) and schedules the work when then() is called. The Intent
    resolves through the deferred on success, or rejects it on failure.
    """
    def __init__ (self,deferred ):
        self._deferred = deferred
    def then(self, multicore=False, resolved=None, rejected=None):
        """Start the wrapped work and return a promise for the chained result.
        @multicore: run in a forked Process, marshalling the result as JSON;
            otherwise run on a new thread under the GIL
        @resolved: optional callback mapping the success value
        @rejected: optional callback mapping the failure reason
        """
        defer = Deferred()
        # bug fix: removed a stray 'global pool' statement - no such global
        # is defined anywhere in this module.
        def task():
            try:
                # Important: ALL data returned from your function must be JSON serializable for multicore
                if multicore:
                    q = Queue()
                    self._deferred.process = Process(target=self._deferred.runmc, args=(q,))
                    self._deferred.process.start()
                    self._deferred.process.join()
                    res = q.get()
                    if 'Error' in list(res):
                        results = json.loads(res['Error'])
                        self._deferred._rejected = True
                        self._deferred._result = results
                    else:
                        results = json.loads(res['Success'])
                        self._deferred._result = results
                else:
                    Thread(target=self._deferred.run).start()
                    self._deferred._event.wait()
                if self._deferred._rejected:
                    result = self._deferred._result
                    if rejected:
                        result = rejected(self._deferred._result)
                    defer.reject(result)
                else:
                    result = self._deferred._result
                    if resolved:
                        result = resolved(self._deferred._result)
                    defer.resolve(result)
            except Exception as ex:
                # bug fixes: ex.message is Python-2-only (AttributeError on
                # modern exceptions), and 'rejected' used to be called even
                # when the caller supplied no rejection callback (TypeError).
                reason = getattr(ex, 'message', str(ex))
                defer.reject(reason)
                if rejected:
                    rejected(reason)
        Thread(target=task).start()
        return defer.promise()
    def wait(self):
        """Block until the underlying deferred settles."""
        self._deferred._event.wait()
    @staticmethod
    def wait_all(*args):
        """Wait for every promise (or list of promises) in *args*.
        @returns: True only if none of them were rejected.
        """
        for promise in args:
            if isinstance( promise, list):
                for item in promise:
                    item.wait ()
            else:
                promise.wait()
        ret = False
        # ANY promise we are waiting for MUST end with success or the chain fails
        for promise in args:
            if isinstance ( promise, list ):
                for item in promise:
                    ret = item._deferred._rejected
                    if ret == True:
                        return False
            else:
                ret = promise._deferred._rejected
                if ret == True:
                    return False
        return True
    # todo: Other logic for exiting promises
# The class that wraps your function and hands the user code a Promise to execute
class IntentTask (Deferred ):
    """Deferred that also knows how to invoke the wrapped user function,
    either on the current thread (run) or in a child process (runmc).
    """
    # Class attributes used as instance defaults; setfunc() rebinds them
    # per instance, so the class-level sharing is harmless here.
    func = None
    args = None
    def setfunc ( self, func, *args):
        """Record the callable and, when given, its argument tuple."""
        self.func = func
        if args:
            self.args = args
    def runmc ( self, q):
        """Run self.func for the multicore path, reporting the outcome through
        multiprocessing queue q as {'Success': json} or {'Error': json}.
        The result must be JSON serializable to cross the process boundary.
        """
        if self.func:
            try :
                if self.args:
                    self._result = self.func(self.args)
                else:
                    self._result = self.func ()
                res = {'Success' : json.dumps ( self._result)}
                q.put ( res)
                self._event.set()
            except Exception as e:
                # NOTE(review): e.message is Python-2-only; on Python 3 this
                # line itself raises AttributeError - confirm target version.
                self._rejected = True
                self._result = e.message
                q.put ( {'Error' : json.dumps(self._result)} )
                self._event.set()
        else:
            # I am going to do nothing and be happy
            self._result = "Did nothing"
            self._event.set()
    def run (self ):
        """Run self.func on the current thread, recording the outcome on this
        deferred and signalling the event in every case.
        """
        if self.func:
            try :
                if self.args:
                    self._result = self.func(self.args)
                else:
                    self._result = self.func ()
                self._event.set()
            except Exception as e:
                # NOTE(review): e.message is Python-2-only - see runmc().
                self._rejected = True
                self._result = e.message
                self._event.set()
        else:
            # I am going to do nothing and be happy
            self._result = "Did nothing"
            self._event.set()
class Promise ( Intent ):
    """Convenience front end: creates an IntentTask, hands it to Intent, and
    optionally binds the user's callable immediately.
    """
    def __init__ ( self, func=None, *args ):
        self.task = IntentTask()
        super (Promise, self).__init__(self.task)
        if func:
            self.setup (func, *args)
    def setup (self, func, *args ):
        """Bind *func* (plus optional args) to the underlying task.
        @returns: self, so calls can be chained
        """
        self.task.setfunc ( func, *args )
        return self
# todo: this is going to change so that this is a dependant call .. do after
# todo: a do call after a then
# todo: Chaining promises
### TEST STUBS
### TODO: Move to python test
### TODO: create pip package
def myerror (result):
    """Rejection callback for the test stubs: print the failure reason."""
    print 'error'
    print result
def mysuccess (result):
    """Resolution callback for the test stubs: print the success value."""
    print 'mysuccess'
    print result
def sleep (args):
    """Sleep args[0] seconds then fail on purpose (exercises the rejected path).

    The 1/0 raises ZeroDivisionError, so the ValueError line below is
    unreachable dead code left from an earlier version of this stub.
    """
    print 'sleeping ' + str ( args[0])
    time.sleep(args[0])
    x = 1/0
    raise ValueError ('foo')
def mctest (args):
    """Echo every element of *args* into a fresh list with 'Rocks' appended.

    Used by the multicore demo to prove a JSON-serializable result makes it
    back across the process boundary.
    """
    echoed = list(args)
    echoed.append('Rocks')
    return echoed
def dosomeloops():
    """Pure CPU busy-work for the benchmarks: a million float multiplies."""
    acc = 3.14
    for _ in range(1000000):
        acc = acc * 10.0
def mcfail ():
    """Stub that always raises, to exercise the multicore rejection path."""
    message = 'Test the failure case'
    raise ValueError(message)
if __name__ == '__main__':
    # Smoke tests: single rejected promise, multicore failure, multicore
    # success, then a threaded batch - timing each multicore/threaded batch.
    t = time.clock()
    p = Promise (sleep, 1).then(rejected=myerror)
    p.wait ()
    #Multicore
    p = Promise(mcfail).then (multicore=True, rejected=myerror)
    p.wait()
    for x in range (0, 10):
        dosomeloops()
    e = time.clock ()
    print e-t
    p = []
    p.append (Promise(mctest, 'Erin', 'Greg', 'Mallorie', 'Josie', 'Emma').then (multicore=True, resolved=mysuccess, rejected=myerror))
    t = time.clock()
    for x in range (0, 10):
        p.append ( Promise (dosomeloops).then (multicore=True))
    if Promise.wait_all ( p ):
        print 'Success'
    e = time.clock ()
    print e-t
    #Thread
    t = time.clock()
    p = []
    for x in range ( 0, 10):
        p.append(Promise(dosomeloops).then())
    if Promise.wait_all( p ):
        print 'Success'
    e = time.clock ()
    print e-t
    #m = model ()
    #loadmodel(m)
    #p2 = Promise().do( loadbigrams, m ).then (resolved=mysuccess, rejected=myerror)
    #if Promise.wait_all ( p2 ):
    #    print 'groovy'
    #p = []
    #for x in range ( 0, 10):
    #    p.append (Promise().do( heatword2vec, 3, m).then (resolved=mysuccess, rejected=myerror))
    #p5 = Promise ( loadtrigrams).then()
    #if Promise.wait_all( p, p5):
    #    print 'All promises we were interested in successfully completed'
    print 'All promises done'
|
|
# -*- coding: utf-8 -*-
"""
github3.git
===========
This module contains all the classes relating to Git Data.
See also: http://developer.github.com/v3/git/
"""
from __future__ import unicode_literals
from json import dumps
from base64 import b64decode
from .models import GitHubObject, GitHubCore, BaseCommit
from .users import User
from .decorators import requires_auth
class Blob(GitHubObject):
    """The :class:`Blob <Blob>` object.

    See also: http://developer.github.com/v3/git/blobs/
    """
    def _update_attributes(self, blob):
        self._api = blob.get('url', '')
        #: Raw content of the blob.
        # bug fix: a payload without 'content' used to raise AttributeError
        # on None.encode(); treat missing content as None instead.
        content = blob.get('content')
        self.content = content.encode() if content is not None else None
        #: Encoding of the raw content.
        self.encoding = blob.get('encoding')
        #: Decoded content of the blob.
        self.decoded = self.content
        if self.encoding == 'base64' and self.content is not None:
            self.decoded = b64decode(self.content)
        #: Size of the blob in bytes
        self.size = blob.get('size')
        #: SHA1 of the blob
        self.sha = blob.get('sha')
    def _repr(self):
        return '<Blob [{0:.10}]>'.format(self.sha)
class GitData(GitHubCore):
    """Shared base class for the Git Data objects.

    Never returned to the user (developer) directly; it exists only to
    factor out the attributes common to the other Git Data classes.
    """
    def _update_attributes(self, data):
        # API URL of this object.
        self._api = data.get('url', '')
        #: SHA of the object
        self.sha = data.get('sha')
class Commit(BaseCommit):
    """A commit made in a repository, as exposed by the Git Data API.

    See also: http://developer.github.com/v3/git/commits/
    """
    def _update_attributes(self, commit):
        super(Commit, self)._update_attributes(commit)
        # GitHub may return nil/None for author/committer; normalise both to
        # dicts so the .get() lookups below are always safe.
        #: dict containing at least the name, email and date the commit was
        #: created
        self.author = commit.get('author') or {}
        self._author_name = self.author.get('name', '')
        #: dict containing similar information to the author attribute
        self.committer = commit.get('committer') or {}
        self._commit_name = self.committer.get('name', '')
        tree = commit.get('tree')
        #: :class:`Tree <Tree>` the commit belongs to (None when absent).
        self.tree = Tree(tree, self) if tree else None

    def _repr(self):
        return '<Commit [{0}:{1}]>'.format(self._author_name, self.sha)

    def author_as_User(self):
        """Return the author attribute wrapped as a
        :class:`User <github3.users.User>`. No guarantees are made about the
        validity of this object, i.e., having a login or created_at object.
        """
        return User(self.author, self)

    def committer_as_User(self):
        """Return the committer attribute wrapped as a
        :class:`User <github3.users.User>` object. No guarantees are made
        about the validity of this object.
        """
        return User(self.committer, self)
class Reference(GitHubCore):
    """A git reference created on a repository.

    See also: http://developer.github.com/v3/git/refs/
    """
    def _update_attributes(self, ref):
        self._api = ref.get('url', '')
        #: The reference path, e.g., refs/heads/sc/featureA
        self.ref = ref.get('ref')
        target = ref.get('object', {})
        #: :class:`GitObject <GitObject>` the reference points to
        self.object = GitObject(target)

    def _repr(self):
        return '<Reference [{0}]>'.format(self.ref)

    @requires_auth
    def delete(self):
        """Delete this reference.

        :returns: bool
        """
        return self._boolean(self._delete(self._api), 204, 404)

    @requires_auth
    def update(self, sha, force=False):
        """Update this reference to point at *sha*.

        :param str sha: (required), sha of the reference
        :param bool force: (optional), force the update or not
        :returns: bool -- True when the API accepted the update
        """
        payload = dumps({'sha': sha, 'force': force})
        json = self._json(self._patch(self._api, data=payload), 200)
        if not json:
            return False
        # Refresh our own attributes from the server's response.
        self._update_attributes(json)
        return True
class GitObject(GitData):
    """A bare git object: a sha/url pair plus the object's type."""

    def _update_attributes(self, obj):
        # Base class fills in sha and _api.
        super(GitObject, self)._update_attributes(obj)
        #: The type of object.
        self.type = obj.get('type')

    def _repr(self):
        return '<Git Object [{0}]>'.format(self.sha)
class Tag(GitData):
    """An annotated git tag.

    See also: http://developer.github.com/v3/git/tags/
    """
    def _update_attributes(self, tag):
        super(Tag, self)._update_attributes(tag)
        # Plain copies of payload fields: 'tag' is the tag name, 'message'
        # the tag message, 'tagger' a dict with the tagger's name and email.
        for field in ('tag', 'message', 'tagger'):
            setattr(self, field, tag.get(field))
        #: :class:`GitObject <GitObject>` the tag points at
        self.object = GitObject(tag.get('object', {}))

    def _repr(self):
        return '<Tag [{0}]>'.format(self.tag)
class Tree(GitData):
    """A git tree: a list of :class:`Hash <Hash>` entries.

    See also: http://developer.github.com/v3/git/trees/
    """
    def _update_attributes(self, tree):
        super(Tree, self)._update_attributes(tree)
        #: list of :class:`Hash <Hash>` objects
        self.tree = [Hash(entry) for entry in tree.get('tree', [])]

    def _repr(self):
        return '<Tree [{0}]>'.format(self.sha)

    def recurse(self):
        """Recurse into the tree.

        :returns: :class:`Tree <Tree>`
        """
        response = self._get(self._api, params={'recursive': '1'})
        json = self._json(response, 200)
        return self._instance_or_null(Tree, json)
class Hash(GitHubObject):
    """One entry of a git tree.

    Carries ``path`` (path to the file), ``mode`` (file mode), ``type``
    (type of hash, e.g., blob), ``size``, ``sha`` and ``url`` (URL of this
    object in the GitHub API), all copied verbatim from the payload.
    See also: http://developer.github.com/v3/git/trees/#create-a-tree
    """
    def _update_attributes(self, info):
        # Every attribute is a plain copy of the corresponding payload key.
        for field in ('path', 'mode', 'type', 'size', 'sha', 'url'):
            setattr(self, field, info.get(field))

    def _repr(self):
        return '<Hash [{0}]>'.format(self.sha)
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.contrib.sites.models import Site
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.models import Text
from django.core.cache import cache
from django.core.management.base import CommandError
from django.core.management import call_command
from django.core.urlresolvers import reverse
from cms.api import create_page, add_plugin, create_title
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_DIRTY
from cms.management.commands import publisher_publish
from cms.models import CMSPlugin, Title
from cms.models.pagemodel import Page
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import SettingsOverrideTestCase as TestCase
from cms.test_utils.util.context_managers import StdoutOverride, SettingsOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from cms.utils.compat.dj import get_user_model
class PublisherCommandTests(TestCase):
    """
    Tests for the publish command
    """

    def _run_publisher_publish(self, **options):
        """Run the ``publisher_publish`` management command and return the
        ``(total, published)`` page counts parsed from its stdout report.

        Both counts default to 0 when the command prints no matching lines
        (previously several tests duplicated this parsing loop, and one
        left the counters unbound when no line matched).
        """
        pages_from_output = 0
        published_from_output = 0
        with StdoutOverride() as buffer:
            # The command prints its report, so we need to redirect IO.
            call_command('publisher_publish', **options)
        lines = buffer.getvalue().split('\n')  # NB: readlines() doesn't work
        for line in lines:
            if 'Total' in line:
                pages_from_output = int(line.split(':')[1])
            elif 'Published' in line:
                published_from_output = int(line.split(':')[1])
        return pages_from_output, published_from_output

    def test_command_line_should_raise_without_superuser(self):
        """The command refuses to run when no superuser exists."""
        with self.assertRaises(CommandError):
            com = publisher_publish.Command()
            com.handle_noargs()

    def test_command_line_publishes_zero_pages_on_empty_db(self):
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        pages_from_output, published_from_output = self._run_publisher_publish()
        self.assertEqual(pages_from_output, 0)
        self.assertEqual(published_from_output, 0)

    def test_command_line_ignores_draft_page(self):
        """Without --include-unpublished, draft-only pages are skipped."""
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        create_page("The page!", "nav_playground.html", "en", published=False)
        pages_from_output, published_from_output = self._run_publisher_publish()
        self.assertEqual(pages_from_output, 0)
        self.assertEqual(published_from_output, 0)
        self.assertEqual(Page.objects.public().count(), 0)

    def test_command_line_publishes_draft_page(self):
        """With include_unpublished=True, draft-only pages get published."""
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        create_page("The page!", "nav_playground.html", "en", published=False)
        pages_from_output, published_from_output = self._run_publisher_publish(
            include_unpublished=True)
        self.assertEqual(pages_from_output, 1)
        self.assertEqual(published_from_output, 1)
        self.assertEqual(Page.objects.public().count(), 1)

    def test_command_line_publishes_selected_language(self):
        """Only the titles of the requested language are published."""
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        page = create_page("en title", "nav_playground.html", "en")
        title = create_title('de', 'de title', page)
        title.published = True
        title.save()
        title = create_title('fr', 'fr title', page)
        title.published = True
        title.save()
        pages_from_output, published_from_output = self._run_publisher_publish(
            language='de')
        self.assertEqual(pages_from_output, 1)
        self.assertEqual(published_from_output, 1)
        self.assertEqual(Page.objects.public().count(), 1)
        public = Page.objects.public()[0]
        languages = sorted(public.title_set.values_list('language', flat=True))
        self.assertEqual(languages, ['de'])

    def test_command_line_publishes_selected_language_drafts(self):
        """Language filter and include_unpublished combine correctly."""
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        page = create_page("en title", "nav_playground.html", "en")
        title = create_title('de', 'de title', page)
        title.published = False
        title.save()
        title = create_title('fr', 'fr title', page)
        title.published = False
        title.save()
        pages_from_output, published_from_output = self._run_publisher_publish(
            language='de', include_unpublished=True)
        self.assertEqual(pages_from_output, 1)
        self.assertEqual(published_from_output, 1)
        self.assertEqual(Page.objects.public().count(), 1)
        public = Page.objects.public()[0]
        languages = sorted(public.title_set.values_list('language', flat=True))
        self.assertEqual(languages, ['de'])

    def test_table_name_patching(self):
        """
        This tests the plugin models patching when publishing from the command line
        """
        User = get_user_model()
        User.objects.create_superuser('djangocms', 'cms@example.com', '123456')
        create_page("The page!", "nav_playground.html", "en", published=True)
        draft = Page.objects.drafts()[0]
        draft.reverse_id = 'a_test'  # we have to change *something*
        draft.save()
        add_plugin(draft.placeholders.get(slot=u"body"),
                   u"TextPlugin", u"en", body="Test content")
        draft.publish('en')
        add_plugin(draft.placeholders.get(slot=u"body"),
                   u"TextPlugin", u"en", body="Test content")
        # Manually undoing table name patching
        Text._meta.db_table = 'djangocms_text_ckeditor_text'
        plugin_pool.patched = False
        # Output is irrelevant here; we only need the command to run.
        self._run_publisher_publish()
        not_drafts = len(Page.objects.filter(publisher_is_draft=False))
        drafts = len(Page.objects.filter(publisher_is_draft=True))
        self.assertEqual(not_drafts, 1)
        self.assertEqual(drafts, 1)

    def test_command_line_publishes_one_page(self):
        """
        Publisher always creates two Page objects for every CMS page,
        one is_draft and one is_public.
        The public version of the page can be either published or not.
        This bit of code uses sometimes manager methods and sometimes manual
        filters on purpose (this helps test the managers)
        """
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        # Now, let's create a page. That actually creates 2 Page objects
        create_page("The page!", "nav_playground.html", "en", published=True)
        draft = Page.objects.drafts()[0]
        draft.reverse_id = 'a_test'  # we have to change *something*
        draft.save()
        pages_from_output, published_from_output = self._run_publisher_publish()
        self.assertEqual(pages_from_output, 1)
        self.assertEqual(published_from_output, 1)
        # Sanity check the database (we should have one draft and one public)
        not_drafts = len(Page.objects.filter(publisher_is_draft=False))
        drafts = len(Page.objects.filter(publisher_is_draft=True))
        self.assertEqual(not_drafts, 1)
        self.assertEqual(drafts, 1)
        # Now check that the non-draft has the attribute we set to the draft.
        non_draft = Page.objects.public()[0]
        self.assertEqual(non_draft.reverse_id, 'a_test')

    def test_command_line_publish_multiple_languages(self):
        """All published titles of a page are published together."""
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        # Create a draft page with two published titles
        page = create_page(u"The page!", "nav_playground.html", "en", published=False)
        title = create_title('de', 'ja', page)
        title.published = True
        title.save()
        title = create_title('fr', 'non', page)
        title.published = True
        title.save()
        # Output is irrelevant here; we only need the command to run.
        self._run_publisher_publish()
        public = Page.objects.public()[0]
        languages = sorted(public.title_set.values_list('language', flat=True))
        self.assertEqual(languages, ['de', 'fr'])

    def test_command_line_publish_one_site(self):
        """The --site option restricts publishing to one site's pages."""
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        siteA = Site.objects.create(domain='a.example.com', name='a.example.com')
        siteB = Site.objects.create(domain='b.example.com', name='b.example.com')
        #example.com
        create_page(u"example.com homepage", "nav_playground.html", "en", published=True)
        #a.example.com
        create_page(u"a.example.com homepage", "nav_playground.html", "de", site=siteA, published=True)
        #b.example.com
        create_page(u"b.example.com homepage", "nav_playground.html", "de", site=siteB, published=True)
        create_page(u"b.example.com about", "nav_playground.html", "nl", site=siteB, published=True)
        pages_from_output, published_from_output = self._run_publisher_publish(
            site=siteB.id)
        self.assertEqual(pages_from_output, 2)
        self.assertEqual(published_from_output, 2)

    def test_command_line_publish_multiple_languages_check_count(self):
        """
        Publishing one page with multiple languages still counts
        as one page. This test case checks whether it works
        as expected.
        """
        # we need to create a superuser (the db is empty)
        get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
        # Now, let's create a page with 2 languages.
        page = create_page("en title", "nav_playground.html", "en", published=True)
        create_title("de", "de title", page)
        page.publish("de")
        pages_from_output, published_from_output = self._run_publisher_publish()
        self.assertEqual(pages_from_output, 1)
        self.assertEqual(published_from_output, 1)

    def tearDown(self):
        # Undo any plugin-pool patching done by the command runs above.
        plugin_pool.patched = False
        plugin_pool.set_plugin_meta()
class PublishingTests(TestCase):
def create_page(self, title=None, **kwargs):
return create_page(title or self._testMethodName,
"nav_playground.html", "en", **kwargs)
    def test_publish_home(self):
        """Publishing the home page via the admin redirects to the live
        root URL with edit mode switched off."""
        name = self._testMethodName
        page = self.create_page(name, published=False)
        self.assertFalse(page.publisher_public_id)
        self.assertEqual(Page.objects.all().count(), 1)
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
            self.assertEqual(response.status_code, 302)
            # Home page: redirect target is the site root, not the page slug.
            self.assertEqual(response['Location'], "http://testserver/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
    def test_publish_single(self):
        """Publishing a single page moves it into the public and published
        querysets and resets its publisher state."""
        name = self._testMethodName
        page = self.create_page(name, published=False)
        self.assertFalse(page.is_published('en'))
        drafts = Page.objects.drafts()
        public = Page.objects.public()
        published = Page.objects.public().published("en")
        self.assertObjectExist(drafts, title_set__title=name)
        self.assertObjectDoesNotExist(public, title_set__title=name)
        self.assertObjectDoesNotExist(published, title_set__title=name)
        page.publish("en")
        # Re-create the (lazy) querysets after publishing.
        drafts = Page.objects.drafts()
        public = Page.objects.public()
        published = Page.objects.public().published("en")
        self.assertTrue(page.is_published('en'))
        self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
        self.assertIsNotNone(page.publisher_public)
        self.assertTrue(page.publisher_public_id)
        self.assertObjectExist(drafts, title_set__title=name)
        self.assertObjectExist(public, title_set__title=name)
        self.assertObjectExist(published, title_set__title=name)
        page = Page.objects.get(pk=page.pk)
        # 0 == PUBLISHER_STATE_DEFAULT
        self.assertEqual(page.get_publisher_state("en"), 0)
    def test_publish_admin(self):
        """Publishing via the admin endpoint redirects and clears the
        draft's publisher state."""
        page = self.create_page("test_admin", published=False)
        superuser = self.get_superuser()
        with self.login_user_context(superuser):
            response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
            self.assertEqual(response.status_code, 302)
        page = Page.objects.get(pk=page.pk)
        # 0 == PUBLISHER_STATE_DEFAULT
        self.assertEqual(page.get_publisher_state('en'), 0)
    def test_publish_wrong_lang(self):
        """Publishing a language outside the active CMS_LANGUAGES config
        still redirects (no server error)."""
        page = self.create_page("test_admin", published=False)
        superuser = self.get_superuser()
        with SettingsOverride(
            LANGUAGES=(('de', 'de'), ('en', 'en')),
            CMS_LANGUAGES={1: [{'code': 'en', 'name': 'en', 'fallbacks': ['fr', 'de'], 'public': True}]}
        ):
            with self.login_user_context(superuser):
                with force_language('de'):
                    response = self.client.get(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
        self.assertEqual(response.status_code, 302)
        page = Page.objects.get(pk=page.pk)
    def test_publish_child_first(self):
        """Publishing a child before its parent leaves the child PENDING;
        publishing the parent then cascades to the pending descendants."""
        parent = self.create_page('parent', published=False)
        child = self.create_page('child', published=False, parent=parent)
        parent = parent.reload()
        self.assertFalse(parent.is_published('en'))
        self.assertFalse(child.is_published('en'))
        drafts = Page.objects.drafts()
        public = Page.objects.public()
        published = Page.objects.public().published('en')
        for name in ('parent', 'child'):
            self.assertObjectExist(drafts, title_set__title=name)
            self.assertObjectDoesNotExist(public, title_set__title=name)
            self.assertObjectDoesNotExist(published, title_set__title=name)
        child.publish("en")
        child = child.reload()
        self.assertTrue(child.is_published("en"))
        self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
        self.assertIsNone(child.publisher_public)
        # Since we have no parent, the state is otherwise unchanged
        for name in ('parent', 'child'):
            self.assertObjectExist(drafts, title_set__title=name)
            self.assertObjectDoesNotExist(public, title_set__title=name)
            self.assertObjectDoesNotExist(published, title_set__title=name)
        parent.publish("en")
        drafts = Page.objects.drafts()
        public = Page.objects.public()
        published = Page.objects.public().published('en')
        # Cascade publish for all pending descendants
        for name in ('parent', 'child'):
            self.assertObjectExist(drafts, title_set__title=name)
            page = drafts.get(title_set__title=name)
            self.assertTrue(page.is_published("en"), name)
            self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT, name)
            self.assertIsNotNone(page.publisher_public, name)
            self.assertTrue(page.publisher_public.is_published('en'), name)
            self.assertObjectExist(public, title_set__title=name)
            self.assertObjectExist(published, title_set__title=name)
    def test_simple_publisher(self):
        """
        Creates the stuff needed for these tests.
        Please keep this up-to-date (the docstring!)
        A
        / \
        B C
        """
        # Create a simple tree of 3 pages
        pageA = create_page("Page A", "nav_playground.html", "en",
                            published=True)
        pageB = create_page("Page B", "nav_playground.html", "en", parent=pageA,
                            published=True)
        pageC = create_page("Page C", "nav_playground.html", "en", parent=pageA,
                            published=False)
        # Assert A and B are published, C unpublished
        self.assertTrue(pageA.publisher_public_id)
        self.assertTrue(pageB.publisher_public_id)
        self.assertTrue(not pageC.publisher_public_id)
        self.assertEqual(len(Page.objects.public().published("en")), 2)
        # Let's publish C now.
        pageC.publish("en")
        # Assert all are published
        self.assertTrue(pageA.publisher_public_id)
        self.assertTrue(pageB.publisher_public_id)
        self.assertTrue(pageC.publisher_public_id)
        self.assertEqual(len(Page.objects.public().published("en")), 3)
    def test_i18n_publishing(self):
        """Publishing an extra language adds a published public title
        without touching the other language."""
        page = self.create_page('parent', published=True)
        # Draft + public 'en' titles exist already.
        self.assertEqual(Title.objects.all().count(), 2)
        create_title("de", "vater", page)
        self.assertEqual(Title.objects.all().count(), 3)
        self.assertEqual(Title.objects.filter(published=True).count(), 2)
        page.publish('de')
        self.assertEqual(Title.objects.all().count(), 4)
        self.assertEqual(Title.objects.filter(published=True).count(), 4)
    def test_publish_ordering(self):
        """Publishing after moving pages keeps draft and public trees in
        the same (path) order."""
        page = self.create_page('parent', published=True)
        pageA = self.create_page('pageA', parent=page, published=True)
        pageC = self.create_page('pageC', parent=page, published=True)
        pageB = self.create_page('pageB', parent=page, published=True)
        page = page.reload()
        pageB.move_page(pageA, 'right')
        pageB.publish("en")
        # pageC needs reload since B has swapped places with it
        pageC.reload().publish("en")
        pageA.publish('en')
        drafts = Page.objects.drafts().order_by('path')
        draft_titles = [(p.get_title('en'), p.path) for p in drafts]
        self.assertEqual([('parent', "0001"),
                          ('pageA', "00010001"),
                          ('pageB', "00010002"),
                          ('pageC', "00010003")], draft_titles)
        public = Page.objects.public().order_by('path')
        public_titles = [(p.get_title('en'), p.path) for p in public]
        self.assertEqual([('parent', "0002"),
                          ('pageA', "00020001"),
                          ('pageB', "00020002"),
                          ('pageC', "00020003")], public_titles)
        page.publish('en')
        # Re-publishing the parent must not change either ordering.
        drafts = Page.objects.drafts().order_by('path')
        draft_titles = [(p.get_title('en'), p.path) for p in drafts]
        self.assertEqual([('parent', "0001"),
                          ('pageA', "00010001"),
                          ('pageB', "00010002"),
                          ('pageC', "00010003")], draft_titles)
        public = Page.objects.public().order_by('path')
        public_titles = [(p.get_title('en'), p.path) for p in public]
        self.assertEqual([('parent', "0002"),
                          ('pageA', "00020001"),
                          ('pageB', "00020002"),
                          ('pageC', "00020003")], public_titles)
def test_publish_ordering2(self):
page = self.create_page('parent', published=False)
pageA = self.create_page('pageA', published=False)
pageC = self.create_page('pageC', published=False, parent=pageA)
pageB = self.create_page('pageB', published=False, parent=pageA)
page = page.reload()
pageA.publish('en')
pageB.publish('en')
pageC.publish('en')
page.publish('en')
drafts = Page.objects.filter(publisher_is_draft=True).order_by('path')
publics = Page.objects.filter(publisher_is_draft=False).order_by('path')
x = 0
for draft in drafts:
self.assertEqual(draft.publisher_public_id, publics[x].pk)
x += 1
    def test_unpublish_unpublish(self):
        """A page can be unpublished and then re-published; the draft row
        survives both transitions."""
        name = self._testMethodName
        page = self.create_page(name, published=True)
        drafts = Page.objects.drafts()
        published = Page.objects.public().published("en")
        self.assertObjectExist(drafts, title_set__title=name)
        self.assertObjectExist(published, title_set__title=name)
        page.unpublish('en')
        self.assertFalse(page.is_published('en'))
        self.assertObjectExist(drafts, title_set__title=name)
        self.assertObjectDoesNotExist(published, title_set__title=name)
        page.publish('en')
        self.assertTrue(page.publisher_public_id)
        self.assertObjectExist(drafts, title_set__title=name)
        self.assertObjectExist(published, title_set__title=name)
    def test_delete_title_unpublish(self):
        """Deleting a parent's titles takes the child's public page
        offline as well."""
        page = self.create_page('test', published=True)
        sub_page = self.create_page('test2', published=True, parent=page)
        self.assertTrue(sub_page.publisher_public.is_published('en'))
        page.title_set.all().delete()
        self.assertFalse(sub_page.publisher_public.is_published('en', force_reload=True))
    def test_modify_child_while_pending(self):
        """Changes to a child published while its parent is offline stay
        PENDING, and go live once the parent is re-published."""
        home = self.create_page("Home", published=True, in_navigation=True)
        child = self.create_page("Child", published=True, parent=home,
                                 in_navigation=False)
        home = home.reload()
        home.unpublish('en')
        self.assertEqual(Title.objects.count(), 4)
        child = child.reload()
        self.assertFalse(child.publisher_public.is_published('en'))
        self.assertFalse(child.in_navigation)
        self.assertFalse(child.publisher_public.in_navigation)
        child.in_navigation = True
        child.save()
        child.publish('en')
        child = self.reload(child)
        self.assertEqual(Title.objects.count(), 4)
        self.assertTrue(child.is_published('en'))
        # Public copy stays offline while the parent is unpublished...
        self.assertFalse(child.publisher_public.is_published('en'))
        # ...but the attribute change has been copied over already.
        self.assertTrue(child.in_navigation)
        self.assertTrue(child.publisher_public.in_navigation)
        self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
        home.publish('en')
        child = self.reload(child)
        self.assertTrue(child.is_published('en'))
        self.assertTrue(child.publisher_public_id)
        self.assertTrue(child.publisher_public.in_navigation)
        self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
    def test_republish_with_descendants(self):
        """Unpublishing the root puts descendants in PENDING; re-publishing
        it brings the whole subtree back to DEFAULT."""
        home = self.create_page("Home", published=True)
        child = self.create_page("Child", published=True, parent=home)
        gc = self.create_page("GC", published=True, parent=child)
        self.assertTrue(child.is_published("en"))
        self.assertTrue(gc.is_published('en'))
        home = home.reload()
        home.unpublish('en')
        child = self.reload(child)
        gc = self.reload(gc)
        # Drafts stay "published"; only the public copies go offline.
        self.assertTrue(child.is_published("en"))
        self.assertTrue(gc.is_published("en"))
        self.assertFalse(child.publisher_public.is_published("en"))
        self.assertFalse(gc.publisher_public.is_published('en'))
        self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
        self.assertEqual(gc.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
        home.publish('en')
        child = self.reload(child)
        gc = self.reload(gc)
        self.assertTrue(child.publisher_public_id)
        self.assertTrue(gc.is_published('en'))
        self.assertTrue(child.is_published('en'))
        self.assertTrue(gc.publisher_public_id)
        self.assertEqual(child.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
        self.assertEqual(gc.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
    def test_republish_with_dirty_children(self):
        """Children edited around a parent's unpublish keep their DIRTY
        state through the parent's re-publish."""
        home = self.create_page("Home", published=True)
        dirty1 = self.create_page("Dirty1", published=True, parent=home)
        dirty2 = self.create_page("Dirty2", published=True, parent=home)
        home = self.reload(home)
        dirty1 = self.reload(dirty1)
        dirty2 = self.reload(dirty2)
        # dirty1 is edited BEFORE the unpublish, dirty2 AFTER.
        dirty1.in_navigation = True
        dirty1.save()
        home.unpublish('en')
        dirty2.in_navigation = True
        dirty2.save()
        dirty1 = self.reload(dirty1)
        dirty2 = self.reload(dirty2)
        self.assertTrue(dirty1.is_published)
        self.assertTrue(dirty2.publisher_public_id)
        self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
        self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
        home = self.reload(home)
        # Guard against a query explosion when republishing the subtree.
        with self.assertNumQueries(FuzzyInt(0, 100)):
            home.publish('en')
        dirty1 = self.reload(dirty1)
        dirty2 = self.reload(dirty2)
        self.assertTrue(dirty1.is_published("en"))
        self.assertTrue(dirty2.is_published("en"))
        self.assertTrue(dirty1.publisher_public.is_published("en"))
        self.assertTrue(dirty2.publisher_public.is_published("en"))
        # Unsaved draft edits remain dirty even after the cascade publish.
        self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
        self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
def test_republish_with_unpublished_child(self):
"""
Unpub1 was never published, and unpub2 has been unpublished after the
fact. None of the grandchildren should become published.
"""
home = self.create_page("Home", published=True)
unpub1 = self.create_page("Unpub1", published=False, parent=home)
unpub2 = self.create_page("Unpub2", published=True, parent=home)
gc1 = self.create_page("GC1", published=True, parent=unpub1)
gc2 = self.create_page("GC2", published=True, parent=unpub2)
self.assertFalse(gc1.publisher_public_id)
self.assertFalse(gc1.publisher_public_id)
self.assertTrue(gc1.is_published('en'))
self.assertTrue(gc2.is_published('en'))
home.unpublish('en')
unpub1 = self.reload(unpub1)
unpub2.unpublish('en') # Just marks this as not published
for page in (unpub1, unpub2):
self.assertFalse(page.is_published('en'), page)
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertIsNone(unpub1.publisher_public)
self.assertIsNotNone(unpub2.publisher_public)
self.assertFalse(unpub2.publisher_public.is_published('en'))
gc1 = self.reload(gc1)
gc2 = self.reload(gc2)
for page in (gc1, gc2):
self.assertTrue(page.is_published('en'))
self.assertEqual(page.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
self.assertIsNone(gc1.publisher_public)
self.assertIsNotNone(gc2.publisher_public)
self.assertFalse(gc2.publisher_public.is_published('en'))
    def test_unpublish_with_descendants(self):
        """Unpublishing a page takes the whole subtree offline (404s) and
        marks published descendants PENDING."""
        page = self.create_page("Page", published=True)
        child = self.create_page("Child", parent=page, published=True)
        self.create_page("Grandchild", parent=child, published=True)
        page = page.reload()
        child.reload()
        drafts = Page.objects.drafts()
        public = Page.objects.public()
        published = Page.objects.public().published("en")
        self.assertEqual(published.count(), 3)
        self.assertEqual(page.get_descendant_count(), 2)
        base = reverse('pages-root')
        for url in (base, base + 'child/', base + 'child/grandchild/'):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200, url)
        for title in ('Page', 'Child', 'Grandchild'):
            self.assertObjectExist(drafts, title_set__title=title)
            self.assertObjectExist(public, title_set__title=title)
            self.assertObjectExist(published, title_set__title=title)
            item = drafts.get(title_set__title=title)
            self.assertTrue(item.publisher_public_id)
            self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
        self.assertTrue(page.unpublish('en'), 'Unpublish was not successful')
        self.assertFalse(page.is_published('en'))
        # Clear the page cache so the 404s below are not served stale.
        cache.clear()
        for url in (base, base + 'child/', base + 'child/grandchild/'):
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)
        for title in ('Page', 'Child', 'Grandchild'):
            self.assertObjectExist(drafts, title_set__title=title)
            self.assertObjectExist(public, title_set__title=title)
            self.assertObjectDoesNotExist(published, title_set__title=title)
            item = drafts.get(title_set__title=title)
            if title == 'Page':
                self.assertFalse(item.is_published("en"))
                self.assertFalse(item.publisher_public.is_published("en"))
                # Not sure what the proper state of these are after unpublish
                #self.assertEqual(page.publisher_state, PUBLISHER_STATE_DEFAULT)
                self.assertTrue(page.is_dirty('en'))
            else:
                # The changes to the published subpages are simply that the
                # published flag of the PUBLIC instance goes to false, and the
                # publisher state is set to mark waiting for parent
                self.assertTrue(item.is_published('en'), title)
                self.assertFalse(item.publisher_public.is_published('en'), title)
                self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_PENDING,
                                 title)
                self.assertTrue(item.is_dirty('en'), title)
def test_unpublish_with_dirty_descendants(self):
    """Unpublishing a page marks every descendant dirty and takes their
    public versions offline."""
    root = self.create_page("Page", published=True)
    mid = self.create_page("Child", parent=root, published=True)
    leaf = self.create_page("Grandchild", parent=mid, published=True)
    # Dirty only the middle page before the unpublish.
    mid.in_navigation = True
    mid.save()
    self.assertTrue(mid.is_dirty("en"))
    self.assertFalse(leaf.is_dirty('en'))
    self.assertTrue(mid.publisher_public.is_published('en'))
    self.assertTrue(leaf.publisher_public.is_published('en'))
    root.unpublish('en')
    mid = self.reload(mid)
    leaf = self.reload(leaf)
    # Every descendant becomes dirty after the ancestor is unpublished...
    self.assertTrue(mid.is_dirty('en'))
    self.assertTrue(leaf.is_dirty('en'))
    # ...and its public counterpart is taken offline regardless.
    self.assertFalse(mid.publisher_public.is_published('en'))
    self.assertFalse(leaf.publisher_public.is_published('en'))
def test_prepublish_descendants(self):
    """Publishing a page under a still-unpublished parent leaves it in
    the PENDING publisher state until the parent is published."""
    page = self.create_page("Page", published=True)
    child = self.create_page("Child", parent=page, published=False)
    gchild2 = self.create_page("Grandchild2", parent=child, published=False)
    self.create_page("Grandchild3", parent=child, published=True)
    gchild = self.create_page("Grandchild", published=True)
    gchild = gchild.reload()
    child = child.reload()
    # Move the already-published "Grandchild" under the unpublished child.
    gchild.move_page(target=child, position='last-child')
    gchild.reload()
    gchild.publish('en')
    # The parent draft is unpublished, so the grandchild stays pending.
    self.assertFalse(child.is_published('en'))
    self.assertTrue(gchild.is_published('en'))
    self.assertEqual(gchild.get_publisher_state('en'), PUBLISHER_STATE_PENDING)
    child = child.reload()
    child.publish('en')
    gchild2 = gchild2.reload()
    gchild2.publish('en')
    # Publishing the parent releases the pending grandchild.
    self.assertTrue(child.is_published("en"))
    self.assertTrue(gchild.is_published("en"))
    self.assertEqual(gchild.get_publisher_state('en', force_reload=True), PUBLISHER_STATE_DEFAULT)
    gchild = gchild.reload()
    gchild2 = gchild2.reload()
    # Draft and public copies must agree on tree position (path suffix
    # beyond the root segment, and depth).
    self.assertEqual(gchild.path[4:], gchild.publisher_public.path[4:])
    self.assertEqual(gchild.depth, gchild.publisher_public.depth)
def test_republish_multiple_root(self):
    """Unpublishing the home page must hand the site root URL over to the
    next root page; republishing must restore the original layout."""
    # TODO: The paths do not match expected behaviour
    home = self.create_page("Page", published=True)
    other = self.create_page("Another Page", published=True)
    child = self.create_page("Child", published=True, parent=home)
    child2 = self.create_page("Child", published=True, parent=other)
    # NOTE(review): assertTrue here receives 2 as the *msg* argument, so it
    # only checks truthiness -- probably meant assertEqual; confirm before
    # changing.
    self.assertTrue(Page.objects.filter(is_home=True).count(), 2)
    self.assertTrue(home.is_home)
    home = home.reload()
    self.assertTrue(home.publisher_public.is_home)
    root = reverse('pages-root')
    # Initial URL layout: ``home`` owns the site root.
    self.assertEqual(home.get_absolute_url(), root)
    self.assertEqual(home.get_public_object().get_absolute_url(), root)
    self.assertEqual(child.get_absolute_url(), root + 'child/')
    self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
    self.assertEqual(other.get_absolute_url(), root + 'another-page/')
    self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
    self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
    self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
    home = self.reload(home)
    home.unpublish('en')
    home = self.reload(home)
    other = self.reload(other)
    child = self.reload(child)
    child2 = self.reload(child2)
    # After unpublishing, ``other`` becomes the home page and takes the
    # root URL; ``home`` is pushed down to /page/.
    self.assertFalse(home.is_home)
    self.assertFalse(home.publisher_public.is_home)
    self.assertTrue(other.is_home)
    self.assertTrue(other.publisher_public.is_home)
    self.assertEqual(other.get_absolute_url(), root)
    self.assertEqual(other.get_public_object().get_absolute_url(), root)
    self.assertEqual(home.get_absolute_url(), root + 'page/')
    self.assertEqual(home.get_public_object().get_absolute_url(), root + 'page/')
    self.assertEqual(child.get_absolute_url(), root + 'page/child/')
    self.assertEqual(child.get_public_object().get_absolute_url(), root + 'page/child/')
    self.assertEqual(child2.get_absolute_url(), root + 'child/')
    self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'child/')
    home.publish('en')
    home = self.reload(home)
    other = self.reload(other)
    child = self.reload(child)
    child2 = self.reload(child2)
    # Republishing restores the original URL layout.
    self.assertTrue(home.is_home)
    self.assertTrue(home.publisher_public.is_home)
    self.assertEqual(home.get_absolute_url(), root)
    self.assertEqual(home.get_public_object().get_absolute_url(), root)
    self.assertEqual(child.get_absolute_url(), root + 'child/')
    self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
    self.assertEqual(other.get_absolute_url(), root + 'another-page/')
    self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
    self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
    self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
def test_revert_contents(self):
    """Reverting a draft restores the plugin set of the public version."""
    admin = self.get_superuser()
    page = create_page("Page", "nav_playground.html", "en", published=True,
                       created_by=admin)
    body_ph = page.placeholders.get(slot=u"body")
    doomed = add_plugin(body_ph, u"TextPlugin", u"en", body="Deleted content")
    survivor = add_plugin(body_ph, u"TextPlugin", u"en", body="Public content")
    page.publish('en')
    # Mutate the draft: edit one plugin, drop the other.
    survivor.body = "<p>Draft content</p>"
    survivor.save()
    doomed.delete()
    self.assertEqual(CMSPlugin.objects.count(), 3)
    # Revert the draft back to the published state.
    page.revert('en')
    self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
    self.assertEqual(CMSPlugin.objects.count(), 4)
    page_plugins = CMSPlugin.objects.filter(placeholder__page=page)
    self.assertEqual(page_plugins.count(), 2)
    instances = [p.get_plugin_instance()[0] for p in page_plugins]
    self.assertEqual(instances[0].body, "Deleted content")
    self.assertEqual(instances[1].body, "Public content")
def test_revert_move(self):
    """Moving a draft page moves its published version and children too."""
    top = create_page("Parent", "nav_playground.html", "en", published=True)
    top_url = top.get_absolute_url()
    page = create_page("Page", "nav_playground.html", "en", published=True,
                       parent=top)
    dest = create_page("Other", "nav_playground.html", "en", published=True)
    dest_url = dest.get_absolute_url()
    child = create_page("Child", "nav_playground.html", "en", published=True,
                        parent=page)
    top = top.reload()
    page = page.reload()
    self.assertEqual(page.get_absolute_url(), top_url + "page/")
    self.assertEqual(child.get_absolute_url(), top_url + "page/child/")
    # Relocate the page (and implicitly its child) under ``dest``.
    page.move_page(dest)
    page = self.reload(page)
    child = self.reload(child)
    self.assertEqual(page.get_absolute_url(), dest_url + "page/")
    self.assertEqual(child.get_absolute_url(), dest_url + "page/child/")
    # The public versions pick up the new URLs as well.
    self.assertEqual(page.publisher_public.get_absolute_url(), dest_url + "page/")
    self.assertEqual(child.publisher_public.get_absolute_url(), dest_url + "page/child/")
def test_publish_works_with_descendants(self):
    """
    For help understanding what this tests for, see:
    http://articles.sitepoint.com/print/hierarchical-data-database

    Creates this published structure:
            home
           /    \
       item1    item2
               /     \
         subitem1   subitem2

    and verifies that the draft and public trees stay structurally in
    sync, both after the initial publish and after re-publishing item2.
    """
    def assert_trees_consistent(check_numchild):
        # Draft tree and public tree must mirror each other: same size,
        # same parent/child relationships, consistent treebeard paths.
        # (Deduplicates the two verification loops of the original.)
        not_drafts = list(Page.objects.filter(publisher_is_draft=False).order_by('path'))
        drafts = list(Page.objects.filter(publisher_is_draft=True).order_by('path'))
        self.assertEqual(len(not_drafts), 5)
        self.assertEqual(len(drafts), 5)
        for idx, draft in enumerate(drafts):
            public = not_drafts[idx]
            # Check that a node doesn't become a root node magically
            self.assertEqual(bool(public.parent_id), bool(draft.parent_id))
            if check_numchild:
                self.assertEqual(public.numchild, draft.numchild)
            for node in (public, draft):
                if node.parent:
                    # First 4 path characters (the root segment) must be
                    # shared with the parent.
                    self.assertEqual(node.path[0:4], node.parent.path[0:4])
                    self.assertTrue(node.parent in node.get_ancestors())
                    self.assertTrue(node in node.parent.get_descendants())
                    self.assertTrue(node in node.parent.get_children())

    home_page = create_page("home", "nav_playground.html", "en",
                            published=True, in_navigation=False)
    create_page("item1", "nav_playground.html", "en", parent=home_page,
                published=True)
    item2 = create_page("item2", "nav_playground.html", "en", parent=home_page,
                        published=True)
    create_page("subitem1", "nav_playground.html", "en", parent=item2,
                published=True)
    create_page("subitem2", "nav_playground.html", "en", parent=item2,
                published=True)
    item2 = item2.reload()
    assert_trees_consistent(check_numchild=False)
    # Now call publish again. The structure should not change.
    item2.publish('en')
    assert_trees_consistent(check_numchild=True)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova import objects
from nova.scheduler.filters import affinity_filter
from nova import test
from nova.tests.unit.scheduler import fakes
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
class TestDifferentHostFilter(test.NoDBTestCase):
    """Tests for the DifferentHostFilter scheduler filter."""

    def setUp(self):
        super(TestDifferentHostFilter, self).setUp()
        self.filt_cls = affinity_filter.DifferentHostFilter()

    def _props(self, hints):
        # Every case shares the same filter_properties shape.
        return {'context': mock.sentinel.ctx, 'scheduler_hints': hints}

    def test_affinity_different_filter_passes(self):
        # Host only carries an instance the hint does not mention.
        host = fakes.FakeHostState('host1', 'node1', {})
        resident = objects.Instance(uuid='different')
        host.instances = {resident.uuid: resident}
        props = self._props({'different_host': ['same'], })
        self.assertTrue(self.filt_cls.host_passes(host, props))

    def test_affinity_different_filter_no_list_passes(self):
        # A bare (non-list) hint value on an empty host passes.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {}
        props = self._props({'different_host': 'same'})
        self.assertTrue(self.filt_cls.host_passes(host, props))

    def test_affinity_different_filter_fails(self):
        # Host already runs the instance we must avoid.
        resident = objects.Instance(uuid='same')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {resident.uuid: resident}
        props = self._props({'different_host': ['same'], })
        self.assertFalse(self.filt_cls.host_passes(host, props))

    def test_affinity_different_filter_handles_none(self):
        # Missing scheduler_hints must not break the filter.
        resident = objects.Instance(uuid='same')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {resident.uuid: resident}
        props = self._props(None)
        self.assertTrue(self.filt_cls.host_passes(host, props))
class TestSameHostFilter(test.NoDBTestCase):
    """Tests for the SameHostFilter scheduler filter."""

    def setUp(self):
        super(TestSameHostFilter, self).setUp()
        self.filt_cls = affinity_filter.SameHostFilter()

    def test_affinity_same_filter_passes(self):
        # Host runs the instance named in the hint, so it passes.
        existing = objects.Instance(uuid='same')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {existing.uuid: existing}
        hints = {'same_host': ['same'], }
        self.assertTrue(self.filt_cls.host_passes(
            host, {'context': mock.sentinel.ctx, 'scheduler_hints': hints}))

    def test_affinity_same_filter_no_list_passes(self):
        # A bare (non-list) hint value on an empty host passes.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {}
        hints = {'same_host': 'same'}
        self.assertTrue(self.filt_cls.host_passes(
            host, {'context': mock.sentinel.ctx, 'scheduler_hints': hints}))

    def test_affinity_same_filter_fails(self):
        # Host runs a different instance, so it fails.
        existing = objects.Instance(uuid='different')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {existing.uuid: existing}
        hints = {'same_host': ['same'], }
        self.assertFalse(self.filt_cls.host_passes(
            host, {'context': mock.sentinel.ctx, 'scheduler_hints': hints}))

    def test_affinity_same_filter_handles_none(self):
        # Missing scheduler_hints must not break the filter.
        existing = objects.Instance(uuid='different')
        host = fakes.FakeHostState('host1', 'node1', {})
        host.instances = {existing.uuid: existing}
        self.assertTrue(self.filt_cls.host_passes(
            host, {'context': mock.sentinel.ctx, 'scheduler_hints': None}))
class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
    """Tests for the SimpleCIDRAffinityFilter scheduler filter."""

    def setUp(self):
        super(TestSimpleCIDRAffinityFilter, self).setUp()
        self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()

    def test_affinity_simple_cidr_filter_passes(self):
        # Host IP falls inside the /24 network around the affinity IP.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': mock.sentinel.ctx,
                             'scheduler_hints': {
                                 'cidr': '/24',
                                 'build_near_host_ip': affinity_ip}}
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_affinity_simple_cidr_filter_fails(self):
        # A /32 mask matches only the exact affinity IP, so the host fails.
        host = fakes.FakeHostState('host1', 'node1', {})
        host.host_ip = '10.8.1.1'
        affinity_ip = "10.8.1.100"
        filter_properties = {'context': mock.sentinel.ctx,
                             'scheduler_hints': {
                                 'cidr': '/32',
                                 'build_near_host_ip': affinity_ip}}
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_affinity_simple_cidr_filter_handles_none(self):
        # Missing scheduler_hints must not break the filter.
        # (Removed the unused affinity_ip computation from CONF.my_ip --
        # it was dead code, since the hints here are None.)
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'context': mock.sentinel.ctx,
                             'scheduler_hints': None}
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
class TestGroupAffinityFilter(test.NoDBTestCase):
    """Tests for the server-group (anti-)affinity scheduler filters."""

    def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
        # No policy, an unrelated policy, no group hosts, or a different
        # group host: all of these must pass.
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': ['affinity']}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': [policy]}
        filter_properties['group_hosts'] = []
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties['group_hosts'] = ['host2']
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_group_anti_affinity_filter_passes(self):
        self._test_group_anti_affinity_filter_passes(
            affinity_filter.ServerGroupAntiAffinityFilter(),
            'anti-affinity')

    def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
        # The group already has an instance on this host, so the
        # anti-affinity policy rejects it.
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'group_policies': [policy],
                             'group_hosts': ['host1']}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_group_anti_affinity_filter_fails(self):
        self._test_group_anti_affinity_filter_fails(
            affinity_filter.ServerGroupAntiAffinityFilter(),
            'anti-affinity')

    def _test_group_affinity_filter_passes(self, filt_cls, policy):
        # No policy, the opposite policy, or the group already placed on
        # this host: all of these must pass.
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        filter_properties = {'group_policies': ['anti-affinity']}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # FIX: use the ``policy`` parameter (was hard-coded 'affinity'),
        # mirroring _test_group_anti_affinity_filter_passes; behavior is
        # unchanged for the existing caller which passes 'affinity'.
        filter_properties = {'group_policies': [policy],
                             'group_hosts': ['host1']}
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_group_affinity_filter_passes(self):
        self._test_group_affinity_filter_passes(
            affinity_filter.ServerGroupAffinityFilter(), 'affinity')

    def _test_group_affinity_filter_fails(self, filt_cls, policy):
        # The group is pinned to another host, so affinity rejects this one.
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'group_policies': [policy],
                             'group_hosts': ['host2']}
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_group_affinity_filter_fails(self):
        self._test_group_affinity_filter_fails(
            affinity_filter.ServerGroupAffinityFilter(), 'affinity')
|
|
__author__ = '116_24'
import pygame
import sys
import time
from pygame.locals import *
import random
import math
import entities
pygame.init()
mainClock = pygame.time.Clock()
# 500x400 window, default flags, 32-bit colour depth.
windowSurface = pygame.display.set_mode((500, 400), 0, 32)
pygame.display.set_caption('Dungeon Sam is pretty damn cool v0.9, 10/16/11')
white = (255, 255, 255)
black = (0, 0, 0)
windowSurface.fill(white)
#Load/ define images here
hero = pygame.Rect(300, 100, 30, 33)#hero-the-rectangle must have the same co-ordinates as heroStretchedImage
heroImage = pygame.image.load('hero.png')
heroStrechedImage = pygame.transform.scale(heroImage,(30, 33))
wall = pygame.Rect(250, 200, 38, 38)
wallImage = pygame.image.load('Wall.png')
enemy = pygame.Rect(150, 350, 38, 38)
enemyImage = pygame.image.load('enemy.png')
powerUp = pygame.Rect(100, 100, 21, 21)#power up, may be removed, just an idea that is easy to make
powerUpImage = pygame.image.load('Diamond.png')
arrowImage = pygame.image.load('arrowUp.png')
swordImage = pygame.image.load('Sword.png')
# Pre-rendered "HP" label for the health bar.
HP = pygame.font.SysFont(None, 20).render('HP', False, black,)#Draw "HP" on the screen(not really, "blit" is the one that does it)
#Define Variables here
# The HP bar colour slides from green towards red as the hero takes damage.
HpRedContent = 0 #hpredcontent is the amount of red is in the hp bar. It makes the hpbar slide from green to red
HpGreenContent = 255
HpRect = HP.get_rect()
blit = False
heroHp = 200
monsterHp = 50
arrowBlit = False  # True while an arrow is in flight
ARROWMOVESPEED = 5
enemyBlit = True  # True while the enemy should be drawn
swordBlit = False  # True while a sword swing is active
shifty = False  # True while left-shift is held (sword mode)
lifetime = 0  # frames the current sword swing has existed
onScreen = False  # True while a sword swing is on screen
#Define Objects here
#static object, doesn't do much yet...
class Static(object):
    """Solid, non-moving level geometry (e.g. walls)."""

    def __init__(self, image, name):
        # NOTE: despite its name, ``image`` is a pygame.Rect (see the
        # module-level ``wall``); its corner coordinates are cached here.
        self.name = name
        self.image = image
        self.x, self.y = image.x, image.y
class weapon(object):
    """Ranged weapon (arrow).

    Aiming math: getRatio() stores speed/distance for a click delta,
    getXY() converts that delta into a per-frame (x, y) step, and
    getNextX()/getNextY() apply the step to a coordinate.
    """

    def shoot(self, direction):
        # Steps the module-level ``arrow`` rect by ARROWMOVESPEED in the
        # named direction. Not called by the visible click-to-aim main
        # loop, which drives the arrow via getNextX/getNextY instead.
        if direction == "Left":
            arrow.move_ip(-1 * ARROWMOVESPEED, 0)
        if direction == "Right":
            arrow.move_ip(ARROWMOVESPEED, 0)
        if direction == "Up":
            arrow.move_ip(0, -1 * ARROWMOVESPEED)
        if direction == "Down":
            arrow.move_ip(0, ARROWMOVESPEED)

    def getRatio(self, A, B, AMS):
        """Store speed/distance for click delta (A, B); AMS is the arrow
        move speed.

        BUG FIX: guard the zero-length vector (a click exactly on the
        hero's centre), which previously raised ZeroDivisionError.
        """
        distance = math.sqrt(A * A + B * B)
        self.__ratio = AMS / distance if distance else 0.0

    def getXY(self, A, B):
        # Per-frame step along each axis, proportional to the click delta.
        self.__X = self.__ratio * A
        self.__Y = self.__ratio * B

    def getNextX(self, heroX):
        """Return the next x coordinate after one frame of travel."""
        return heroX + self.__X

    def getNextY(self, heroY):
        """Return the next y coordinate after one frame of travel."""
        return heroY + self.__Y
class melee(weapon):
    """Close-range weapon (sword) -- same aiming math as ``weapon``, but
    with a short lifetime enforced by the main loop."""

    def __init__(self):
        # BUG FIX: the original assigned to a *local* ``lifetime``, so the
        # instance attribute was never actually created.
        self.lifetime = 0

    def slash(self, direction):
        # Steps the module-level ``sword`` rect.
        # BUG FIX: the original moved ``arrow`` here -- a copy/paste of
        # weapon.shoot. Not called in the visible main loop, which drives
        # the sword via getNextX/getNextY.
        if direction == "Left":
            sword.move_ip(-1 * ARROWMOVESPEED, 0)
        if direction == "Right":
            sword.move_ip(ARROWMOVESPEED, 0)
        if direction == "Up":
            sword.move_ip(0, -1 * ARROWMOVESPEED)
        if direction == "Down":
            sword.move_ip(0, ARROWMOVESPEED)
# Create the actors and the level geometry.
player = entities.Hero(hero, 'Bob', heroHp)
monster1 = entities.Monster(enemy, 'joe', monsterHp)
MOVESPEED = 2
staticWall = Static(wall, 'wall')
#mainloop...program actually runs here
while player.Hp > 0: # DON"T DELETE THE WHILE LOOP
    for event in pygame.event.get():#detects events
        if event.type == QUIT:#if the "X" button is pressed in top right
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN:
            # Arrow keys / WASD only set the movement flags; the actual
            # movement happens once per frame further below.
            if event.key == K_LEFT or event.key == ord('a'):
                player.moving = True
                player.movingLeft = True
                player.movingRight = False
            if event.key == K_RIGHT or event.key == ord('d'):
                player.moving = True
                player.movingRight = True
                player.movingLeft = False
            if event.key == K_UP or event.key == ord('w'):
                player.moving = True
                player.movingUp = True
                player.movingDown = False
            if event.key == K_DOWN or event.key == ord('s'):
                player.moving = True
                player.movingUp = False
                player.movingDown = True
            if event.key == K_x:
                # 'x' is a quick-quit key.
                pygame.quit()
                sys.exit()
            if event.key == K_LSHIFT:
                shifty = True
        if event.type == KEYUP:
            if event.key == K_UP or event.key == ord('w'): # WASD!!
                player.moving = False
                player.movingUp = False
            if event.key == K_DOWN or event.key == ord('s'):
                player.moving = False
                player.movingDown = False
            if event.key == K_LEFT or event.key == ord('a'):
                player.moving = False
                player.movingLeft = False
            if event.key == K_RIGHT or event.key == ord('d'):
                player.moving = False
                player.movingRight = False
            if event.key == K_LSHIFT:
                shifty = False
        if event.type == MOUSEBUTTONDOWN and shifty == False:#ARROWS ARROWS ARROWS ARROWS ARROWS
            # Click without shift: fire an arrow towards the cursor.
            xDelta = event.pos[0] - hero.centerx
            yDelta = event.pos[1] - hero.centery
            arrow = pygame.Rect(hero.centerx, hero.centery, 9, 14)
            newArrow = weapon()
            newArrow.getRatio(xDelta, yDelta, 10)
            newArrow.getXY(xDelta, yDelta)
            arrowBlit = True
        if event.type == MOUSEBUTTONDOWN and shifty == True:#SWORDS SWORDS SWORDS SWORDS SWORDS
            # Click with shift held: start a short-lived sword swing.
            xDelta = event.pos[0] - hero.centerx
            yDelta = event.pos[1] - hero.centery
            sword = pygame.Rect(hero.centerx, hero.centery, 9, 14)
            newSword = melee()
            newSword.getRatio(xDelta, yDelta, 10)
            newSword.getXY(xDelta, yDelta)
            swordBlit = True
            onScreen = True
    if onScreen == True:
        lifetime += 1
    if lifetime >= 4 and swordBlit == True:
        # Sword swings only last 4 frames.
        swordBlit = False
        onScreen = False
        lifetime = 0
    # Move arrow and update arrows
    if arrowBlit == True:
        arrow.centerx = newArrow.getNextX(arrow.centerx)
        arrow.centery = newArrow.getNextY(arrow.centery)
    if arrowBlit == True and arrow.colliderect(enemy):
        monster1.Hp -= 1 #arrow does 1 damage
        if monster1.Hp <= 0:  # was '== 0'; '<= 0' matches the sword branch
            enemyBlit = False  # BUG FIX: was 'enemyBlit == False' (a no-op comparison)
    #Move and update swords
    if swordBlit == True and lifetime < 4 and lifetime != 0:
        sword.centerx = newSword.getNextX(sword.centerx)
        sword.centery = newSword.getNextY(sword.centery)
    if swordBlit == True and sword.colliderect(enemy):
        monster1.Hp -= 1 #arrow does 1 damage
        if monster1.Hp <= 0:  # was '< 0'; '<= 0' so the enemy dies exactly at zero
            enemyBlit = False  # BUG FIX: was 'enemyBlit == False' (a no-op comparison)
    player.getOldX()
    #calls on players moving methods
    if player.movingLeft == True and player.image.left > 0:
        player.moveLeft(MOVESPEED)
    if player.movingRight == True and player.image.right < 500:
        player.moveRight(MOVESPEED)
    if player.movingUp == True and player.image.top > 0:
        player.moveUp(MOVESPEED)
    if player.movingDown == True and player.image.bottom < 400:
        player.moveDown(MOVESPEED)
    windowSurface.fill(white)
    if player.image.colliderect(wall):
        player.collideWall()
    if player.image.colliderect(enemy):
        # Contact damage; the hero writes the new HP-bar colours back.
        player.takeDamage(HpRedContent, HpGreenContent)
        HpRedContent = player.newRed
        HpGreenContent = player.newGreen
# --- entities.py (the ``entities`` module imported above) ---
import pygame
import sys
from pygame.locals import*
# NOTE(review): these rects/images duplicate the ones defined in the main
# script; the image loads happen at import time and require the asset files.
hero = pygame.Rect(300, 100, 30, 33)#hero-the-rectangle must have the same co-ordinates as heroStretchedImage
heroImage = pygame.image.load('hero.png')
heroStrechedImage = pygame.transform.scale(heroImage,(30, 33))
enemy = pygame.Rect(150, 350, 38, 38)
enemyImage = pygame.image.load('enemy.png')
#Entity object - all moving things(player, monsters, arrow) will be derived from this
class Entity(object):
    """Base class for every moving game object (player, monsters, ...).

    Despite its name, ``image`` is a pygame.Rect describing the entity's
    position and size; the movement methods mutate it in place.
    """
    # Movement flags. Class-level defaults (immutable bools), overwritten
    # per-instance by the event loop.
    moving = False
    movingLeft = False
    movingRight = False
    movingUp = False
    movingDown = False

    def __init__(self, image, name, Hp):
        self.name = name
        self.image = image
        self.Hp = Hp
        # Cache the spawn position.
        self.x = self.image.x
        self.y = self.image.y
        self.location = (self.x, self.y)
        # HP-bar colour state: starts fully green (Hp + 55 == 255 for the
        # hero's 200 HP) and shifts towards red as damage accumulates.
        self.newRed = 0
        self.newGreen = self.Hp + 55

    #moving methods
    def moveLeft(self, Xspeed):
        self.image.left -= Xspeed

    def moveRight(self, Xspeed):
        self.image.right += Xspeed

    def moveUp(self, Yspeed):
        self.image.top -= Yspeed

    def moveDown(self, Yspeed):
        self.image.bottom += Yspeed

    def takeDamage(self, amount):
        """Default no-op damage hook; subclasses override.

        BUG FIX: the original signature ``takeDamage(amount)`` was missing
        ``self``, so any call on an instance raised TypeError.
        """
        pass
#Hero object(what the player is)
class Hero(Entity):
    """The player-controlled entity."""
    # Last known top-left corner, captured each frame by getOldX() and
    # used to undo movement on wall collision.
    oldX = None
    oldY = None

    def __init__(self, image, name, Hp):
        # BUG FIX: ``inventory`` was a mutable *class* attribute, shared by
        # every Hero instance; make it per-instance instead.
        super(Hero, self).__init__(image, name, Hp)
        self.inventory = []

    def getOldX(self):
        # Snapshot the current position (despite the name, saves y too).
        self.oldX = self.image.left
        self.oldY = self.image.top

    def heroDie(self):
        pygame.quit()
        sys.exit()

    def collideWall(self):
        # Undo the last move so the hero cannot pass through walls.
        self.image.left = self.oldX
        self.image.top = self.oldY

    def takeDamage(self, RedContent, GreenContent):
        """Lose 1 HP and shift the HP bar one step from green to red.

        The RedContent/GreenContent arguments are currently unused; the
        caller reads the new values back from ``newRed``/``newGreen``.
        """
        self.Hp -= 1
        self.newRed += 1
        self.newGreen = self.Hp
class Monster(Entity):
    """MONSTER"""
    # No behaviour of its own yet; inherits movement and state from Entity.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.