# Tests for the tarfile module (reading and writing tar archives,
# optionally compressed with gzip, bz2 or lzma).
import sys
import os
import io
from hashlib import md5
from contextlib import contextmanager
from random import Random
import pathlib
import unittest
import unittest.mock
import tarfile
from test import support
from test.support import script_helper
# Check for our compression modules.
# Each module may be absent from the build; the corresponding name is set
# to None so the @support.requires_* decorated mixins below can skip cleanly.
try:
    import gzip
except ImportError:
    gzip = None
try:
    import bz2
except ImportError:
    bz2 = None
try:
    import lzma
except ImportError:
    lzma = None
def md5sum(data):
    """Return the hexadecimal MD5 digest of the given bytes."""
    digest = md5(data)
    return digest.hexdigest()
# Filesystem locations shared by all test cases in this file.
TEMPDIR = os.path.abspath(support.TESTFN) + "-tardir"
tarextdir = TEMPDIR + '-extract-test'
tarname = support.findfile("testtar.tar")
gzipname = os.path.join(TEMPDIR, "testtar.tar.gz")
bz2name = os.path.join(TEMPDIR, "testtar.tar.bz2")
xzname = os.path.join(TEMPDIR, "testtar.tar.xz")
tmpname = os.path.join(TEMPDIR, "tmp.tar")
dotlessname = os.path.join(TEMPDIR, "testtar")
# Reference MD5 digests of the regular and sparse member payloads
# contained in testtar.tar.
md5_regtype = "65f477c818ad9e15f7feab0c6d37742f"
md5_sparse = "a54fbc4ca4f4399a90e1b27164012fc6"
class TarTest:
    """Base mixin: settings for an uncompressed tar archive.

    Compression mixins (GzipTest, Bz2Test, LzmaTest) override these
    attributes; test classes supply a ``prefix`` such as "r:" or "w:".
    """
    tarname = tarname
    suffix = ''
    open = io.FileIO
    taropen = tarfile.TarFile.taropen

    @property
    def mode(self):
        # Full open mode, e.g. "r:" + "gz" -> "r:gz".
        return self.prefix + self.suffix
@support.requires_gzip
class GzipTest:
    """Mixin: run the tests against the gzip-compressed archive."""
    tarname = gzipname
    suffix = 'gz'
    # gzip may be None when the module is unavailable; the class decorator
    # skips the tests in that case.
    open = gzip.GzipFile if gzip else None
    taropen = tarfile.TarFile.gzopen
@support.requires_bz2
class Bz2Test:
    """Mixin: run the tests against the bzip2-compressed archive."""
    tarname = bz2name
    suffix = 'bz2'
    # bz2 may be None when the module is unavailable; the class decorator
    # skips the tests in that case.
    open = bz2.BZ2File if bz2 else None
    taropen = tarfile.TarFile.bz2open
@support.requires_lzma
class LzmaTest:
    """Mixin: run the tests against the xz/lzma-compressed archive."""
    tarname = xzname
    suffix = 'xz'
    # lzma may be None when the module is unavailable; the class decorator
    # skips the tests in that case.
    open = lzma.LZMAFile if lzma else None
    taropen = tarfile.TarFile.xzopen
class ReadTest(TarTest):
    """Base for read tests: opens self.tar in setUp, closes in tearDown."""
    prefix = "r:"

    def setUp(self):
        # iso8859-1 maps every header byte 1:1 so raw member names survive.
        self.tar = tarfile.open(self.tarname, mode=self.mode,
                                encoding="iso8859-1")

    def tearDown(self):
        self.tar.close()
class UstarReadTest(ReadTest, unittest.TestCase):
    """Read tests exercising the plain ustar members of testtar.tar."""

    def test_fileobj_regular_file(self):
        # extractfile() must yield the member's full, correct payload.
        tarinfo = self.tar.getmember("ustar/regtype")
        with self.tar.extractfile(tarinfo) as fobj:
            data = fobj.read()
            self.assertEqual(len(data), tarinfo.size,
                    "regular file extraction failed")
            self.assertEqual(md5sum(data), md5_regtype,
                    "regular file extraction failed")

    def test_fileobj_readlines(self):
        # readlines() on an extracted file object must match readlines()
        # on the file extracted to disk.
        self.tar.extract("ustar/regtype", TEMPDIR)
        tarinfo = self.tar.getmember("ustar/regtype")
        with open(os.path.join(TEMPDIR, "ustar/regtype"), "r") as fobj1:
            lines1 = fobj1.readlines()
        with self.tar.extractfile(tarinfo) as fobj:
            fobj2 = io.TextIOWrapper(fobj)
            lines2 = fobj2.readlines()
            self.assertEqual(lines1, lines2,
                    "fileobj.readlines() failed")
            self.assertEqual(len(lines2), 114,
                    "fileobj.readlines() failed")
            self.assertEqual(lines2[83],
                    "I will gladly admit that Python is not the fastest "
                    "running scripting language.\n",
                    "fileobj.readlines() failed")

    def test_fileobj_iter(self):
        # Iterating the extracted file object must yield the same lines
        # as the file extracted to disk.
        self.tar.extract("ustar/regtype", TEMPDIR)
        tarinfo = self.tar.getmember("ustar/regtype")
        with open(os.path.join(TEMPDIR, "ustar/regtype"), "r") as fobj1:
            lines1 = fobj1.readlines()
        with self.tar.extractfile(tarinfo) as fobj2:
            lines2 = list(io.TextIOWrapper(fobj2))
            self.assertEqual(lines1, lines2,
                    "fileobj.__iter__() failed")

    def test_fileobj_seek(self):
        # Exercise absolute, relative and end-relative seeks, plus
        # read()/readline() interaction after seeking.
        self.tar.extract("ustar/regtype", TEMPDIR)
        with open(os.path.join(TEMPDIR, "ustar/regtype"), "rb") as fobj:
            data = fobj.read()
        tarinfo = self.tar.getmember("ustar/regtype")
        fobj = self.tar.extractfile(tarinfo)
        text = fobj.read()
        fobj.seek(0)
        self.assertEqual(0, fobj.tell(),
                "seek() to file's start failed")
        fobj.seek(2048, 0)
        self.assertEqual(2048, fobj.tell(),
                "seek() to absolute position failed")
        fobj.seek(-1024, 1)
        self.assertEqual(1024, fobj.tell(),
                "seek() to negative relative position failed")
        fobj.seek(1024, 1)
        self.assertEqual(2048, fobj.tell(),
                "seek() to positive relative position failed")
        s = fobj.read(10)
        self.assertEqual(s, data[2048:2058],
                "read() after seek failed")
        fobj.seek(0, 2)
        self.assertEqual(tarinfo.size, fobj.tell(),
                "seek() to file's end failed")
        self.assertEqual(fobj.read(), b"",
                "read() at file's end did not return empty string")
        fobj.seek(-tarinfo.size, 2)
        self.assertEqual(0, fobj.tell(),
                "relative seek() to file's end failed")
        fobj.seek(512)
        s1 = fobj.readlines()
        fobj.seek(512)
        s2 = fobj.readlines()
        self.assertEqual(s1, s2,
                "readlines() after seek failed")
        fobj.seek(0)
        self.assertEqual(len(fobj.readline()), fobj.tell(),
                "tell() after readline() failed")
        fobj.seek(512)
        self.assertEqual(len(fobj.readline()) + 512, fobj.tell(),
                "tell() after seek() and readline() failed")
        fobj.seek(0)
        line = fobj.readline()
        self.assertEqual(fobj.read(), data[len(line):],
                "read() after readline() failed")
        fobj.close()

    def test_fileobj_text(self):
        # Wrapping the extracted file object in TextIOWrapper must work,
        # including seeking in text mode.
        with self.tar.extractfile("ustar/regtype") as fobj:
            fobj = io.TextIOWrapper(fobj)
            data = fobj.read().encode("iso8859-1")
            self.assertEqual(md5sum(data), md5_regtype)
            try:
                fobj.seek(100)
            except AttributeError:
                # Issue #13815: seek() complained about a missing
                # flush() method.
                self.fail("seeking failed in text mode")

    # Test if symbolic and hard links are resolved by extractfile().  The
    # test link members each point to a regular member whose data is
    # supposed to be exported.
    def _test_fileobj_link(self, lnktype, regtype):
        with self.tar.extractfile(lnktype) as a, \
             self.tar.extractfile(regtype) as b:
            self.assertEqual(a.name, b.name)

    def test_fileobj_link1(self):
        self._test_fileobj_link("ustar/lnktype", "ustar/regtype")

    def test_fileobj_link2(self):
        self._test_fileobj_link("./ustar/linktest2/lnktype",
                                "ustar/linktest1/regtype")

    def test_fileobj_symlink1(self):
        self._test_fileobj_link("ustar/symtype", "ustar/regtype")

    def test_fileobj_symlink2(self):
        self._test_fileobj_link("./ustar/linktest2/symtype",
                                "ustar/linktest1/regtype")

    def test_issue14160(self):
        self._test_fileobj_link("symtype2", "ustar/regtype")
class GzipUstarReadTest(GzipTest, UstarReadTest):
    # Re-run UstarReadTest against the gzip-compressed archive.
    pass
class Bz2UstarReadTest(Bz2Test, UstarReadTest):
    # Re-run UstarReadTest against the bzip2-compressed archive.
    pass
class LzmaUstarReadTest(LzmaTest, UstarReadTest):
    # Re-run UstarReadTest against the xz-compressed archive.
    pass
class ListTest(ReadTest, unittest.TestCase):
    """Tests for TarFile.list() output."""

    # Override setUp to use default encoding (UTF-8)
    def setUp(self):
        self.tar = tarfile.open(self.tarname, mode=self.mode)

    def test_list(self):
        # Capture list() output: swap stdout for an ASCII wrapper around
        # a BytesIO, then inspect the raw bytes.
        tio = io.TextIOWrapper(io.BytesIO(), 'ascii', newline='\n')
        with support.swap_attr(sys, 'stdout', tio):
            self.tar.list(verbose=False)
        out = tio.detach().getvalue()
        self.assertIn(b'ustar/conttype', out)
        self.assertIn(b'ustar/regtype', out)
        self.assertIn(b'ustar/lnktype', out)
        self.assertIn(b'ustar' + (b'/12345' * 40) + b'67/longname', out)
        self.assertIn(b'./ustar/linktest2/symtype', out)
        self.assertIn(b'./ustar/linktest2/lnktype', out)
        # Make sure it puts trailing slash for directory
        self.assertIn(b'ustar/dirtype/', out)
        self.assertIn(b'ustar/dirtype-with-size/', out)
        # Make sure it is able to print unencodable characters
        def conv(b):
            # Mirror how unencodable names end up on an ASCII stdout.
            s = b.decode(self.tar.encoding, 'surrogateescape')
            return s.encode('ascii', 'backslashreplace')
        self.assertIn(conv(b'ustar/umlauts-\xc4\xd6\xdc\xe4\xf6\xfc\xdf'), out)
        self.assertIn(conv(b'misc/regtype-hpux-signed-chksum-'
                           b'\xc4\xd6\xdc\xe4\xf6\xfc\xdf'), out)
        self.assertIn(conv(b'misc/regtype-old-v7-signed-chksum-'
                           b'\xc4\xd6\xdc\xe4\xf6\xfc\xdf'), out)
        self.assertIn(conv(b'pax/bad-pax-\xe4\xf6\xfc'), out)
        self.assertIn(conv(b'pax/hdrcharset-\xe4\xf6\xfc'), out)
        # Make sure it prints files separated by one newline without any
        # 'ls -l'-like accessories if verbose flag is not being used
        # ...
        # ustar/conttype
        # ustar/regtype
        # ...
        self.assertRegex(out, br'ustar/conttype ?\r?\n'
                              br'ustar/regtype ?\r?\n')
        # Make sure it does not print the source of link without verbose flag
        self.assertNotIn(b'link to', out)
        self.assertNotIn(b'->', out)

    def test_list_verbose(self):
        tio = io.TextIOWrapper(io.BytesIO(), 'ascii', newline='\n')
        with support.swap_attr(sys, 'stdout', tio):
            self.tar.list(verbose=True)
        out = tio.detach().getvalue()
        # Make sure it prints files separated by one newline with 'ls -l'-like
        # accessories if verbose flag is being used
        # ...
        # ?rw-r--r-- tarfile/tarfile 7011 2003-01-06 07:19:43 ustar/conttype
        # ?rw-r--r-- tarfile/tarfile 7011 2003-01-06 07:19:43 ustar/regtype
        # ...
        self.assertRegex(out, (br'\?rw-r--r-- tarfile/tarfile\s+7011 '
                               br'\d{4}-\d\d-\d\d\s+\d\d:\d\d:\d\d '
                               br'ustar/\w+type ?\r?\n') * 2)
        # Make sure it prints the source of link with verbose flag
        self.assertIn(b'ustar/symtype -> regtype', out)
        self.assertIn(b'./ustar/linktest2/symtype -> ../linktest1/regtype', out)
        self.assertIn(b'./ustar/linktest2/lnktype link to '
                      b'./ustar/linktest1/regtype', out)
        self.assertIn(b'gnu' + (b'/123' * 125) + b'/longlink link to gnu' +
                      (b'/123' * 125) + b'/longname', out)
        self.assertIn(b'pax' + (b'/123' * 125) + b'/longlink link to pax' +
                      (b'/123' * 125) + b'/longname', out)

    def test_list_members(self):
        # list() must honor a custom 'members' iterable.
        tio = io.TextIOWrapper(io.BytesIO(), 'ascii', newline='\n')
        def members(tar):
            for tarinfo in tar.getmembers():
                if 'reg' in tarinfo.name:
                    yield tarinfo
        with support.swap_attr(sys, 'stdout', tio):
            self.tar.list(verbose=False, members=members(self.tar))
        out = tio.detach().getvalue()
        self.assertIn(b'ustar/regtype', out)
        self.assertNotIn(b'ustar/conttype', out)
class GzipListTest(GzipTest, ListTest):
    # Re-run ListTest against the gzip-compressed archive.
    pass
class Bz2ListTest(Bz2Test, ListTest):
    # Re-run ListTest against the bzip2-compressed archive.
    pass
class LzmaListTest(LzmaTest, ListTest):
    # Re-run ListTest against the xz-compressed archive.
    pass
class CommonReadTest(ReadTest):
    """Read tests shared by the seekable ("r:") and stream ("r|") modes."""

    def test_empty_tarfile(self):
        # Test for issue6123: Allow opening empty archives.
        # This test checks if tarfile.open() is able to open an empty tar
        # archive successfully. Note that an empty tar archive is not the
        # same as an empty file!
        with tarfile.open(tmpname, self.mode.replace("r", "w")):
            pass
        # Bug fix: the original version called tar.close() unconditionally
        # in the finally clause; if tarfile.open() raised ReadError, 'tar'
        # was unbound and the resulting UnboundLocalError masked the
        # self.fail() assertion.  Guard with None instead.
        tar = None
        try:
            tar = tarfile.open(tmpname, self.mode)
            tar.getnames()
        except tarfile.ReadError:
            self.fail("tarfile.open() failed on empty archive")
        else:
            self.assertListEqual(tar.getmembers(), [])
        finally:
            if tar is not None:
                tar.close()

    def test_non_existent_tarfile(self):
        # Test for issue11513: prevent non-existent gzipped tarfiles raising
        # multiple exceptions.
        with self.assertRaisesRegex(FileNotFoundError, "xxx"):
            tarfile.open("xxx", self.mode)

    def test_null_tarfile(self):
        # Test for issue6123: Allow opening empty archives.
        # This test guarantees that tarfile.open() does not treat an empty
        # file as an empty tar archive.
        with open(tmpname, "wb"):
            pass
        self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, self.mode)
        self.assertRaises(tarfile.ReadError, tarfile.open, tmpname)

    def test_ignore_zeros(self):
        # Test TarFile's ignore_zeros option.
        # generate 512 pseudorandom bytes
        data = Random(0).getrandbits(512*8).to_bytes(512, 'big')
        for char in (b'\0', b'a'):
            # Test if EOFHeaderError ('\0') and InvalidHeaderError ('a')
            # are ignored correctly.
            with self.open(tmpname, "w") as fobj:
                fobj.write(char * 1024)
                tarinfo = tarfile.TarInfo("foo")
                tarinfo.size = len(data)
                fobj.write(tarinfo.tobuf())
                fobj.write(data)
            tar = tarfile.open(tmpname, mode="r", ignore_zeros=True)
            try:
                self.assertListEqual(tar.getnames(), ["foo"],
                    "ignore_zeros=True should have skipped the %r-blocks" %
                    char)
            finally:
                tar.close()

    def test_premature_end_of_archive(self):
        # A truncated archive must raise ReadError from iteration,
        # extract() and extractfile().read() alike.
        for size in (512, 600, 1024, 1200):
            with tarfile.open(tmpname, "w:") as tar:
                t = tarfile.TarInfo("foo")
                t.size = 1024
                tar.addfile(t, io.BytesIO(b"a" * 1024))
            with open(tmpname, "r+b") as fobj:
                fobj.truncate(size)
            with tarfile.open(tmpname) as tar:
                with self.assertRaisesRegex(tarfile.ReadError, "unexpected end of data"):
                    for t in tar:
                        pass
            with tarfile.open(tmpname) as tar:
                t = tar.next()
                with self.assertRaisesRegex(tarfile.ReadError, "unexpected end of data"):
                    tar.extract(t, TEMPDIR)
                with self.assertRaisesRegex(tarfile.ReadError, "unexpected end of data"):
                    tar.extractfile(t).read()
class MiscReadTestBase(CommonReadTest):
    """Assorted read-mode tests (name handling, extraction, offsets)."""

    def requires_name_attribute(self):
        # Hook: compression subclasses override this to skip tests that
        # rely on the underlying file object having a .name attribute.
        pass

    def test_no_name_argument(self):
        self.requires_name_attribute()
        with open(self.tarname, "rb") as fobj:
            self.assertIsInstance(fobj.name, str)
            with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
                self.assertIsInstance(tar.name, str)
                self.assertEqual(tar.name, os.path.abspath(fobj.name))

    def test_no_name_attribute(self):
        with open(self.tarname, "rb") as fobj:
            data = fobj.read()
        fobj = io.BytesIO(data)
        self.assertRaises(AttributeError, getattr, fobj, "name")
        tar = tarfile.open(fileobj=fobj, mode=self.mode)
        self.assertIsNone(tar.name)

    def test_empty_name_attribute(self):
        with open(self.tarname, "rb") as fobj:
            data = fobj.read()
        fobj = io.BytesIO(data)
        fobj.name = ""
        with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
            self.assertIsNone(tar.name)

    def test_int_name_attribute(self):
        # Issue 21044: tarfile.open() should handle fileobj with an integer
        # 'name' attribute.
        fd = os.open(self.tarname, os.O_RDONLY)
        with open(fd, 'rb') as fobj:
            self.assertIsInstance(fobj.name, int)
            with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
                self.assertIsNone(tar.name)

    def test_bytes_name_attribute(self):
        self.requires_name_attribute()
        tarname = os.fsencode(self.tarname)
        with open(tarname, 'rb') as fobj:
            self.assertIsInstance(fobj.name, bytes)
            with tarfile.open(fileobj=fobj, mode=self.mode) as tar:
                self.assertIsInstance(tar.name, bytes)
                self.assertEqual(tar.name, os.path.abspath(fobj.name))

    def test_pathlike_name(self):
        # All open paths must accept os.PathLike names (issue 25609 era API).
        tarname = pathlib.Path(self.tarname)
        with tarfile.open(tarname, mode=self.mode) as tar:
            self.assertIsInstance(tar.name, str)
            self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
        with self.taropen(tarname) as tar:
            self.assertIsInstance(tar.name, str)
            self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
        with tarfile.TarFile.open(tarname, mode=self.mode) as tar:
            self.assertIsInstance(tar.name, str)
            self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))
        if self.suffix == '':
            # Plain TarFile() only supports uncompressed archives.
            with tarfile.TarFile(tarname, mode='r') as tar:
                self.assertIsInstance(tar.name, str)
                self.assertEqual(tar.name, os.path.abspath(os.fspath(tarname)))

    def test_illegal_mode_arg(self):
        with open(tmpname, 'wb'):
            pass
        with self.assertRaisesRegex(ValueError, 'mode must be '):
            tar = self.taropen(tmpname, 'q')
        with self.assertRaisesRegex(ValueError, 'mode must be '):
            tar = self.taropen(tmpname, 'rw')
        with self.assertRaisesRegex(ValueError, 'mode must be '):
            tar = self.taropen(tmpname, '')

    def test_fileobj_with_offset(self):
        # Skip the first member and store values from the second member
        # of the testtar.
        tar = tarfile.open(self.tarname, mode=self.mode)
        try:
            tar.next()
            t = tar.next()
            name = t.name
            offset = t.offset
            with tar.extractfile(t) as f:
                data = f.read()
        finally:
            tar.close()
        # Open the testtar and seek to the offset of the second member.
        with self.open(self.tarname) as fobj:
            fobj.seek(offset)
            # Test if the tarfile starts with the second member.
            # NOTE(review): TarFile.open is a classmethod, so calling it
            # via the (closed) instance works; tarfile.open would be clearer.
            tar = tar.open(self.tarname, mode="r:", fileobj=fobj)
            t = tar.next()
            self.assertEqual(t.name, name)
            # Read to the end of fileobj and test if seeking back to the
            # beginning works.
            tar.getmembers()
            self.assertEqual(tar.extractfile(t).read(), data,
                    "seek back did not work")
            tar.close()

    def test_fail_comp(self):
        # For Gzip and Bz2 Tests: fail with a ReadError on an uncompressed file.
        self.assertRaises(tarfile.ReadError, tarfile.open, tarname, self.mode)
        with open(tarname, "rb") as fobj:
            self.assertRaises(tarfile.ReadError, tarfile.open,
                              fileobj=fobj, mode=self.mode)

    def test_v7_dirtype(self):
        # Test old style dirtype member (bug #1336623):
        # Old V7 tars create directory members using an AREGTYPE
        # header with a "/" appended to the filename field.
        tarinfo = self.tar.getmember("misc/dirtype-old-v7")
        self.assertEqual(tarinfo.type, tarfile.DIRTYPE,
                "v7 dirtype failed")

    def test_xstar_type(self):
        # The xstar format stores extra atime and ctime fields inside the
        # space reserved for the prefix field. The prefix field must be
        # ignored in this case, otherwise it will mess up the name.
        try:
            self.tar.getmember("misc/regtype-xstar")
        except KeyError:
            self.fail("failed to find misc/regtype-xstar (mangled prefix?)")

    def test_check_members(self):
        for tarinfo in self.tar:
            self.assertEqual(int(tarinfo.mtime), 0o7606136617,
                    "wrong mtime for %s" % tarinfo.name)
            if not tarinfo.name.startswith("ustar/"):
                continue
            self.assertEqual(tarinfo.uname, "tarfile",
                    "wrong uname for %s" % tarinfo.name)

    def test_find_members(self):
        self.assertEqual(self.tar.getmembers()[-1].name, "misc/eof",
                "could not find all members")

    @unittest.skipUnless(hasattr(os, "link"),
                         "Missing hardlink implementation")
    @support.skip_unless_symlink
    def test_extract_hardlink(self):
        # Test hardlink extraction (e.g. bug #857297).
        with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar:
            tar.extract("ustar/regtype", TEMPDIR)
            self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/regtype"))
            tar.extract("ustar/lnktype", TEMPDIR)
            self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/lnktype"))
            with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f:
                data = f.read()
            self.assertEqual(md5sum(data), md5_regtype)
            tar.extract("ustar/symtype", TEMPDIR)
            self.addCleanup(support.unlink, os.path.join(TEMPDIR, "ustar/symtype"))
            with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f:
                data = f.read()
            self.assertEqual(md5sum(data), md5_regtype)

    def test_extractall(self):
        # Test if extractall() correctly restores directory permissions
        # and times (see issue1735).
        tar = tarfile.open(tarname, encoding="iso8859-1")
        DIR = os.path.join(TEMPDIR, "extractall")
        os.mkdir(DIR)
        try:
            directories = [t for t in tar if t.isdir()]
            tar.extractall(DIR, directories)
            for tarinfo in directories:
                path = os.path.join(DIR, tarinfo.name)
                if sys.platform != "win32":
                    # Win32 has no support for fine grained permissions.
                    self.assertEqual(tarinfo.mode & 0o777,
                                     os.stat(path).st_mode & 0o777)
                def format_mtime(mtime):
                    # Show floats exactly (hex) to make mismatches diagnosable.
                    if isinstance(mtime, float):
                        return "{} ({})".format(mtime, mtime.hex())
                    else:
                        return "{!r} (int)".format(mtime)
                file_mtime = os.path.getmtime(path)
                errmsg = "tar mtime {0} != file time {1} of path {2!a}".format(
                    format_mtime(tarinfo.mtime),
                    format_mtime(file_mtime),
                    path)
                self.assertEqual(tarinfo.mtime, file_mtime, errmsg)
        finally:
            tar.close()
            support.rmtree(DIR)

    def test_extract_directory(self):
        dirtype = "ustar/dirtype"
        DIR = os.path.join(TEMPDIR, "extractdir")
        os.mkdir(DIR)
        try:
            with tarfile.open(tarname, encoding="iso8859-1") as tar:
                tarinfo = tar.getmember(dirtype)
                tar.extract(tarinfo, path=DIR)
                extracted = os.path.join(DIR, dirtype)
                self.assertEqual(os.path.getmtime(extracted), tarinfo.mtime)
                if sys.platform != "win32":
                    self.assertEqual(os.stat(extracted).st_mode & 0o777, 0o755)
        finally:
            support.rmtree(DIR)

    def test_extractall_pathlike_name(self):
        DIR = pathlib.Path(TEMPDIR) / "extractall"
        with support.temp_dir(DIR), \
             tarfile.open(tarname, encoding="iso8859-1") as tar:
            directories = [t for t in tar if t.isdir()]
            tar.extractall(DIR, directories)
            for tarinfo in directories:
                path = DIR / tarinfo.name
                self.assertEqual(os.path.getmtime(path), tarinfo.mtime)

    def test_extract_pathlike_name(self):
        dirtype = "ustar/dirtype"
        DIR = pathlib.Path(TEMPDIR) / "extractall"
        with support.temp_dir(DIR), \
             tarfile.open(tarname, encoding="iso8859-1") as tar:
            tarinfo = tar.getmember(dirtype)
            tar.extract(tarinfo, path=DIR)
            extracted = DIR / dirtype
            self.assertEqual(os.path.getmtime(extracted), tarinfo.mtime)

    def test_init_close_fobj(self):
        # Issue #7341: Close the internal file object in the TarFile
        # constructor in case of an error. For the test we rely on
        # the fact that opening an empty file raises a ReadError.
        empty = os.path.join(TEMPDIR, "empty")
        with open(empty, "wb") as fobj:
            fobj.write(b"")
        try:
            tar = object.__new__(tarfile.TarFile)
            try:
                tar.__init__(empty)
            except tarfile.ReadError:
                self.assertTrue(tar.fileobj.closed)
            else:
                self.fail("ReadError not raised")
        finally:
            support.unlink(empty)

    def test_parallel_iteration(self):
        # Issue #16601: Restarting iteration over tarfile continued
        # from where it left off.
        with tarfile.open(self.tarname) as tar:
            for m1, m2 in zip(tar, tar):
                self.assertEqual(m1.offset, m2.offset)
                self.assertEqual(m1.get_info(), m2.get_info())
class MiscReadTest(MiscReadTestBase, unittest.TestCase):
    # The uncompressed archive cannot fail decompression; disable the
    # inherited test_fail_comp by shadowing it with None.
    test_fail_comp = None
class GzipMiscReadTest(GzipTest, MiscReadTestBase, unittest.TestCase):
    # Re-run MiscReadTestBase against the gzip-compressed archive.
    pass
class Bz2MiscReadTest(Bz2Test, MiscReadTestBase, unittest.TestCase):
    def requires_name_attribute(self):
        # BZ2File objects expose no .name, so name-based tests must skip.
        self.skipTest("BZ2File have no name attribute")
class LzmaMiscReadTest(LzmaTest, MiscReadTestBase, unittest.TestCase):
    def requires_name_attribute(self):
        # LZMAFile objects expose no .name, so name-based tests must skip.
        self.skipTest("LZMAFile have no name attribute")
class StreamReadTest(CommonReadTest, unittest.TestCase):
    """Read tests for the non-seekable stream interface ("r|")."""
    prefix="r|"

    def test_read_through(self):
        # Issue #11224: A poorly designed _FileInFile.read() method
        # caused seeking errors with stream tar files.
        for tarinfo in self.tar:
            if not tarinfo.isreg():
                continue
            with self.tar.extractfile(tarinfo) as fobj:
                while True:
                    try:
                        buf = fobj.read(512)
                    except tarfile.StreamError:
                        self.fail("simple read-through using "
                                  "TarFile.extractfile() failed")
                    if not buf:
                        break

    def test_fileobj_regular_file(self):
        tarinfo = self.tar.next() # get "regtype" (can't use getmember)
        with self.tar.extractfile(tarinfo) as fobj:
            data = fobj.read()
        self.assertEqual(len(data), tarinfo.size,
                "regular file extraction failed")
        self.assertEqual(md5sum(data), md5_regtype,
                "regular file extraction failed")

    def test_provoke_stream_error(self):
        # getmembers() exhausts the stream; re-reading must raise.
        tarinfos = self.tar.getmembers()
        with self.tar.extractfile(tarinfos[0]) as f: # read the first member
            self.assertRaises(tarfile.StreamError, f.read)

    def test_compare_members(self):
        # Walk the seekable and the stream archive in lockstep and compare
        # member payloads.
        tar1 = tarfile.open(tarname, encoding="iso8859-1")
        try:
            tar2 = self.tar
            while True:
                t1 = tar1.next()
                t2 = tar2.next()
                if t1 is None:
                    break
                self.assertIsNotNone(t2, "stream.next() failed.")
                if t2.islnk() or t2.issym():
                    # Links cannot be resolved in a pure stream.
                    with self.assertRaises(tarfile.StreamError):
                        tar2.extractfile(t2)
                    continue
                v1 = tar1.extractfile(t1)
                v2 = tar2.extractfile(t2)
                if v1 is None:
                    continue
                self.assertIsNotNone(v2, "stream.extractfile() failed")
                self.assertEqual(v1.read(), v2.read(),
                        "stream extraction failed")
        finally:
            tar1.close()
class GzipStreamReadTest(GzipTest, StreamReadTest):
    # Re-run StreamReadTest against the gzip-compressed archive.
    pass
class Bz2StreamReadTest(Bz2Test, StreamReadTest):
    # Re-run StreamReadTest against the bzip2-compressed archive.
    pass
class LzmaStreamReadTest(LzmaTest, StreamReadTest):
    # Re-run StreamReadTest against the xz-compressed archive.
    pass
class DetectReadTest(TarTest, unittest.TestCase):
    """Check tarfile.open()'s compression autodetection ("r" / "r:*" / "r|*")."""

    def _testfunc_file(self, name, mode):
        # Open by filename; any ReadError means detection failed.
        try:
            tar = tarfile.open(name, mode)
        except tarfile.ReadError as e:
            # Improvement: the exception was previously bound but unused and
            # self.fail() carried no message; include the cause in the report.
            self.fail("tarfile.open() failed unexpectedly: %s" % e)
        else:
            tar.close()

    def _testfunc_fileobj(self, name, mode):
        # Open via an explicit file object; detection must work there too.
        try:
            with open(name, "rb") as f:
                tar = tarfile.open(name, mode, fileobj=f)
        except tarfile.ReadError as e:
            self.fail("tarfile.open() failed unexpectedly: %s" % e)
        else:
            tar.close()

    def _test_modes(self, testfunc):
        # For compressed variants: explicitly wrong compression modes
        # must be rejected with ReadError.
        if self.suffix:
            with self.assertRaises(tarfile.ReadError):
                tarfile.open(tarname, mode="r:" + self.suffix)
            with self.assertRaises(tarfile.ReadError):
                tarfile.open(tarname, mode="r|" + self.suffix)
            with self.assertRaises(tarfile.ReadError):
                tarfile.open(self.tarname, mode="r:")
            with self.assertRaises(tarfile.ReadError):
                tarfile.open(self.tarname, mode="r|")
        # Autodetecting and explicit correct modes must all succeed.
        testfunc(self.tarname, "r")
        testfunc(self.tarname, "r:" + self.suffix)
        testfunc(self.tarname, "r:*")
        testfunc(self.tarname, "r|" + self.suffix)
        testfunc(self.tarname, "r|*")

    def test_detect_file(self):
        self._test_modes(self._testfunc_file)

    def test_detect_fileobj(self):
        self._test_modes(self._testfunc_fileobj)
class GzipDetectReadTest(GzipTest, DetectReadTest):
    # Re-run DetectReadTest against the gzip-compressed archive.
    pass
class Bz2DetectReadTest(Bz2Test, DetectReadTest):
    def test_detect_stream_bz2(self):
        # Originally, tarfile's stream detection looked for the string
        # "BZh91" at the start of the file. This is incorrect because
        # the '9' represents the blocksize (900kB). If the file was
        # compressed using another blocksize autodetection fails.
        with open(tarname, "rb") as fobj:
            data = fobj.read()
        # Compress with blocksize 100kB, the file starts with "BZh11".
        with bz2.BZ2File(tmpname, "wb", compresslevel=1) as fobj:
            fobj.write(data)
        self._testfunc_file(tmpname, "r|*")
class LzmaDetectReadTest(LzmaTest, DetectReadTest):
    # Re-run DetectReadTest against the xz-compressed archive.
    pass
class MemberReadTest(ReadTest, unittest.TestCase):
    """Verify metadata and payloads of individual testtar.tar members."""

    def _test_member(self, tarinfo, chksum=None, **kwargs):
        # Optionally verify the payload, then compare every expected
        # header field (plus the shared owner/time defaults) on tarinfo.
        if chksum is not None:
            with self.tar.extractfile(tarinfo) as f:
                self.assertEqual(md5sum(f.read()), chksum,
                        "wrong md5sum for %s" % tarinfo.name)
        kwargs["mtime"] = 0o7606136617
        kwargs["uid"] = 1000
        kwargs["gid"] = 100
        if "old-v7" not in tarinfo.name:
            # V7 tar can't handle alphabetic owners.
            kwargs["uname"] = "tarfile"
            kwargs["gname"] = "tarfile"
        for k, v in kwargs.items():
            self.assertEqual(getattr(tarinfo, k), v,
                    "wrong value in %s field of %s" % (k, tarinfo.name))

    def test_find_regtype(self):
        tarinfo = self.tar.getmember("ustar/regtype")
        self._test_member(tarinfo, size=7011, chksum=md5_regtype)

    def test_find_conttype(self):
        tarinfo = self.tar.getmember("ustar/conttype")
        self._test_member(tarinfo, size=7011, chksum=md5_regtype)

    def test_find_dirtype(self):
        tarinfo = self.tar.getmember("ustar/dirtype")
        self._test_member(tarinfo, size=0)

    def test_find_dirtype_with_size(self):
        tarinfo = self.tar.getmember("ustar/dirtype-with-size")
        self._test_member(tarinfo, size=255)

    def test_find_lnktype(self):
        tarinfo = self.tar.getmember("ustar/lnktype")
        self._test_member(tarinfo, size=0, linkname="ustar/regtype")

    def test_find_symtype(self):
        tarinfo = self.tar.getmember("ustar/symtype")
        self._test_member(tarinfo, size=0, linkname="regtype")

    def test_find_blktype(self):
        tarinfo = self.tar.getmember("ustar/blktype")
        self._test_member(tarinfo, size=0, devmajor=3, devminor=0)

    def test_find_chrtype(self):
        tarinfo = self.tar.getmember("ustar/chrtype")
        self._test_member(tarinfo, size=0, devmajor=1, devminor=3)

    def test_find_fifotype(self):
        tarinfo = self.tar.getmember("ustar/fifotype")
        self._test_member(tarinfo, size=0)

    def test_find_sparse(self):
        tarinfo = self.tar.getmember("ustar/sparse")
        self._test_member(tarinfo, size=86016, chksum=md5_sparse)

    def test_find_gnusparse(self):
        tarinfo = self.tar.getmember("gnu/sparse")
        self._test_member(tarinfo, size=86016, chksum=md5_sparse)

    def test_find_gnusparse_00(self):
        tarinfo = self.tar.getmember("gnu/sparse-0.0")
        self._test_member(tarinfo, size=86016, chksum=md5_sparse)

    def test_find_gnusparse_01(self):
        tarinfo = self.tar.getmember("gnu/sparse-0.1")
        self._test_member(tarinfo, size=86016, chksum=md5_sparse)

    def test_find_gnusparse_10(self):
        tarinfo = self.tar.getmember("gnu/sparse-1.0")
        self._test_member(tarinfo, size=86016, chksum=md5_sparse)

    def test_find_umlauts(self):
        tarinfo = self.tar.getmember("ustar/umlauts-"
                                     "\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
        self._test_member(tarinfo, size=7011, chksum=md5_regtype)

    def test_find_ustar_longname(self):
        name = "ustar/" + "12345/" * 39 + "1234567/longname"
        self.assertIn(name, self.tar.getnames())

    def test_find_regtype_oldv7(self):
        tarinfo = self.tar.getmember("misc/regtype-old-v7")
        self._test_member(tarinfo, size=7011, chksum=md5_regtype)

    def test_find_pax_umlauts(self):
        # Reopen with iso8859-1 so the raw pax name bytes round-trip.
        self.tar.close()
        self.tar = tarfile.open(self.tarname, mode=self.mode,
                                encoding="iso8859-1")
        tarinfo = self.tar.getmember("pax/umlauts-"
                                     "\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
        self._test_member(tarinfo, size=7011, chksum=md5_regtype)
class LongnameTest:
    """Mixin: long-name handling; subclasses set subdir and longnametype."""

    def test_read_longname(self):
        # Test reading of longname (bug #1471427).
        longname = self.subdir + "/" + "123/" * 125 + "longname"
        try:
            tarinfo = self.tar.getmember(longname)
        except KeyError:
            self.fail("longname not found")
        self.assertNotEqual(tarinfo.type, tarfile.DIRTYPE,
                "read longname as dirtype")

    def test_read_longlink(self):
        longname = self.subdir + "/" + "123/" * 125 + "longname"
        longlink = self.subdir + "/" + "123/" * 125 + "longlink"
        try:
            tarinfo = self.tar.getmember(longlink)
        except KeyError:
            self.fail("longlink not found")
        self.assertEqual(tarinfo.linkname, longname, "linkname wrong")

    def test_truncated_longname(self):
        # An archive cut off inside the longname extension must raise
        # ReadError rather than return a mangled member.
        longname = self.subdir + "/" + "123/" * 125 + "longname"
        tarinfo = self.tar.getmember(longname)
        offset = tarinfo.offset
        self.tar.fileobj.seek(offset)
        fobj = io.BytesIO(self.tar.fileobj.read(3 * 512))
        with self.assertRaises(tarfile.ReadError):
            tarfile.open(name="foo.tar", fileobj=fobj)

    def test_header_offset(self):
        # Test if the start offset of the TarInfo object includes
        # the preceding extended header.
        longname = self.subdir + "/" + "123/" * 125 + "longname"
        offset = self.tar.getmember(longname).offset
        with open(tarname, "rb") as fobj:
            fobj.seek(offset)
            tarinfo = tarfile.TarInfo.frombuf(fobj.read(512),
                                              "iso8859-1", "strict")
            self.assertEqual(tarinfo.type, self.longnametype)
class GNUReadTest(LongnameTest, ReadTest, unittest.TestCase):
    """Long-name and sparse-member tests for the GNU tar format."""
    subdir = "gnu"
    longnametype = tarfile.GNUTYPE_LONGNAME

    # Since 3.2 tarfile is supposed to accurately restore sparse members and
    # produce files with holes. This is what we actually want to test here.
    # Unfortunately, not all platforms/filesystems support sparse files, and
    # even on platforms that do it is non-trivial to make reliable assertions
    # about holes in files. Therefore, we first do one basic test which works
    # an all platforms, and after that a test that will work only on
    # platforms/filesystems that prove to support sparse files.
    def _test_sparse_file(self, name):
        self.tar.extract(name, TEMPDIR)
        filename = os.path.join(TEMPDIR, name)
        with open(filename, "rb") as fobj:
            data = fobj.read()
            self.assertEqual(md5sum(data), md5_sparse,
                    "wrong md5sum for %s" % name)
        if self._fs_supports_holes():
            # Fewer allocated blocks than the logical size means the file
            # was written with actual holes.
            s = os.stat(filename)
            self.assertLess(s.st_blocks * 512, s.st_size)

    def test_sparse_file_old(self):
        self._test_sparse_file("gnu/sparse")

    def test_sparse_file_00(self):
        self._test_sparse_file("gnu/sparse-0.0")

    def test_sparse_file_01(self):
        self._test_sparse_file("gnu/sparse-0.1")

    def test_sparse_file_10(self):
        self._test_sparse_file("gnu/sparse-1.0")

    @staticmethod
    def _fs_supports_holes():
        # Return True if the platform knows the st_blocks stat attribute and
        # uses st_blocks units of 512 bytes, and if the filesystem is able to
        # store holes in files.
        if sys.platform.startswith("linux"):
            # Linux evidentially has 512 byte st_blocks units.
            name = os.path.join(TEMPDIR, "sparse-test")
            with open(name, "wb") as fobj:
                fobj.seek(4096)
                fobj.truncate()
            s = os.stat(name)
            support.unlink(name)
            return s.st_blocks == 0
        else:
            return False
class PaxReadTest(LongnameTest, ReadTest, unittest.TestCase):
    """Long-name and pax-header tests for the POSIX.1-2001 (pax) format."""
    subdir = "pax"
    longnametype = tarfile.XHDTYPE

    def test_pax_global_headers(self):
        # Global pax headers apply to following members unless a local
        # header overrides the field.
        tar = tarfile.open(tarname, encoding="iso8859-1")
        try:
            tarinfo = tar.getmember("pax/regtype1")
            self.assertEqual(tarinfo.uname, "foo")
            self.assertEqual(tarinfo.gname, "bar")
            self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"),
                             "\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
            tarinfo = tar.getmember("pax/regtype2")
            self.assertEqual(tarinfo.uname, "")
            self.assertEqual(tarinfo.gname, "bar")
            self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"),
                             "\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
            tarinfo = tar.getmember("pax/regtype3")
            self.assertEqual(tarinfo.uname, "tarfile")
            self.assertEqual(tarinfo.gname, "tarfile")
            self.assertEqual(tarinfo.pax_headers.get("VENDOR.umlauts"),
                             "\xc4\xd6\xdc\xe4\xf6\xfc\xdf")
        finally:
            tar.close()

    def test_pax_number_fields(self):
        # All following number fields are read from the pax header.
        tar = tarfile.open(tarname, encoding="iso8859-1")
        try:
            tarinfo = tar.getmember("pax/regtype4")
            self.assertEqual(tarinfo.size, 7011)
            self.assertEqual(tarinfo.uid, 123)
            self.assertEqual(tarinfo.gid, 123)
            self.assertEqual(tarinfo.mtime, 1041808783.0)
            self.assertEqual(type(tarinfo.mtime), float)
            self.assertEqual(float(tarinfo.pax_headers["atime"]), 1041808783.0)
            self.assertEqual(float(tarinfo.pax_headers["ctime"]), 1041808783.0)
        finally:
            tar.close()
class WriteTestBase(TarTest):
    # Put all write tests in here that are supposed to be tested
    # in all possible mode combinations.

    def test_fileobj_no_close(self):
        # An externally supplied fileobj must stay open after close().
        fobj = io.BytesIO()
        tar = tarfile.open(fileobj=fobj, mode=self.mode)
        tar.addfile(tarfile.TarInfo("foo"))
        tar.close()
        self.assertFalse(fobj.closed, "external fileobjs must never closed")
        # Issue #20238: Incomplete gzip output with mode="w:gz"
        data = fobj.getvalue()
        del tar
        support.gc_collect()
        self.assertFalse(fobj.closed)
        self.assertEqual(data, fobj.getvalue())

    def test_eof_marker(self):
        # Make sure an end of archive marker is written (two zero blocks).
        # tarfile insists on aligning archives to a 20 * 512 byte recordsize.
        # So, we create an archive that has exactly 10240 bytes without the
        # marker, and has 20480 bytes once the marker is written.
        with tarfile.open(tmpname, self.mode) as tar:
            t = tarfile.TarInfo("foo")
            t.size = tarfile.RECORDSIZE - tarfile.BLOCKSIZE
            tar.addfile(t, io.BytesIO(b"a" * t.size))
        with self.open(tmpname, "rb") as fobj:
            self.assertEqual(len(fobj.read()), tarfile.RECORDSIZE * 2)
class WriteTest(WriteTestBase, unittest.TestCase):
    # Write tests for plain, seekable archives ("w:").
    prefix = "w:"
    def test_100_char_name(self):
        # The name field in a tar header stores strings of at most 100 chars.
        # If a string is shorter than 100 chars it has to be padded with '\0',
        # which implies that a string of exactly 100 chars is stored without
        # a trailing '\0'.
        name = "0123456789" * 10
        tar = tarfile.open(tmpname, self.mode)
        try:
            t = tarfile.TarInfo(name)
            tar.addfile(t)
        finally:
            tar.close()
        tar = tarfile.open(tmpname)
        try:
            self.assertEqual(tar.getnames()[0], name,
                             "failed to store 100 char filename")
        finally:
            tar.close()
    def test_tar_size(self):
        # Test for bug #1013882.
        tar = tarfile.open(tmpname, self.mode)
        try:
            path = os.path.join(TEMPDIR, "file")
            with open(path, "wb") as fobj:
                fobj.write(b"aaa")
            tar.add(path)
        finally:
            tar.close()
        self.assertGreater(os.path.getsize(tmpname), 0,
                           "tarfile is empty")
    # The test_*_size tests test for bug #1167128.
    def test_file_size(self):
        # gettarinfo() must report the current on-disk size of a file.
        tar = tarfile.open(tmpname, self.mode)
        try:
            path = os.path.join(TEMPDIR, "file")
            with open(path, "wb"):
                pass
            tarinfo = tar.gettarinfo(path)
            self.assertEqual(tarinfo.size, 0)
            with open(path, "wb") as fobj:
                fobj.write(b"aaa")
            tarinfo = tar.gettarinfo(path)
            self.assertEqual(tarinfo.size, 3)
        finally:
            tar.close()
    def test_directory_size(self):
        # Directory members are stored with size 0.
        path = os.path.join(TEMPDIR, "directory")
        os.mkdir(path)
        try:
            tar = tarfile.open(tmpname, self.mode)
            try:
                tarinfo = tar.gettarinfo(path)
                self.assertEqual(tarinfo.size, 0)
            finally:
                tar.close()
        finally:
            support.rmdir(path)
    def test_gettarinfo_pathlike_name(self):
        # gettarinfo() must accept os.PathLike objects as well as str.
        with tarfile.open(tmpname, self.mode) as tar:
            path = pathlib.Path(TEMPDIR) / "file"
            with open(path, "wb") as fobj:
                fobj.write(b"aaa")
            tarinfo = tar.gettarinfo(path)
            tarinfo2 = tar.gettarinfo(os.fspath(path))
            self.assertIsInstance(tarinfo.name, str)
            self.assertEqual(tarinfo.name, tarinfo2.name)
            self.assertEqual(tarinfo.size, 3)
    @unittest.skipUnless(hasattr(os, "link"),
                         "Missing hardlink implementation")
    def test_link_size(self):
        # The second hardlink to an already recorded inode has size 0.
        link = os.path.join(TEMPDIR, "link")
        target = os.path.join(TEMPDIR, "link_target")
        with open(target, "wb") as fobj:
            fobj.write(b"aaa")
        os.link(target, link)
        try:
            tar = tarfile.open(tmpname, self.mode)
            try:
                # Record the link target in the inodes list.
                tar.gettarinfo(target)
                tarinfo = tar.gettarinfo(link)
                self.assertEqual(tarinfo.size, 0)
            finally:
                tar.close()
        finally:
            support.unlink(target)
            support.unlink(link)
    @support.skip_unless_symlink
    def test_symlink_size(self):
        # Symlink members are stored with size 0.
        path = os.path.join(TEMPDIR, "symlink")
        os.symlink("link_target", path)
        try:
            tar = tarfile.open(tmpname, self.mode)
            try:
                tarinfo = tar.gettarinfo(path)
                self.assertEqual(tarinfo.size, 0)
            finally:
                tar.close()
        finally:
            support.unlink(path)
    def test_add_self(self):
        # Test for #1257255.
        # An archive must never be added to itself, neither via its
        # absolute nor via a relative path.
        dstname = os.path.abspath(tmpname)
        tar = tarfile.open(tmpname, self.mode)
        try:
            self.assertEqual(tar.name, dstname,
                             "archive name must be absolute")
            tar.add(dstname)
            self.assertEqual(tar.getnames(), [],
                             "added the archive to itself")
            with support.change_cwd(TEMPDIR):
                tar.add(dstname)
            self.assertEqual(tar.getnames(), [],
                             "added the archive to itself")
        finally:
            tar.close()
    def test_filter(self):
        # The filter= keyword of add() can modify or exclude members.
        tempdir = os.path.join(TEMPDIR, "filter")
        os.mkdir(tempdir)
        try:
            for name in ("foo", "bar", "baz"):
                name = os.path.join(tempdir, name)
                support.create_empty_file(name)
            def filter(tarinfo):
                # Excludes "bar", rewrites uid/uname of everything else.
                if os.path.basename(tarinfo.name) == "bar":
                    return
                tarinfo.uid = 123
                tarinfo.uname = "foo"
                return tarinfo
            tar = tarfile.open(tmpname, self.mode, encoding="iso8859-1")
            try:
                tar.add(tempdir, arcname="empty_dir", filter=filter)
            finally:
                tar.close()
            # Verify that filter is a keyword-only argument
            with self.assertRaises(TypeError):
                tar.add(tempdir, "empty_dir", True, None, filter)
            tar = tarfile.open(tmpname, "r")
            try:
                for tarinfo in tar:
                    self.assertEqual(tarinfo.uid, 123)
                    self.assertEqual(tarinfo.uname, "foo")
                self.assertEqual(len(tar.getmembers()), 3)
            finally:
                tar.close()
        finally:
            support.rmtree(tempdir)
    # Guarantee that stored pathnames are not modified. Don't
    # remove ./ or ../ or double slashes. Still make absolute
    # pathnames relative.
    # For details see bug #6054.
    def _test_pathname(self, path, cmp_path=None, dir=False):
        # Create a tarfile with an empty member named path
        # and compare the stored name with the original.
        foo = os.path.join(TEMPDIR, "foo")
        if not dir:
            support.create_empty_file(foo)
        else:
            os.mkdir(foo)
        tar = tarfile.open(tmpname, self.mode)
        try:
            tar.add(foo, arcname=path)
        finally:
            tar.close()
        tar = tarfile.open(tmpname, "r")
        try:
            t = tar.next()
        finally:
            tar.close()
        if not dir:
            support.unlink(foo)
        else:
            support.rmdir(foo)
        self.assertEqual(t.name, cmp_path or path.replace(os.sep, "/"))
    @support.skip_unless_symlink
    def test_extractall_symlinks(self):
        # Test if extractall works properly when tarfile contains symlinks
        tempdir = os.path.join(TEMPDIR, "testsymlinks")
        temparchive = os.path.join(TEMPDIR, "testsymlinks.tar")
        os.mkdir(tempdir)
        try:
            source_file = os.path.join(tempdir,'source')
            target_file = os.path.join(tempdir,'symlink')
            with open(source_file,'w') as f:
                f.write('something\n')
            os.symlink(source_file, target_file)
            tar = tarfile.open(temparchive,'w')
            tar.add(source_file)
            tar.add(target_file)
            tar.close()
            # Let's extract it to the location which contains the symlink
            tar = tarfile.open(temparchive,'r')
            # this should not raise OSError: [Errno 17] File exists
            try:
                tar.extractall(path=tempdir)
            except OSError:
                self.fail("extractall failed with symlinked files")
            finally:
                tar.close()
        finally:
            support.unlink(temparchive)
            support.rmtree(tempdir)
    def test_pathnames(self):
        # Relative path components must be stored verbatim.
        self._test_pathname("foo")
        self._test_pathname(os.path.join("foo", ".", "bar"))
        self._test_pathname(os.path.join("foo", "..", "bar"))
        self._test_pathname(os.path.join(".", "foo"))
        self._test_pathname(os.path.join(".", "foo", "."))
        self._test_pathname(os.path.join(".", "foo", ".", "bar"))
        self._test_pathname(os.path.join(".", "foo", "..", "bar"))
        self._test_pathname(os.path.join(".", "foo", "..", "bar"))
        self._test_pathname(os.path.join("..", "foo"))
        self._test_pathname(os.path.join("..", "foo", ".."))
        self._test_pathname(os.path.join("..", "foo", ".", "bar"))
        self._test_pathname(os.path.join("..", "foo", "..", "bar"))
        self._test_pathname("foo" + os.sep + os.sep + "bar")
        self._test_pathname("foo" + os.sep + os.sep, "foo", dir=True)
    def test_abs_pathnames(self):
        # Absolute pathnames are made relative when stored.
        if sys.platform == "win32":
            self._test_pathname("C:\\foo", "foo")
        else:
            self._test_pathname("/foo", "foo")
            self._test_pathname("///foo", "foo")
    def test_cwd(self):
        # Test adding the current working directory.
        with support.change_cwd(TEMPDIR):
            tar = tarfile.open(tmpname, self.mode)
            try:
                tar.add(".")
            finally:
                tar.close()
            tar = tarfile.open(tmpname, "r")
            try:
                for t in tar:
                    if t.name != ".":
                        self.assertTrue(t.name.startswith("./"), t.name)
            finally:
                tar.close()
    def test_open_nonwritable_fileobj(self):
        # If opening fails, the external file object must be left open.
        for exctype in OSError, EOFError, RuntimeError:
            class BadFile(io.BytesIO):
                first = True
                def write(self, data):
                    # Fail on the very first write attempt only.
                    if self.first:
                        self.first = False
                        raise exctype
            f = BadFile()
            with self.assertRaises(exctype):
                tar = tarfile.open(tmpname, self.mode, fileobj=f,
                                   format=tarfile.PAX_FORMAT,
                                   pax_headers={'non': 'empty'})
            self.assertFalse(f.closed)
class GzipWriteTest(GzipTest, WriteTest):
    # Run the WriteTest suite against gzip-compressed archives.
    pass
class Bz2WriteTest(Bz2Test, WriteTest):
    # Run the WriteTest suite against bzip2-compressed archives.
    pass
class LzmaWriteTest(LzmaTest, WriteTest):
    # Run the WriteTest suite against xz-compressed archives.
    pass
class StreamWriteTest(WriteTestBase, unittest.TestCase):
    # Write tests for the non-seekable stream interface ("w|").

    prefix = "w|"
    decompressor = None

    def test_stream_padding(self):
        # Test for bug #1543303.
        # An empty stream archive must consist entirely of zero bytes,
        # padded up to a full record.
        tarfile.open(tmpname, self.mode).close()
        if self.decompressor:
            dec = self.decompressor()
            with open(tmpname, "rb") as fobj:
                raw = fobj.read()
            data = dec.decompress(raw)
            self.assertFalse(dec.unused_data, "found trailing data")
        else:
            with self.open(tmpname) as fobj:
                data = fobj.read()
        self.assertEqual(data.count(b"\0"), tarfile.RECORDSIZE,
                         "incorrect zero padding")

    @unittest.skipUnless(sys.platform != "win32" and hasattr(os, "umask"),
                         "Missing umask implementation")
    def test_file_mode(self):
        # Test for issue #8464: Create files with correct
        # permissions.
        if os.path.exists(tmpname):
            support.unlink(tmpname)
        original_umask = os.umask(0o022)
        try:
            tarfile.open(tmpname, self.mode).close()
            permissions = os.stat(tmpname).st_mode & 0o777
            self.assertEqual(permissions, 0o644, "wrong file permissions")
        finally:
            os.umask(original_umask)
class GzipStreamWriteTest(GzipTest, StreamWriteTest):
    # Run the stream write tests against gzip-compressed streams.
    pass
class Bz2StreamWriteTest(Bz2Test, StreamWriteTest):
    # An explicit decompressor is needed to check for trailing data.
    decompressor = bz2.BZ2Decompressor if bz2 else None
class LzmaStreamWriteTest(LzmaTest, StreamWriteTest):
    # An explicit decompressor is needed to check for trailing data.
    decompressor = lzma.LZMADecompressor if lzma else None
class GNUWriteTest(unittest.TestCase):
# This testcase checks for correct creation of GNU Longname
# and Longlink extended headers (cp. bug #812325).
def _length(self, s):
blocks = len(s) // 512 + 1
return blocks * 512
def _calc_size(self, name, link=None):
# Initial tar header
count = 512
if len(name) > tarfile.LENGTH_NAME:
# GNU longname extended header + longname
count += 512
count += self._length(name)
if link is not None and len(link) > tarfile.LENGTH_LINK:
# GNU longlink extended header + longlink
count += 512
count += self._length(link)
return count
def _test(self, name, link=None):
tarinfo = tarfile.TarInfo(name)
if link:
tarinfo.linkname = link
tarinfo.type = tarfile.LNKTYPE
tar = tarfile.open(tmpname, "w")
try:
tar.format = tarfile.GNU_FORMAT
tar.addfile(tarinfo)
v1 = self._calc_size(name, link)
v2 = tar.offset
self.assertEqual(v1, v2, "GNU longname/longlink creation failed")
finally:
tar.close()
tar = tarfile.open(tmpname)
try:
member = tar.next()
self.assertIsNotNone(member,
"unable to read longname member")
self.assertEqual(tarinfo.name, member.name,
"unable to read longname member")
self.assertEqual(tarinfo.linkname, member.linkname,
"unable to read longname member")
finally:
tar.close()
def test_longname_1023(self):
self._test(("longnam/" * 127) + "longnam")
def test_longname_1024(self):
self._test(("longnam/" * 127) + "longname")
def test_longname_1025(self):
self._test(("longnam/" * 127) + "longname_")
def test_longlink_1023(self):
self._test("name", ("longlnk/" * 127) + "longlnk")
def test_longlink_1024(self):
self._test("name", ("longlnk/" * 127) + "longlink")
def test_longlink_1025(self):
self._test("name", ("longlnk/" * 127) + "longlink_")
def test_longnamelink_1023(self):
self._test(("longnam/" * 127) + "longnam",
("longlnk/" * 127) + "longlnk")
def test_longnamelink_1024(self):
self._test(("longnam/" * 127) + "longname",
("longlnk/" * 127) + "longlink")
def test_longnamelink_1025(self):
self._test(("longnam/" * 127) + "longname_",
("longlnk/" * 127) + "longlink_")
class CreateTest(WriteTestBase, unittest.TestCase):
    # Tests for exclusive-creation mode ("x:"), which must refuse to
    # overwrite an existing archive.
    prefix = "x:"
    file_path = os.path.join(TEMPDIR, "spameggs42")
    def setUp(self):
        support.unlink(tmpname)
    @classmethod
    def setUpClass(cls):
        # Create the sample file that every test adds to its archive.
        with open(cls.file_path, "wb") as fobj:
            fobj.write(b"aaa")
    @classmethod
    def tearDownClass(cls):
        support.unlink(cls.file_path)
    def test_create(self):
        # Exclusive creation succeeds when the archive does not exist.
        with tarfile.open(tmpname, self.mode) as tobj:
            tobj.add(self.file_path)
        with self.taropen(tmpname) as tobj:
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
    def test_create_existing(self):
        # A second exclusive open on the same path must fail and must
        # leave the original archive untouched.
        with tarfile.open(tmpname, self.mode) as tobj:
            tobj.add(self.file_path)
        with self.assertRaises(FileExistsError):
            tobj = tarfile.open(tmpname, self.mode)
        with self.taropen(tmpname) as tobj:
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
    def test_create_taropen(self):
        # taropen() supports exclusive creation via the "x" mode.
        with self.taropen(tmpname, "x") as tobj:
            tobj.add(self.file_path)
        with self.taropen(tmpname) as tobj:
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
    def test_create_existing_taropen(self):
        # taropen(..., "x") must also refuse to overwrite.
        with self.taropen(tmpname, "x") as tobj:
            tobj.add(self.file_path)
        with self.assertRaises(FileExistsError):
            with self.taropen(tmpname, "x"):
                pass
        with self.taropen(tmpname) as tobj:
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn("spameggs42", names[0])
    def test_create_pathlike_name(self):
        # tarfile.open() accepts os.PathLike archive names.
        with tarfile.open(pathlib.Path(tmpname), self.mode) as tobj:
            self.assertIsInstance(tobj.name, str)
            self.assertEqual(tobj.name, os.path.abspath(tmpname))
            tobj.add(pathlib.Path(self.file_path))
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
        with self.taropen(tmpname) as tobj:
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
    def test_create_taropen_pathlike_name(self):
        # taropen() accepts os.PathLike archive names as well.
        with self.taropen(pathlib.Path(tmpname), "x") as tobj:
            self.assertIsInstance(tobj.name, str)
            self.assertEqual(tobj.name, os.path.abspath(tmpname))
            tobj.add(pathlib.Path(self.file_path))
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
        with self.taropen(tmpname) as tobj:
            names = tobj.getnames()
        self.assertEqual(len(names), 1)
        self.assertIn('spameggs42', names[0])
class GzipCreateTest(GzipTest, CreateTest):
    # Run the exclusive-creation tests against gzip-compressed archives.
    pass
class Bz2CreateTest(Bz2Test, CreateTest):
    # Run the exclusive-creation tests against bzip2-compressed archives.
    pass
class LzmaCreateTest(LzmaTest, CreateTest):
    # Run the exclusive-creation tests against xz-compressed archives.
    pass
class CreateWithXModeTest(CreateTest):
    # Repeat CreateTest with the bare "x" mode (no ":" separator).
    prefix = "x"
    # The taropen()-based tests are disabled for this mode.
    test_create_taropen = None
    test_create_existing_taropen = None
@unittest.skipUnless(hasattr(os, "link"), "Missing hardlink implementation")
class HardlinkTest(unittest.TestCase):
    # Test the creation of LNKTYPE (hardlink) members in an archive.
    def setUp(self):
        # Create two hardlinked files and start an archive that already
        # contains the first of them.
        self.foo = os.path.join(TEMPDIR, "foo")
        self.bar = os.path.join(TEMPDIR, "bar")
        with open(self.foo, "wb") as fobj:
            fobj.write(b"foo")
        os.link(self.foo, self.bar)
        self.tar = tarfile.open(tmpname, "w")
        self.tar.add(self.foo)
    def tearDown(self):
        self.tar.close()
        support.unlink(self.foo)
        support.unlink(self.bar)
    def test_add_twice(self):
        # The same name will be added as a REGTYPE every
        # time regardless of st_nlink.
        tarinfo = self.tar.gettarinfo(self.foo)
        self.assertEqual(tarinfo.type, tarfile.REGTYPE,
                         "add file as regular failed")
    def test_add_hardlink(self):
        # A second path to an already archived inode becomes a LNKTYPE.
        tarinfo = self.tar.gettarinfo(self.bar)
        self.assertEqual(tarinfo.type, tarfile.LNKTYPE,
                         "add file as hardlink failed")
    def test_dereference_hardlink(self):
        # With dereference=True hardlinks are stored as regular files.
        self.tar.dereference = True
        tarinfo = self.tar.gettarinfo(self.bar)
        self.assertEqual(tarinfo.type, tarfile.REGTYPE,
                         "dereferencing hardlink failed")
class PaxWriteTest(GNUWriteTest):
    # Like GNUWriteTest, but long names/links are stored in pax
    # extended headers instead of GNU extensions.
    def _test(self, name, link=None):
        # See GNUWriteTest.
        tarinfo = tarfile.TarInfo(name)
        if link:
            tarinfo.linkname = link
            tarinfo.type = tarfile.LNKTYPE
        tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT)
        try:
            tar.addfile(tarinfo)
        finally:
            tar.close()
        tar = tarfile.open(tmpname)
        try:
            if link:
                l = tar.getmembers()[0].linkname
                self.assertEqual(link, l, "PAX longlink creation failed")
            else:
                n = tar.getmembers()[0].name
                self.assertEqual(name, n, "PAX longname creation failed")
        finally:
            tar.close()
    def test_pax_global_header(self):
        # A pax global header must survive a write/read round trip with
        # all its fields decoded to str.
        pax_headers = {
            "foo": "bar",
            "uid": "0",
            "mtime": "1.23",
            "test": "\xe4\xf6\xfc",
            "\xe4\xf6\xfc": "test"}
        tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT,
                           pax_headers=pax_headers)
        try:
            tar.addfile(tarfile.TarInfo("test"))
        finally:
            tar.close()
        # Test if the global header was written correctly.
        tar = tarfile.open(tmpname, encoding="iso8859-1")
        try:
            self.assertEqual(tar.pax_headers, pax_headers)
            self.assertEqual(tar.getmembers()[0].pax_headers, pax_headers)
            # Test if all the fields are strings.
            for key, val in tar.pax_headers.items():
                self.assertIsNot(type(key), bytes)
                self.assertIsNot(type(val), bytes)
                if key in tarfile.PAX_NUMBER_FIELDS:
                    try:
                        tarfile.PAX_NUMBER_FIELDS[key](val)
                    except (TypeError, ValueError):
                        self.fail("unable to convert pax header field")
        finally:
            tar.close()
    def test_pax_extended_header(self):
        # The fields from the pax header have priority over the
        # TarInfo.
        pax_headers = {"path": "foo", "uid": "123"}
        tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT,
                           encoding="iso8859-1")
        try:
            t = tarfile.TarInfo()
            t.name = "\xe4\xf6\xfc" # non-ASCII
            t.uid = 8**8 # too large
            t.pax_headers = pax_headers
            tar.addfile(t)
        finally:
            tar.close()
        tar = tarfile.open(tmpname, encoding="iso8859-1")
        try:
            t = tar.getmembers()[0]
            self.assertEqual(t.pax_headers, pax_headers)
            self.assertEqual(t.name, "foo")
            self.assertEqual(t.uid, 123)
        finally:
            tar.close()
class UnicodeTest:
    # Mixin with encoding round-trip tests; subclasses set 'format'.
    def test_iso8859_1_filename(self):
        self._test_unicode_filename("iso8859-1")
    def test_utf7_filename(self):
        self._test_unicode_filename("utf7")
    def test_utf8_filename(self):
        self._test_unicode_filename("utf-8")
    def _test_unicode_filename(self, encoding):
        # A non-ASCII member name must survive a write/read round trip.
        tar = tarfile.open(tmpname, "w", format=self.format,
                           encoding=encoding, errors="strict")
        try:
            name = "\xe4\xf6\xfc"
            tar.addfile(tarfile.TarInfo(name))
        finally:
            tar.close()
        tar = tarfile.open(tmpname, encoding=encoding)
        try:
            self.assertEqual(tar.getmembers()[0].name, name)
        finally:
            tar.close()
    def test_unicode_filename_error(self):
        # Names/unames that cannot be encoded must raise UnicodeError.
        tar = tarfile.open(tmpname, "w", format=self.format,
                           encoding="ascii", errors="strict")
        try:
            tarinfo = tarfile.TarInfo()
            tarinfo.name = "\xe4\xf6\xfc"
            self.assertRaises(UnicodeError, tar.addfile, tarinfo)
            tarinfo.name = "foo"
            tarinfo.uname = "\xe4\xf6\xfc"
            self.assertRaises(UnicodeError, tar.addfile, tarinfo)
        finally:
            tar.close()
    def test_unicode_argument(self):
        # All text attributes must be decoded to str on reading.
        tar = tarfile.open(tarname, "r",
                           encoding="iso8859-1", errors="strict")
        try:
            for t in tar:
                self.assertIs(type(t.name), str)
                self.assertIs(type(t.linkname), str)
                self.assertIs(type(t.uname), str)
                self.assertIs(type(t.gname), str)
        finally:
            tar.close()
    def test_uname_unicode(self):
        # Non-ASCII user/group names round-trip; re-reading a non-pax
        # archive with the ascii codec yields surrogate escapes.
        t = tarfile.TarInfo("foo")
        t.uname = "\xe4\xf6\xfc"
        t.gname = "\xe4\xf6\xfc"
        tar = tarfile.open(tmpname, mode="w", format=self.format,
                           encoding="iso8859-1")
        try:
            tar.addfile(t)
        finally:
            tar.close()
        tar = tarfile.open(tmpname, encoding="iso8859-1")
        try:
            t = tar.getmember("foo")
            self.assertEqual(t.uname, "\xe4\xf6\xfc")
            self.assertEqual(t.gname, "\xe4\xf6\xfc")
            if self.format != tarfile.PAX_FORMAT:
                tar.close()
                tar = tarfile.open(tmpname, encoding="ascii")
                t = tar.getmember("foo")
                self.assertEqual(t.uname, "\udce4\udcf6\udcfc")
                self.assertEqual(t.gname, "\udce4\udcf6\udcfc")
        finally:
            tar.close()
class UstarUnicodeTest(UnicodeTest, unittest.TestCase):
    format = tarfile.USTAR_FORMAT
    # Test whether the utf-8 encoded version of a filename exceeds the 100
    # bytes name field limit (every occurrence of '\xff' will be expanded to 2
    # bytes).
    def test_unicode_name1(self):
        self._test_ustar_name("0123456789" * 10)
        self._test_ustar_name("0123456789" * 10 + "0", ValueError)
        self._test_ustar_name("0123456789" * 9 + "01234567\xff")
        self._test_ustar_name("0123456789" * 9 + "012345678\xff", ValueError)
    def test_unicode_name2(self):
        self._test_ustar_name("0123456789" * 9 + "012345\xff\xff")
        self._test_ustar_name("0123456789" * 9 + "0123456\xff\xff", ValueError)
    # Test whether the utf-8 encoded version of a filename exceeds the 155
    # bytes prefix + '/' + 100 bytes name limit.
    def test_unicode_longname1(self):
        self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 10)
        self._test_ustar_name("0123456789" * 15 + "0123/4" + "0123456789" * 10, ValueError)
        self._test_ustar_name("0123456789" * 15 + "012\xff/" + "0123456789" * 10)
        self._test_ustar_name("0123456789" * 15 + "0123\xff/" + "0123456789" * 10, ValueError)
    def test_unicode_longname2(self):
        self._test_ustar_name("0123456789" * 15 + "01\xff/2" + "0123456789" * 10, ValueError)
        self._test_ustar_name("0123456789" * 15 + "01\xff\xff/" + "0123456789" * 10, ValueError)
    def test_unicode_longname3(self):
        self._test_ustar_name("0123456789" * 15 + "01\xff\xff/2" + "0123456789" * 10, ValueError)
        self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "01234567\xff")
        self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "012345678\xff", ValueError)
    def test_unicode_longname4(self):
        self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "012345\xff\xff")
        self._test_ustar_name("0123456789" * 15 + "01234/" + "0123456789" * 9 + "0123456\xff\xff", ValueError)
    def _test_ustar_name(self, name, exc=None):
        # If exc is None the name must fit the ustar fields; otherwise
        # addfile() must raise exc.  Fitting names are read back for
        # verification.
        with tarfile.open(tmpname, "w", format=self.format, encoding="utf-8") as tar:
            t = tarfile.TarInfo(name)
            if exc is None:
                tar.addfile(t)
            else:
                self.assertRaises(exc, tar.addfile, t)
        if exc is None:
            with tarfile.open(tmpname, "r", encoding="utf-8") as tar:
                for t in tar:
                    self.assertEqual(name, t.name)
                    break
    # Test the same as above for the 100 bytes link field.
    def test_unicode_link1(self):
        self._test_ustar_link("0123456789" * 10)
        self._test_ustar_link("0123456789" * 10 + "0", ValueError)
        self._test_ustar_link("0123456789" * 9 + "01234567\xff")
        self._test_ustar_link("0123456789" * 9 + "012345678\xff", ValueError)
    def test_unicode_link2(self):
        self._test_ustar_link("0123456789" * 9 + "012345\xff\xff")
        self._test_ustar_link("0123456789" * 9 + "0123456\xff\xff", ValueError)
    def _test_ustar_link(self, name, exc=None):
        # Same as _test_ustar_name() but for the linkname field.
        with tarfile.open(tmpname, "w", format=self.format, encoding="utf-8") as tar:
            t = tarfile.TarInfo("foo")
            t.linkname = name
            if exc is None:
                tar.addfile(t)
            else:
                self.assertRaises(exc, tar.addfile, t)
        if exc is None:
            with tarfile.open(tmpname, "r", encoding="utf-8") as tar:
                for t in tar:
                    self.assertEqual(name, t.linkname)
                    break
class GNUUnicodeTest(UnicodeTest, unittest.TestCase):
    format = tarfile.GNU_FORMAT
    def test_bad_pax_header(self):
        # Test for issue #8633. GNU tar <= 1.23 creates raw binary fields
        # without a hdrcharset=BINARY header.
        for encoding, name in (
                ("utf-8", "pax/bad-pax-\udce4\udcf6\udcfc"),
                ("iso8859-1", "pax/bad-pax-\xe4\xf6\xfc"),):
            with tarfile.open(tarname, encoding=encoding,
                              errors="surrogateescape") as tar:
                try:
                    t = tar.getmember(name)
                except KeyError:
                    self.fail("unable to read bad GNU tar pax header")
class PAXUnicodeTest(UnicodeTest, unittest.TestCase):
    format = tarfile.PAX_FORMAT
    # PAX_FORMAT ignores encoding in write mode.
    test_unicode_filename_error = None
    def test_binary_header(self):
        # Test a POSIX.1-2008 compatible header with a hdrcharset=BINARY field.
        for encoding, name in (
                ("utf-8", "pax/hdrcharset-\udce4\udcf6\udcfc"),
                ("iso8859-1", "pax/hdrcharset-\xe4\xf6\xfc"),):
            with tarfile.open(tarname, encoding=encoding,
                              errors="surrogateescape") as tar:
                try:
                    t = tar.getmember(name)
                except KeyError:
                    self.fail("unable to read POSIX.1-2008 binary header")
class AppendTestBase:
    # Test append mode (cp. patch #1652681).

    def setUp(self):
        self.tarname = tmpname
        if os.path.exists(self.tarname):
            support.unlink(self.tarname)

    def _create_testtar(self, mode="w:"):
        # Copy the "ustar/regtype" member of the reference archive into
        # a fresh test archive under the name "foo".
        with tarfile.open(tarname, encoding="iso8859-1") as src:
            member = src.getmember("ustar/regtype")
            member.name = "foo"
            with src.extractfile(member) as fileobj:
                with tarfile.open(self.tarname, mode) as dst:
                    dst.addfile(member, fileobj)

    def test_append_compressed(self):
        # Appending to a compressed archive must be rejected.
        self._create_testtar("w:" + self.suffix)
        self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, "a")
class AppendTest(AppendTestBase, unittest.TestCase):
    # Append-mode tests for plain (uncompressed) archives.
    test_append_compressed = None

    def _add_testfile(self, fileobj=None):
        # Append an empty member named "bar" to the test archive.
        with tarfile.open(self.tarname, "a", fileobj=fileobj) as tar:
            tar.addfile(tarfile.TarInfo("bar"))

    def _test(self, names=None, fileobj=None):
        # Check that the archive contains exactly *names* (defaults to
        # ["bar"]).  A None sentinel replaces the former mutable default
        # argument, which is a classic Python pitfall.
        if names is None:
            names = ["bar"]
        with tarfile.open(self.tarname, fileobj=fileobj) as tar:
            self.assertEqual(tar.getnames(), names)

    def test_non_existing(self):
        # Append mode creates the archive if it does not exist yet.
        self._add_testfile()
        self._test()

    def test_empty(self):
        # Appending to an empty archive works.
        tarfile.open(self.tarname, "w:").close()
        self._add_testfile()
        self._test()

    def test_empty_fileobj(self):
        # Appending to an all-zero file object works.
        fobj = io.BytesIO(b"\0" * 1024)
        self._add_testfile(fobj)
        fobj.seek(0)
        self._test(fileobj=fobj)

    def test_fileobj(self):
        # Appending to a file object holding an existing archive works.
        self._create_testtar()
        with open(self.tarname, "rb") as fobj:
            data = fobj.read()
        fobj = io.BytesIO(data)
        self._add_testfile(fobj)
        fobj.seek(0)
        self._test(names=["foo", "bar"], fileobj=fobj)

    def test_existing(self):
        # Appending to an existing on-disk archive works.
        self._create_testtar()
        self._add_testfile()
        self._test(names=["foo", "bar"])

    # Append mode is supposed to fail if the tarfile to append to
    # does not end with a zero block.
    def _test_error(self, data):
        with open(self.tarname, "wb") as fobj:
            fobj.write(data)
        self.assertRaises(tarfile.ReadError, self._add_testfile)

    def test_null(self):
        self._test_error(b"")

    def test_incomplete(self):
        self._test_error(b"\0" * 13)

    def test_premature_eof(self):
        data = tarfile.TarInfo("foo").tobuf()
        self._test_error(data)

    def test_trailing_garbage(self):
        data = tarfile.TarInfo("foo").tobuf()
        self._test_error(data + b"\0" * 13)

    def test_invalid(self):
        self._test_error(b"a" * 512)
class GzipAppendTest(GzipTest, AppendTestBase, unittest.TestCase):
    # Appending to gzip archives must raise ReadError.
    pass
class Bz2AppendTest(Bz2Test, AppendTestBase, unittest.TestCase):
    # Appending to bzip2 archives must raise ReadError.
    pass
class LzmaAppendTest(LzmaTest, AppendTestBase, unittest.TestCase):
    # Appending to xz archives must raise ReadError.
    pass
class LimitsTest(unittest.TestCase):
    # Check the field-size limits of the individual archive formats.

    def test_ustar_limits(self):
        # 100 char name
        tarfile.TarInfo("0123456789" * 10).tobuf(tarfile.USTAR_FORMAT)
        # 101 char name that cannot be stored
        with self.assertRaises(ValueError):
            tarfile.TarInfo("0123456789" * 10 + "0").tobuf(
                    tarfile.USTAR_FORMAT)
        # 256 char name with a slash at pos 156
        tarfile.TarInfo("123/" * 62 + "longname").tobuf(tarfile.USTAR_FORMAT)
        # 256 char name that cannot be stored
        with self.assertRaises(ValueError):
            tarfile.TarInfo("1234567/" * 31 + "longname").tobuf(
                    tarfile.USTAR_FORMAT)
        # 512 char name
        with self.assertRaises(ValueError):
            tarfile.TarInfo("123/" * 126 + "longname").tobuf(
                    tarfile.USTAR_FORMAT)
        # 512 char linkname
        info = tarfile.TarInfo("longlink")
        info.linkname = "123/" * 126 + "longname"
        with self.assertRaises(ValueError):
            info.tobuf(tarfile.USTAR_FORMAT)
        # uid > 8 digits
        info = tarfile.TarInfo("name")
        info.uid = 0o10000000
        with self.assertRaises(ValueError):
            info.tobuf(tarfile.USTAR_FORMAT)

    def test_gnu_limits(self):
        # GNU format stores overlong names/links in extension records.
        tarfile.TarInfo("123/" * 126 + "longname").tobuf(tarfile.GNU_FORMAT)
        info = tarfile.TarInfo("longlink")
        info.linkname = "123/" * 126 + "longname"
        info.tobuf(tarfile.GNU_FORMAT)
        # uid >= 256 ** 7
        info = tarfile.TarInfo("name")
        info.uid = 0o4000000000000000000
        with self.assertRaises(ValueError):
            info.tobuf(tarfile.GNU_FORMAT)

    def test_pax_limits(self):
        # PAX format has no practical field-size limits.
        tarfile.TarInfo("123/" * 126 + "longname").tobuf(tarfile.PAX_FORMAT)
        info = tarfile.TarInfo("longlink")
        info.linkname = "123/" * 126 + "longname"
        info.tobuf(tarfile.PAX_FORMAT)
        info = tarfile.TarInfo("name")
        info.uid = 0o4000000000000000000
        info.tobuf(tarfile.PAX_FORMAT)
class MiscTest(unittest.TestCase):
def test_char_fields(self):
self.assertEqual(tarfile.stn("foo", 8, "ascii", "strict"),
b"foo\0\0\0\0\0")
self.assertEqual(tarfile.stn("foobar", 3, "ascii", "strict"),
b"foo")
self.assertEqual(tarfile.nts(b"foo\0\0\0\0\0", "ascii", "strict"),
"foo")
self.assertEqual(tarfile.nts(b"foo\0bar\0", "ascii", "strict"),
"foo")
def test_read_number_fields(self):
# Issue 13158: Test if GNU tar specific base-256 number fields
# are decoded correctly.
self.assertEqual(tarfile.nti(b"0000001\x00"), 1)
self.assertEqual(tarfile.nti(b"7777777\x00"), 0o7777777)
self.assertEqual(tarfile.nti(b"\x80\x00\x00\x00\x00\x20\x00\x00"),
0o10000000)
self.assertEqual(tarfile.nti(b"\x80\x00\x00\x00\xff\xff\xff\xff"),
0xffffffff)
self.assertEqual(tarfile.nti(b"\xff\xff\xff\xff\xff\xff\xff\xff"),
-1)
self.assertEqual(tarfile.nti(b"\xff\xff\xff\xff\xff\xff\xff\x9c"),
-100)
self.assertEqual(tarfile.nti(b"\xff\x00\x00\x00\x00\x00\x00\x00"),
-0x100000000000000)
# Issue 24514: Test if empty number fields are converted to zero.
self.assertEqual(tarfile.nti(b"\0"), 0)
self.assertEqual(tarfile.nti(b" \0"), 0)
def test_write_number_fields(self):
self.assertEqual(tarfile.itn(1), b"0000001\x00")
self.assertEqual(tarfile.itn(0o7777777), b"7777777\x00")
self.assertEqual(tarfile.itn(0o10000000),
b"\x80\x00\x00\x00\x00\x20\x00\x00")
self.assertEqual(tarfile.itn(0xffffffff),
b"\x80\x00\x00\x00\xff\xff\xff\xff")
self.assertEqual(tarfile.itn(-1),
b"\xff\xff\xff\xff\xff\xff\xff\xff")
self.assertEqual(tarfile.itn(-100),
b"\xff\xff\xff\xff\xff\xff\xff\x9c")
self.assertEqual(tarfile.itn(-0x100000000000000),
b"\xff\x00\x00\x00\x00\x00\x00\x00")
def test_number_field_limits(self):
with self.assertRaises(ValueError):
tarfile.itn(-1, 8, tarfile.USTAR_FORMAT)
with self.assertRaises(ValueError):
tarfile.itn(0o10000000, 8, tarfile.USTAR_FORMAT)
with self.assertRaises(ValueError):
tarfile.itn(-0x10000000001, 6, tarfile.GNU_FORMAT)
with self.assertRaises(ValueError):
tarfile.itn(0x10000000000, 6, tarfile.GNU_FORMAT)
def test__all__(self):
blacklist = {'version', 'grp', 'pwd', 'symlink_exception',
'NUL', 'BLOCKSIZE', 'RECORDSIZE', 'GNU_MAGIC',
'POSIX_MAGIC', 'LENGTH_NAME', 'LENGTH_LINK',
'LENGTH_PREFIX', 'REGTYPE', 'AREGTYPE', 'LNKTYPE',
'SYMTYPE', 'CHRTYPE', 'BLKTYPE', 'DIRTYPE', 'FIFOTYPE',
'CONTTYPE', 'GNUTYPE_LONGNAME', 'GNUTYPE_LONGLINK',
'GNUTYPE_SPARSE', 'XHDTYPE', 'XGLTYPE', 'SOLARIS_XHDTYPE',
'SUPPORTED_TYPES', 'REGULAR_TYPES', 'GNU_TYPES',
'PAX_FIELDS', 'PAX_NAME_FIELDS', 'PAX_NUMBER_FIELDS',
'stn', 'nts', 'nti', 'itn', 'calc_chksums', 'copyfileobj',
'filemode',
'EmptyHeaderError', 'TruncatedHeaderError',
'EOFHeaderError', 'InvalidHeaderError',
'SubsequentHeaderError', 'ExFileObject',
'main'}
support.check__all__(self, tarfile, blacklist=blacklist)
class CommandLineTest(unittest.TestCase):
    def tarfilecmd(self, *args, **kwargs):
        # Run "python -m tarfile <args>" expecting success; return its
        # stdout with platform line endings normalized to '\n'.
        rc, out, err = script_helper.assert_python_ok('-m', 'tarfile', *args,
                                                      **kwargs)
        return out.replace(os.linesep.encode(), b'\n')
    def tarfilecmd_failure(self, *args):
        # Run "python -m tarfile <args>" expecting a non-zero exit status.
        return script_helper.assert_python_failure('-m', 'tarfile', *args)
    def make_simple_tarfile(self, tar_name):
        # Create a small uncompressed archive with two test-data files;
        # the archive is removed again during test cleanup.
        files = [support.findfile('tokenize_tests.txt'),
                 support.findfile('tokenize_tests-no-coding-cookie-'
                                  'and-utf8-bom-sig-only.txt')]
        self.addCleanup(support.unlink, tar_name)
        with tarfile.open(tar_name, 'w') as tf:
            for tardata in files:
                tf.add(tardata, arcname=os.path.basename(tardata))
def test_bad_use(self):
rc, out, err = self.tarfilecmd_failure()
self.assertEqual(out, b'')
self.assertIn(b'usage', err.lower())
self.assertIn(b'error', err.lower())
self.assertIn(b'required', err.lower())
rc, out, err = self.tarfilecmd_failure('-l', '')
self.assertEqual(out, b'')
self.assertNotEqual(err.strip(), b'')
def test_test_command(self):
for tar_name in testtarnames:
for opt in '-t', '--test':
out = self.tarfilecmd(opt, tar_name)
self.assertEqual(out, b'')
def test_test_command_verbose(self):
for tar_name in testtarnames:
for opt in '-v', '--verbose':
out = self.tarfilecmd(opt, '-t', tar_name)
self.assertIn(b'is a tar archive.\n', out)
    def test_test_command_invalid_file(self):
        # A zip file is not a tar archive and must be rejected.
        zipname = support.findfile('zipdir.zip')
        rc, out, err = self.tarfilecmd_failure('-t', zipname)
        self.assertIn(b' is not a tar archive.', err)
        self.assertEqual(out, b'')
        self.assertEqual(rc, 1)
        # A truncated copy (less than one 512-byte block) must fail, too.
        for tar_name in testtarnames:
            with self.subTest(tar_name=tar_name):
                with open(tar_name, 'rb') as f:
                    data = f.read()
                try:
                    with open(tmpname, 'wb') as f:
                        f.write(data[:511])
                    rc, out, err = self.tarfilecmd_failure('-t', tmpname)
                    self.assertEqual(out, b'')
                    self.assertEqual(rc, 1)
                finally:
                    support.unlink(tmpname)
    def test_list_command(self):
        # CLI listing must match TarFile.list(verbose=False) output.
        for tar_name in testtarnames:
            with support.captured_stdout() as t:
                with tarfile.open(tar_name, 'r') as tf:
                    tf.list(verbose=False)
            expected = t.getvalue().encode('ascii', 'backslashreplace')
            for opt in '-l', '--list':
                out = self.tarfilecmd(opt, tar_name,
                                      PYTHONIOENCODING='ascii')
                self.assertEqual(out, expected)
    def test_list_command_verbose(self):
        # Verbose CLI listing must match TarFile.list(verbose=True).
        for tar_name in testtarnames:
            with support.captured_stdout() as t:
                with tarfile.open(tar_name, 'r') as tf:
                    tf.list(verbose=True)
            expected = t.getvalue().encode('ascii', 'backslashreplace')
            for opt in '-v', '--verbose':
                out = self.tarfilecmd(opt, '-l', tar_name,
                                      PYTHONIOENCODING='ascii')
                self.assertEqual(out, expected)
def test_list_command_invalid_file(self):
zipname = support.findfile('zipdir.zip')
rc, out, err = self.tarfilecmd_failure('-l', zipname)
self.assertIn(b' is not a tar archive.', err)
self.assertEqual(out, b'')
self.assertEqual(rc, 1)
def test_create_command(self):
files = [support.findfile('tokenize_tests.txt'),
support.findfile('tokenize_tests-no-coding-cookie-'
'and-utf8-bom-sig-only.txt')]
for opt in '-c', '--create':
try:
out = self.tarfilecmd(opt, tmpname, *files)
self.assertEqual(out, b'')
with tarfile.open(tmpname) as tar:
tar.getmembers()
finally:
support.unlink(tmpname)
def test_create_command_verbose(self):
    """Verbose '-c' reports that the archive file was created."""
    sources = [support.findfile('tokenize_tests.txt'),
               support.findfile('tokenize_tests-no-coding-cookie-'
                                'and-utf8-bom-sig-only.txt')]
    for verbose_flag in ('-v', '--verbose'):
        try:
            out = self.tarfilecmd(verbose_flag, '-c', tmpname, *sources)
            self.assertIn(b' file created.', out)
            with tarfile.open(tmpname) as tar:
                tar.getmembers()
        finally:
            support.unlink(tmpname)
def test_create_command_dotless_filename(self):
    """'-c' works when the target archive name has no extension."""
    sources = [support.findfile('tokenize_tests.txt')]
    try:
        out = self.tarfilecmd('-c', dotlessname, *sources)
        self.assertEqual(out, b'')
        with tarfile.open(dotlessname) as tar:
            tar.getmembers()
    finally:
        support.unlink(dotlessname)
def test_create_command_dot_started_filename(self):
    """'-c' works when the target archive name starts with a dot."""
    hidden_name = os.path.join(TEMPDIR, ".testtar")
    sources = [support.findfile('tokenize_tests.txt')]
    try:
        out = self.tarfilecmd('-c', hidden_name, *sources)
        self.assertEqual(out, b'')
        with tarfile.open(hidden_name) as tar:
            tar.getmembers()
    finally:
        support.unlink(hidden_name)
def test_create_command_compressed(self):
    """'-c' infers compression (.gz/.bz2/.xz) from the archive suffix.

    Formats whose compression module is unavailable are skipped.
    """
    sources = [support.findfile('tokenize_tests.txt'),
               support.findfile('tokenize_tests-no-coding-cookie-'
                                'and-utf8-bom-sig-only.txt')]
    for filetype in (GzipTest, Bz2Test, LzmaTest):
        if not filetype.open:
            continue
        # Compute the target name *before* the try block so the finally
        # clause can never reference an unbound name.
        tar_name = tmpname + '.' + filetype.suffix
        try:
            out = self.tarfilecmd('-c', tar_name, *sources)
            # Non-verbose create is silent (consistent with
            # test_create_command).
            self.assertEqual(out, b'')
            with filetype.taropen(tar_name) as tar:
                tar.getmembers()
        finally:
            support.unlink(tar_name)
def test_extract_command(self):
    """'-e' extracts into the current directory and prints nothing."""
    self.make_simple_tarfile(tmpname)
    for extract_flag in ('-e', '--extract'):
        try:
            with support.temp_cwd(tarextdir):
                out = self.tarfilecmd(extract_flag, tmpname)
                self.assertEqual(out, b'')
        finally:
            support.rmtree(tarextdir)
def test_extract_command_verbose(self):
    """Verbose '-e' reports that the file was extracted."""
    self.make_simple_tarfile(tmpname)
    for verbose_flag in ('-v', '--verbose'):
        try:
            with support.temp_cwd(tarextdir):
                out = self.tarfilecmd(verbose_flag, '-e', tmpname)
                self.assertIn(b' file is extracted.', out)
        finally:
            support.rmtree(tarextdir)
def test_extract_command_different_directory(self):
    """'-e' accepts an explicit destination directory argument."""
    self.make_simple_tarfile(tmpname)
    try:
        with support.temp_cwd(tarextdir):
            out = self.tarfilecmd('-e', tmpname, 'spamdir')
            self.assertEqual(out, b'')
    finally:
        support.rmtree(tarextdir)
def test_extract_command_invalid_file(self):
    """'-e' on a non-tar file fails with rc=1 and a stderr message."""
    not_a_tar = support.findfile('zipdir.zip')
    with support.temp_cwd(tarextdir):
        rc, out, err = self.tarfilecmd_failure('-e', not_a_tar)
    self.assertEqual(rc, 1)
    self.assertEqual(out, b'')
    self.assertIn(b' is not a tar archive.', err)
class ContextManagerTest(unittest.TestCase):
    """Tests for TarFile's with-statement (__enter__/__exit__) protocol."""

    def test_basic(self):
        # Leaving the with-block must close the archive.
        with tarfile.open(tarname) as tar:
            self.assertFalse(tar.closed, "closed inside runtime context")
        self.assertTrue(tar.closed, "context manager failed")

    def test_closed(self):
        # The __enter__() method is supposed to raise OSError
        # if the TarFile object is already closed.
        tar = tarfile.open(tarname)
        tar.close()
        with self.assertRaises(OSError):
            with tar:
                pass

    def test_exception(self):
        # Test if the OSError exception is passed through properly.
        with self.assertRaises(Exception) as exc:
            with tarfile.open(tarname) as tar:
                raise OSError
        self.assertIsInstance(exc.exception, OSError,
                              "wrong exception raised in context manager")
        self.assertTrue(tar.closed, "context manager failed")

    def test_no_eof(self):
        # __exit__() must not write end-of-archive blocks if an
        # exception was raised.
        try:
            with tarfile.open(tmpname, "w") as tar:
                raise Exception
        except:
            pass
        self.assertEqual(os.path.getsize(tmpname), 0,
                         "context manager wrote an end-of-archive block")
        self.assertTrue(tar.closed, "context manager failed")

    def test_eof(self):
        # __exit__() must write end-of-archive blocks, i.e. call
        # TarFile.close() if there was no error.
        with tarfile.open(tmpname, "w"):
            pass
        self.assertNotEqual(os.path.getsize(tmpname), 0,
                            "context manager wrote no end-of-archive block")

    def test_fileobj(self):
        # Test that __exit__() did not close the external file
        # object.
        with open(tmpname, "wb") as fobj:
            try:
                with tarfile.open(fileobj=fobj, mode="w") as tar:
                    raise Exception
            except:
                pass
            self.assertFalse(fobj.closed, "external file object was closed")
            self.assertTrue(tar.closed, "context manager failed")
@unittest.skipIf(hasattr(os, "link"), "requires os.link to be missing")
class LinkEmulationTest(ReadTest, unittest.TestCase):
    # Test for issue #8741 regression. On platforms that do not support
    # symbolic or hard links tarfile tries to extract these types of members
    # as the regular files they point to.

    def _test_link_extraction(self, name):
        # Extract the link member and verify that the *target's* data was
        # written, by comparing against the known regtype checksum.
        self.tar.extract(name, TEMPDIR)
        with open(os.path.join(TEMPDIR, name), "rb") as f:
            data = f.read()
        self.assertEqual(md5sum(data), md5_regtype)

    # See issues #1578269, #8879, and #17689 for some history on these skips
    @unittest.skipIf(hasattr(os.path, "islink"),
                     "Skip emulation - has os.path.islink but not os.link")
    def test_hardlink_extraction1(self):
        self._test_link_extraction("ustar/lnktype")

    @unittest.skipIf(hasattr(os.path, "islink"),
                     "Skip emulation - has os.path.islink but not os.link")
    def test_hardlink_extraction2(self):
        self._test_link_extraction("./ustar/linktest2/lnktype")

    @unittest.skipIf(hasattr(os, "symlink"),
                     "Skip emulation if symlink exists")
    def test_symlink_extraction1(self):
        self._test_link_extraction("ustar/symtype")

    @unittest.skipIf(hasattr(os, "symlink"),
                     "Skip emulation if symlink exists")
    def test_symlink_extraction2(self):
        self._test_link_extraction("./ustar/linktest2/symtype")
class Bz2PartialReadTest(Bz2Test, unittest.TestCase):
    # Issue5068: The _BZ2Proxy.read() method loops forever
    # on an empty or partial bzipped file.

    def _test_partial_input(self, mode):
        # A BytesIO that raises if read() is called again after EOF has
        # already been reached -- i.e. if tarfile.open() spins at EOF.
        class MyBytesIO(io.BytesIO):
            hit_eof = False
            def read(self, n):
                if self.hit_eof:
                    raise AssertionError("infinite loop detected in "
                                         "tarfile.open()")
                self.hit_eof = self.tell() == len(self.getvalue())
                return super(MyBytesIO, self).read(n)
            def seek(self, *args):
                # Any seek resets the EOF flag.
                self.hit_eof = False
                return super(MyBytesIO, self).seek(*args)

        data = bz2.compress(tarfile.TarInfo("foo").tobuf())
        # Try every possible truncation length of the compressed data.
        for x in range(len(data) + 1):
            try:
                tarfile.open(fileobj=MyBytesIO(data[:x]), mode=mode)
            except tarfile.ReadError:
                pass # we have no interest in ReadErrors

    def test_partial_input(self):
        self._test_partial_input("r")

    def test_partial_input_bz2(self):
        self._test_partial_input("r:bz2")
def root_is_uid_gid_0():
    """Return True iff both uid 0 and gid 0 are named 'root'.

    Returns False on platforms that lack the pwd/grp modules.
    """
    try:
        import pwd, grp
    except ImportError:
        return False
    return pwd.getpwuid(0)[0] == 'root' and grp.getgrgid(0)[0] == 'root'
@unittest.skipUnless(hasattr(os, 'chown'), "missing os.chown")
@unittest.skipUnless(hasattr(os, 'geteuid'), "missing os.geteuid")
class NumericOwnerTest(unittest.TestCase):
    # mock the following:
    #  os.chown: so we can test what's being called
    #  os.chmod: so the modes are not actually changed. if they are, we can't
    #            delete the files/directories
    #  os.geteuid: so we can lie and say we're root (uid = 0)

    @staticmethod
    def _make_test_archive(filename_1, dirname_1, filename_2):
        # the file contents to write
        fobj = io.BytesIO(b"content")

        # create a tar file with a file, a directory, and a file within that
        #  directory. Assign various .uid/.gid values to them
        items = [(filename_1, 99, 98, tarfile.REGTYPE, fobj),
                 (dirname_1, 77, 76, tarfile.DIRTYPE, None),
                 (filename_2, 88, 87, tarfile.REGTYPE, fobj),
                 ]
        with tarfile.open(tmpname, 'w') as tarfl:
            for name, uid, gid, typ, contents in items:
                t = tarfile.TarInfo(name)
                t.uid = uid
                t.gid = gid
                # uname/gname are both 'root' so the non-numeric lookup path
                # resolves them via pwd/grp in the tests below.
                t.uname = 'root'
                t.gname = 'root'
                t.type = typ
                tarfl.addfile(t, contents)

        # return the full pathname to the tar file
        return tmpname

    @staticmethod
    @contextmanager
    def _setup_test(mock_geteuid):
        mock_geteuid.return_value = 0       # lie and say we're root

        fname = 'numeric-owner-testfile'
        dirname = 'dir'

        # the names we want stored in the tarfile
        filename_1 = fname
        dirname_1 = dirname
        filename_2 = os.path.join(dirname, fname)

        # create the tarfile with the contents we're after
        tar_filename = NumericOwnerTest._make_test_archive(filename_1,
                                                           dirname_1,
                                                           filename_2)

        # open the tarfile for reading. yield it and the names of the items
        #  we stored into the file
        with tarfile.open(tar_filename) as tarfl:
            yield tarfl, filename_1, dirname_1, filename_2

    # NOTE: mock.patch decorators apply bottom-up, so the mock arguments
    # arrive in the order geteuid, chmod, chown.
    @unittest.mock.patch('os.chown')
    @unittest.mock.patch('os.chmod')
    @unittest.mock.patch('os.geteuid')
    def test_extract_with_numeric_owner(self, mock_geteuid, mock_chmod,
                                        mock_chown):
        with self._setup_test(mock_geteuid) as (tarfl, filename_1, _,
                                                filename_2):
            tarfl.extract(filename_1, TEMPDIR, numeric_owner=True)
            tarfl.extract(filename_2, TEMPDIR, numeric_owner=True)

        # convert to filesystem paths
        f_filename_1 = os.path.join(TEMPDIR, filename_1)
        f_filename_2 = os.path.join(TEMPDIR, filename_2)

        # numeric_owner=True must use the uid/gid stored in the archive,
        # not the uname/gname resolved through pwd/grp.
        mock_chown.assert_has_calls([unittest.mock.call(f_filename_1, 99, 98),
                                     unittest.mock.call(f_filename_2, 88, 87),
                                     ],
                                    any_order=True)

    @unittest.mock.patch('os.chown')
    @unittest.mock.patch('os.chmod')
    @unittest.mock.patch('os.geteuid')
    def test_extractall_with_numeric_owner(self, mock_geteuid, mock_chmod,
                                           mock_chown):
        with self._setup_test(mock_geteuid) as (tarfl, filename_1, dirname_1,
                                                filename_2):
            tarfl.extractall(TEMPDIR, numeric_owner=True)

        # convert to filesystem paths
        f_filename_1 = os.path.join(TEMPDIR, filename_1)
        f_dirname_1 = os.path.join(TEMPDIR, dirname_1)
        f_filename_2 = os.path.join(TEMPDIR, filename_2)

        mock_chown.assert_has_calls([unittest.mock.call(f_filename_1, 99, 98),
                                     unittest.mock.call(f_dirname_1, 77, 76),
                                     unittest.mock.call(f_filename_2, 88, 87),
                                     ],
                                    any_order=True)

    # this test requires that uid=0 and gid=0 really be named 'root'. that's
    #  because the uname and gname in the test file are 'root', and extract()
    #  will look them up using pwd and grp to find their uid and gid, which we
    #  test here to be 0.
    @unittest.skipUnless(root_is_uid_gid_0(),
                         'uid=0,gid=0 must be named "root"')
    @unittest.mock.patch('os.chown')
    @unittest.mock.patch('os.chmod')
    @unittest.mock.patch('os.geteuid')
    def test_extract_without_numeric_owner(self, mock_geteuid, mock_chmod,
                                           mock_chown):
        with self._setup_test(mock_geteuid) as (tarfl, filename_1, _, _):
            tarfl.extract(filename_1, TEMPDIR, numeric_owner=False)

        # convert to filesystem paths
        f_filename_1 = os.path.join(TEMPDIR, filename_1)

        mock_chown.assert_called_with(f_filename_1, 0, 0)

    @unittest.mock.patch('os.geteuid')
    def test_keyword_only(self, mock_geteuid):
        # numeric_owner must be keyword-only: passing it positionally raises.
        with self._setup_test(mock_geteuid) as (tarfl, filename_1, _, _):
            self.assertRaises(TypeError,
                              tarfl.extract, filename_1, TEMPDIR, False, True)
def setUpModule():
    """Create a fresh TEMPDIR and compressed copies of the test archive."""
    global testtarnames
    support.unlink(TEMPDIR)
    os.makedirs(TEMPDIR)

    testtarnames = [tarname]
    with open(tarname, "rb") as fobj:
        data = fobj.read()

    # Create compressed tarfiles for every available compression module.
    for cls in (GzipTest, Bz2Test, LzmaTest):
        if cls.open:
            support.unlink(cls.tarname)
            testtarnames.append(cls.tarname)
            with cls.open(cls.tarname, "wb") as fobj:
                fobj.write(data)
def tearDownModule():
    """Remove the scratch directory created by setUpModule(), if any."""
    if not os.path.exists(TEMPDIR):
        return
    support.rmtree(TEMPDIR)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
"""
ASGI config for newspaper_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'newspaper_project.settings')

# Module-level ASGI callable picked up by ASGI servers.
application = get_asgi_application()
|
# Generated by Django 3.2.4 on 2021-10-07 08:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the user avatar and username fields."""

    dependencies = [
        ('user', '0004_alter_user_avatar'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='avatar',
            field=models.ImageField(blank=True, default='img/avatar.png', null=True, upload_to='avatar', verbose_name='Avatar'),
        ),
        migrations.AlterField(
            model_name='user',
            name='username',
            # NOTE(review): `default` here is a models.EmailField *instance*,
            # not a value -- almost certainly a generation artifact; confirm
            # the intended default (likely a plain string) before relying on it.
            field=models.CharField(default=models.EmailField(max_length=255, unique=True, verbose_name='Adresse Email'), max_length=200, unique=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
learn-list
"""
import math
class StatsList(list):
    """A list whose mean and population stdev are computed lazily on access."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def mean(self):
        """Arithmetic mean (raises ZeroDivisionError on an empty list)."""
        return sum(self) / len(self)

    @property
    def stdev(self):
        """Population standard deviation: sqrt(n*sum(x^2) - sum(x)^2) / n."""
        count = len(self)
        total = sum(self)
        total_sq = sum(v * v for v in self)
        return math.sqrt(count * total_sq - total * total) / count
# Quick demo: mean of [1, 2, 3] is 2.0; stdev is sqrt(6)/3 (results discarded).
x=StatsList([1,2,3])
x.mean
x.stdev
#==============================================================================
#
#==============================================================================
class StatsList2(list):
    """Eager Stats: maintains running sums so mean/stdev are O(1).

    sum0/sum1/sum2 mirror len(self), sum(self) and sum(x**2 for x in self).
    Only mutators overridden here keep the sums in sync; list operations
    implemented purely in C (e.g. ``+=``, ``sort``, slice-free construction
    tricks) bypass the accounting.
    """

    def __init__(self, *args, **kw):
        self.sum0 = 0  # len(self)
        self.sum1 = 0  # sum(self)
        self.sum2 = 0  # sum(x**2 for x in self)
        super().__init__(*args, **kw)
        for x in self:
            self._new(x)

    def _new(self, value):
        # Account for one added element.
        self.sum0 += 1
        self.sum1 += value
        self.sum2 += value * value

    def _rmv(self, value):
        # Account for one removed element.
        self.sum0 -= 1
        self.sum1 -= value
        self.sum2 -= value * value

    def insert(self, index, value):
        super().insert(index, value)
        self._new(value)

    def append(self, value):
        # Bug fix: list.append previously bypassed the running sums.
        super().append(value)
        self._new(value)

    def extend(self, iterable):
        # Bug fix: list.extend previously bypassed the running sums.
        for value in iterable:
            self.append(value)

    def remove(self, value):
        # Bug fix: list.remove previously bypassed the running sums.
        super().remove(value)
        self._rmv(value)

    def pop(self, index=0):
        # NOTE: default index 0 (front of the list) is deliberately kept for
        # backward compatibility, although list.pop() defaults to the end.
        value = super().pop(index)
        self._rmv(value)
        return value

    @property
    def mean(self):
        """Arithmetic mean in O(1)."""
        return self.sum1 / self.sum0

    @property
    def stdev(self):
        """Population standard deviation in O(1)."""
        return math.sqrt(self.sum0 * self.sum2 - self.sum1 * self.sum1
                         ) / self.sum0

    def __setitem__(self, index, value):
        """Replace item(s); index may be an int or a slice."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            olds = [self[i] for i in range(start, stop, step)]
            super().__setitem__(index, value)
            for x in olds:
                self._rmv(x)
            for x in value:
                self._new(x)
        else:  # int
            old = self[index]
            super().__setitem__(index, value)
            self._rmv(old)
            self._new(value)

    def __delitem__(self, index):
        """Delete item(s); index may be a single integer or a slice."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            olds = [self[i] for i in range(start, stop, step)]
            super().__delitem__(index)
            for x in olds:
                self._rmv(x)
        else:
            old = self[index]
            super().__delitem__(index)
            self._rmv(old)
# Demo: stdev of [1, 2, 3] computed from the eagerly-maintained sums.
x=StatsList2([1,2,3])
x.stdev
#==============================================================================
#
#==============================================================================
class Explore(list):
    """A list that prints diagnostics about the index object it receives.

    Used to explore how Python passes int vs slice objects to __getitem__.
    """

    def __getitem__(self, index):
        print(type(index))
        print(dir(index))
        if isinstance(index, slice):
            print(index, index.indices(len(self)))
        else:
            # Bug fix: only slice objects have .indices(); calling it on an
            # int index used to raise AttributeError.
            print(index)
        return super().__getitem__(index)
# Exercise __getitem__ with slice arguments only; an int index (e.g. x[0])
# would raise AttributeError because ints have no .indices() method.
x=Explore("1bcde")
x[0:3:2]
x[:]
x[:-1]
# Bare name expression: evaluates the builtin `slice` type and discards it.
slice
#==============================================================================
#
#==============================================================================
|
"""
A modern, Python3-compatible, well-documented library for communicating
with a MineCraft server.
"""
from collections import OrderedDict, namedtuple
import re
# The version number of the most recent pyCraft release.
__version__ = "0.7.0"

# This bit occurs in the protocol numbers of pre-release versions after 1.16.3.
PRE = 1 << 30

# A record representing a Minecraft version in the following list.
# Fields: id (version id string), protocol (int), supported (bool).
Version = namedtuple('Version', ('id', 'protocol', 'supported'))
# A list of Minecraft versions known to pyCraft, including all supported
# versions as well as some unsupported versions (used by certain forward-
# compatible code: e.g. when comparing the current protocol version with that
# of an unsupported version), in chronological order of publication.
#
# The ID string of a version is the key used to identify it in
# <https://launchermeta.mojang.com/mc/game/version_manifest.json>, or the 'id'
# key in "version.json" in the corresponding ".jar" file distributed by Mojang.
KNOWN_MINECRAFT_VERSION_RECORDS = [
# id protocol supported
Version('13w41a', 0, False),
Version('13w41b', 0, False),
Version('13w42a', 1, False),
Version('13w42b', 1, False),
Version('13w43a', 2, False),
Version('1.7-pre', 3, False),
Version('1.7.1-pre', 3, False),
Version('1.7.2', 4, True),
Version('13w47a', 4, False),
Version('13w47b', 4, False),
Version('13w47c', 4, False),
Version('13w47d', 4, False),
Version('13w47e', 4, False),
Version('13w48a', 4, False),
Version('13w48b', 4, False),
Version('13w49a', 4, False),
Version('1.7.3-pre', 4, False),
Version('1.7.4', 4, True),
Version('1.7.5', 4, True),
Version('1.7.6-pre1', 5, False),
Version('1.7.6-pre2', 5, False),
Version('1.7.6', 5, True),
Version('1.7.7', 5, True),
Version('1.7.8', 5, True),
Version('1.7.9', 5, True),
Version('1.7.10-pre1', 5, False),
Version('1.7.10-pre2', 5, False),
Version('1.7.10-pre3', 5, False),
Version('1.7.10-pre4', 5, False),
Version('1.7.10', 5, True),
Version('14w02a', 5, False),
Version('14w02b', 5, False),
Version('14w02c', 5, False),
Version('14w03a', 6, False),
Version('14w03b', 6, False),
Version('14w04a', 7, False),
Version('14w04b', 8, False),
Version('14w05a', 9, False),
Version('14w05b', 9, False),
Version('14w06a', 10, False),
Version('14w06b', 10, False),
Version('14w07a', 11, False),
Version('14w08a', 12, False),
Version('14w10a', 13, False),
Version('14w10b', 13, False),
Version('14w10c', 13, False),
Version('14w11a', 14, False),
Version('14w11b', 14, False),
Version('14w17a', 15, False),
Version('14w18a', 16, False),
Version('14w18b', 16, False),
Version('14w19a', 17, False),
Version('14w20a', 18, False),
Version('14w20b', 18, False),
Version('14w21a', 19, False),
Version('14w21b', 20, False),
Version('14w25a', 21, False),
Version('14w25b', 22, False),
Version('14w26a', 23, False),
Version('14w26b', 24, False),
Version('14w26c', 25, False),
Version('14w27a', 26, False),
Version('14w27b', 26, False),
Version('14w28a', 27, False),
Version('14w28b', 28, False),
Version('14w29a', 29, False),
Version('14w29a', 29, False),
Version('14w30a', 30, False),
Version('14w30b', 30, False),
Version('14w30c', 31, False),
Version('14w31a', 32, False),
Version('14w32a', 33, False),
Version('14w32b', 34, False),
Version('14w32c', 35, False),
Version('14w32d', 36, False),
Version('14w33a', 37, False),
Version('14w33b', 38, False),
Version('14w33c', 39, False),
Version('14w34a', 40, False),
Version('14w34b', 41, False),
Version('14w34c', 42, False),
Version('14w34d', 43, False),
Version('1.8-pre1', 44, False),
Version('1.8-pre2', 45, False),
Version('1.8-pre3', 46, False),
Version('1.8', 47, True),
Version('1.8.1-pre1', 47, False),
Version('1.8.1-pre2', 47, False),
Version('1.8.1-pre3', 47, False),
Version('1.8.1-pre4', 47, False),
Version('1.8.1-pre5', 47, False),
Version('1.8.1', 47, True),
Version('1.8.2-pre1', 47, False),
Version('1.8.2-pre2', 47, False),
Version('1.8.2-pre3', 47, False),
Version('1.8.2-pre4', 47, False),
Version('1.8.2-pre5', 47, False),
Version('1.8.2-pre6', 47, False),
Version('1.8.2-pre7', 47, False),
Version('1.8.2', 47, True),
Version('1.8.3', 47, True),
Version('1.8.4', 47, True),
Version('1.8.5', 47, True),
Version('1.8.6', 47, True),
Version('1.8.7', 47, True),
Version('1.8.8', 47, True),
Version('1.8.9', 47, True),
Version('15w14a', 48, False),
Version('15w31a', 49, False),
Version('15w31b', 50, False),
Version('15w31c', 51, False),
Version('15w32a', 52, False),
Version('15w32b', 53, False),
Version('15w32c', 54, False),
Version('15w33a', 55, False),
Version('15w33b', 56, False),
Version('15w33c', 57, False),
Version('15w34a', 58, False),
Version('15w34b', 59, False),
Version('15w34c', 60, False),
Version('15w34d', 61, False),
Version('15w35a', 62, False),
Version('15w35b', 63, False),
Version('15w35c', 64, False),
Version('15w35d', 65, False),
Version('15w35e', 66, False),
Version('15w36a', 67, False),
Version('15w36b', 68, False),
Version('15w36c', 69, False),
Version('15w36d', 70, False),
Version('15w37a', 71, False),
Version('15w38a', 72, False),
Version('15w38b', 73, False),
Version('15w39a', 74, False),
Version('15w39b', 74, False),
Version('15w39c', 74, False),
Version('15w40a', 75, False),
Version('15w40b', 76, False),
Version('15w41a', 77, False),
Version('15w41b', 78, False),
Version('15w42a', 79, False),
Version('15w43a', 80, False),
Version('15w43b', 81, False),
Version('15w43c', 82, False),
Version('15w44a', 83, False),
Version('15w44b', 84, False),
Version('15w45a', 85, False),
Version('15w46a', 86, False),
Version('15w47a', 87, False),
Version('15w47b', 88, False),
Version('15w47c', 89, False),
Version('15w49a', 90, False),
Version('15w49b', 91, False),
Version('15w50a', 92, False),
Version('15w51a', 93, False),
Version('15w51b', 94, False),
Version('16w02a', 95, False),
Version('16w03a', 96, False),
Version('16w04a', 97, False),
Version('16w05a', 98, False),
Version('16w05b', 99, False),
Version('16w06a', 100, False),
Version('16w07a', 101, False),
Version('16w07b', 102, False),
Version('1.9-pre1', 103, False),
Version('1.9-pre2', 104, False),
Version('1.9-pre3', 105, False),
Version('1.9-pre4', 106, False),
Version('1.9', 107, True),
Version('1.9.1-pre1', 107, False),
Version('1.9.1-pre2', 108, False),
Version('1.9.1-pre3', 108, False),
Version('1.9.1', 108, True),
Version('1.RV-Pre1', 108, False),
Version('1.9.2', 109, True),
Version('16w14a', 109, False),
Version('16w15a', 109, False),
Version('16w15b', 109, False),
Version('1.9.3-pre1', 109, False),
Version('1.9.3-pre2', 110, False),
Version('1.9.3-pre3', 110, False),
Version('1.9.3', 110, True),
Version('1.9.4', 110, True),
Version('16w20a', 201, False),
Version('16w21a', 202, False),
Version('16w21b', 203, False),
Version('1.10-pre1', 204, False),
Version('1.10-pre2', 205, False),
Version('1.10', 210, True),
Version('1.10.1', 210, True),
Version('1.10.2', 210, True),
Version('16w32a', 301, True),
Version('16w32b', 302, True),
Version('16w33a', 303, True),
Version('16w35a', 304, True),
Version('16w36a', 305, True),
Version('16w38a', 306, True),
Version('16w39a', 307, True),
Version('16w39b', 308, True),
Version('16w39c', 309, True),
Version('16w40a', 310, True),
Version('16w41a', 311, True),
Version('16w42a', 312, True),
Version('16w43a', 313, True),
Version('16w44a', 313, True),
Version('1.11-pre1', 314, True),
Version('1.11', 315, True),
Version('16w50a', 316, True),
Version('1.11.1', 316, True),
Version('1.11.2', 316, True),
Version('17w06a', 317, True),
Version('17w13a', 318, True),
Version('17w13b', 319, True),
Version('17w14a', 320, True),
Version('17w15a', 321, True),
Version('17w16a', 322, True),
Version('17w16b', 323, True),
Version('17w17a', 324, True),
Version('17w17b', 325, True),
Version('17w18a', 326, True),
Version('17w18b', 327, True),
Version('1.12-pre1', 328, True),
Version('1.12-pre2', 329, True),
Version('1.12-pre3', 330, True),
Version('1.12-pre4', 331, True),
Version('1.12-pre5', 332, True),
Version('1.12-pre6', 333, True),
Version('1.12-pre7', 334, True),
Version('1.12', 335, True),
Version('17w31a', 336, True),
Version('1.12.1-pre1', 337, True),
Version('1.12.1', 338, True),
Version('1.12.2-pre1', 339, True),
Version('1.12.2-pre2', 339, True),
Version('1.12.2', 340, True),
Version('17w43a', 341, True),
Version('17w43b', 342, True),
Version('17w45a', 343, True),
Version('17w45b', 344, True),
Version('17w46a', 345, True),
Version('17w47a', 346, True),
Version('17w47b', 347, True),
Version('17w48a', 348, True),
Version('17w49a', 349, True),
Version('17w49b', 350, True),
Version('17w50a', 351, True),
Version('18w01a', 352, True),
Version('18w02a', 353, True),
Version('18w03a', 354, True),
Version('18w03b', 355, True),
Version('18w05a', 356, True),
Version('18w06a', 357, True),
Version('18w07a', 358, True),
Version('18w07b', 359, True),
Version('18w07c', 360, True),
Version('18w08a', 361, True),
Version('18w08b', 362, True),
Version('18w09a', 363, True),
Version('18w10a', 364, True),
Version('18w10b', 365, True),
Version('18w10c', 366, True),
Version('18w10d', 367, True),
Version('18w11a', 368, True),
Version('18w14a', 369, True),
Version('18w14b', 370, True),
Version('18w15a', 371, True),
Version('18w16a', 372, True),
Version('18w19a', 373, True),
Version('18w19b', 374, True),
Version('18w20a', 375, True),
Version('18w20b', 376, True),
Version('18w20c', 377, True),
Version('18w21a', 378, True),
Version('18w21b', 379, True),
Version('18w22a', 380, True),
Version('18w22b', 381, True),
Version('18w22c', 382, True),
Version('1.13-pre1', 383, True),
Version('1.13-pre2', 384, True),
Version('1.13-pre3', 385, True),
Version('1.13-pre4', 386, True),
Version('1.13-pre5', 387, True),
Version('1.13-pre6', 388, True),
Version('1.13-pre7', 389, True),
Version('1.13-pre8', 390, True),
Version('1.13-pre9', 391, True),
Version('1.13-pre10', 392, True),
Version('1.13', 393, True),
Version('18w30a', 394, True),
Version('18w30b', 395, True),
Version('18w31a', 396, True),
Version('18w32a', 397, True),
Version('18w33a', 398, True),
Version('1.13.1-pre1', 399, True),
Version('1.13.1-pre2', 400, True),
Version('1.13.1', 401, True),
Version('1.13.2-pre1', 402, True),
Version('1.13.2-pre2', 403, True),
Version('1.13.2', 404, True),
Version('18w43a', 441, True),
Version('18w43b', 441, True),
Version('18w43c', 442, True),
Version('18w44a', 443, True),
Version('18w45a', 444, True),
Version('18w46a', 445, True),
Version('18w47a', 446, True),
Version('18w47b', 447, True),
Version('18w48a', 448, True),
Version('18w48b', 449, True),
Version('18w49a', 450, True),
Version('18w50a', 451, True),
Version('19w02a', 452, True),
Version('19w03a', 453, True),
Version('19w03b', 454, True),
Version('19w03c', 455, True),
Version('19w04a', 456, True),
Version('19w04b', 457, True),
Version('19w05a', 458, True),
Version('19w06a', 459, True),
Version('19w07a', 460, True),
Version('19w08a', 461, True),
Version('19w08b', 462, True),
Version('19w09a', 463, True),
Version('19w11a', 464, True),
Version('19w11b', 465, True),
Version('19w12a', 466, True),
Version('19w12b', 467, True),
Version('19w13a', 468, True),
Version('19w13b', 469, True),
Version('19w14a', 470, True),
Version('19w14b', 471, True),
Version('1.14 Pre-Release 1', 472, True),
Version('1.14 Pre-Release 2', 473, True),
Version('1.14 Pre-Release 3', 474, True),
Version('1.14 Pre-Release 4', 475, True),
Version('1.14 Pre-Release 5', 476, True),
Version('1.14', 477, True),
Version('1.14.1 Pre-Release 1', 478, True),
Version('1.14.1 Pre-Release 2', 479, True),
Version('1.14.1', 480, True),
Version('1.14.2 Pre-Release 1', 481, True),
Version('1.14.2 Pre-Release 2', 482, True),
Version('1.14.2 Pre-Release 3', 483, True),
Version('1.14.2 Pre-Release 4', 484, True),
Version('1.14.2', 485, True),
Version('1.14.3-pre1', 486, True),
Version('1.14.3-pre2', 487, True),
Version('1.14.3-pre3', 488, True),
Version('1.14.3-pre4', 489, True),
Version('1.14.3', 490, True),
Version('1.14.4-pre1', 491, True),
Version('1.14.4-pre2', 492, True),
Version('1.14.4-pre3', 493, True),
Version('1.14.4-pre4', 494, True),
Version('1.14.4-pre5', 495, True),
Version('1.14.4-pre6', 496, True),
Version('1.14.4-pre7', 497, True),
Version('1.14.4', 498, True),
Version('19w34a', 550, True),
Version('19w35a', 551, True),
Version('19w36a', 552, True),
Version('19w37a', 553, True),
Version('19w38a', 554, True),
Version('19w38b', 555, True),
Version('19w39a', 556, True),
Version('19w40a', 557, True),
Version('19w41a', 558, True),
Version('19w42a', 559, True),
Version('19w44a', 560, True),
Version('19w45a', 561, True),
Version('19w45b', 562, True),
Version('19w46a', 563, True),
Version('19w46b', 564, True),
Version('1.15-pre1', 565, True),
Version('1.15-pre2', 566, True),
Version('1.15-pre3', 567, True),
Version('1.15-pre4', 569, True),
Version('1.15-pre5', 570, True),
Version('1.15-pre6', 571, True),
Version('1.15-pre7', 572, True),
Version('1.15', 573, True),
Version('1.15.1-pre1', 574, True),
Version('1.15.1', 575, True),
Version('1.15.2-pre1', 576, True),
Version('1.15.2-pre2', 577, True),
Version('1.15.2', 578, True),
Version('20w06a', 701, True),
Version('20w07a', 702, True),
Version('20w08a', 703, True),
Version('20w09a', 704, True),
Version('20w10a', 705, True),
Version('20w11a', 706, True),
Version('20w12a', 707, True),
Version('20w13a', 708, True),
Version('20w13b', 709, True),
Version('20w14a', 710, True),
Version('20w15a', 711, True),
Version('20w16a', 712, True),
Version('20w17a', 713, True),
Version('20w18a', 714, True),
Version('20w19a', 715, True),
Version('20w20a', 716, True),
Version('20w20b', 717, True),
Version('20w21a', 718, True),
Version('20w22a', 719, True),
Version('1.16-pre1', 721, True),
Version('1.16-pre2', 722, True),
Version('1.16-pre3', 725, True),
Version('1.16-pre4', 727, True),
Version('1.16-pre5', 729, True),
Version('1.16-pre6', 730, True),
Version('1.16-pre7', 732, True),
Version('1.16-pre8', 733, True),
Version('1.16-rc1', 734, True),
Version('1.16', 735, True),
Version('1.16.1', 736, True),
Version('20w27a', 738, True),
Version('20w28a', 740, True),
Version('20w29a', 741, True),
Version('20w30a', 743, True),
Version('1.16.2-pre1', 744, True),
Version('1.16.2-pre2', 746, True),
Version('1.16.2-pre3', 748, True),
Version('1.16.2-rc1', 749, True),
Version('1.16.2-rc2', 750, True),
Version('1.16.2', 751, True),
Version('1.16.3-rc1', 752, True),
Version('1.16.3', 753, True),
Version('1.16.4-pre1', PRE | 1, True),
Version('1.16.4-pre2', PRE | 2, True),
Version('1.16.4-rc1', PRE | 3, True),
Version('1.16.4', 754, True),
Version('20w45a', PRE | 5, True),
Version('20w46a', PRE | 6, True),
Version('20w48a', PRE | 7, True),
Version('1.17', 755, True),
Version('1.18', 757, True),
]
# The tables below are all populated by initglobals() at the end of this
# module; they are empty placeholders until then.

# An OrderedDict mapping the id string of each known Minecraft version to its
# protocol version number, in chronological order of release.
KNOWN_MINECRAFT_VERSIONS = OrderedDict()

# As KNOWN_MINECRAFT_VERSIONS, but only contains versions supported by pyCraft.
SUPPORTED_MINECRAFT_VERSIONS = OrderedDict()

# As SUPPORTED_MINECRAFT_VERSIONS, but only contains release versions.
RELEASE_MINECRAFT_VERSIONS = OrderedDict()

# A list of the protocol version numbers in KNOWN_MINECRAFT_VERSIONS
# in the same order (chronological) but without duplicates.
KNOWN_PROTOCOL_VERSIONS = []

# A list of the protocol version numbers in SUPPORTED_MINECRAFT_VERSIONS
# in the same order (chronological) but without duplicates.
SUPPORTED_PROTOCOL_VERSIONS = []

# A list of the protocol version numbers in RELEASE_MINECRAFT_VERSIONS
# in the same order (chronological) but without duplicates.
RELEASE_PROTOCOL_VERSIONS = []

# A dict mapping each protocol version number in KNOWN_PROTOCOL_VERSIONS to
# its index within this list (used for efficient comparison of protocol
# versions according to chronological release order).
PROTOCOL_VERSION_INDICES = {}
def initglobals(use_known_records=False):
    '''Rebuild the module-level derived version tables in place.

    When 'use_known_records' is False (the default, kept for backward
    compatibility), only the tables derived from
    SUPPORTED_MINECRAFT_VERSIONS are refreshed; when True, everything is
    first rebuilt from KNOWN_MINECRAFT_VERSION_RECORDS.  All containers
    are mutated in place (clear + refill) so that references held
    elsewhere in the code remain valid, allowing library users to update
    the source tables at runtime and re-derive the rest for dynamic
    version support.
    '''
    if use_known_records:
        # Rebuild everything that derives from KNOWN_MINECRAFT_VERSION_RECORDS.
        KNOWN_MINECRAFT_VERSIONS.clear()
        KNOWN_PROTOCOL_VERSIONS.clear()
        SUPPORTED_MINECRAFT_VERSIONS.clear()
        PROTOCOL_VERSION_INDICES.clear()
        for record in KNOWN_MINECRAFT_VERSION_RECORDS:
            KNOWN_MINECRAFT_VERSIONS[record.id] = record.protocol
            if record.protocol not in KNOWN_PROTOCOL_VERSIONS:
                PROTOCOL_VERSION_INDICES[record.protocol] = \
                    len(KNOWN_PROTOCOL_VERSIONS)
                KNOWN_PROTOCOL_VERSIONS.append(record.protocol)
            if record.supported:
                SUPPORTED_MINECRAFT_VERSIONS[record.id] = record.protocol

    # Rebuild everything that derives from SUPPORTED_MINECRAFT_VERSIONS.
    SUPPORTED_PROTOCOL_VERSIONS.clear()
    RELEASE_MINECRAFT_VERSIONS.clear()
    RELEASE_PROTOCOL_VERSIONS.clear()
    for version_id, protocol in SUPPORTED_MINECRAFT_VERSIONS.items():
        if protocol not in SUPPORTED_PROTOCOL_VERSIONS:
            SUPPORTED_PROTOCOL_VERSIONS.append(protocol)
        # Release ids look like "1.16.2": digits and dots only.
        if re.match(r'\d+(\.\d+)+$', version_id):
            RELEASE_MINECRAFT_VERSIONS[version_id] = protocol
            if protocol not in RELEASE_PROTOCOL_VERSIONS:
                RELEASE_PROTOCOL_VERSIONS.append(protocol)
# Populate all derived version tables from the full known-records list.
initglobals(use_known_records=True)
|
"""
Main entry point for crypy
This should interface the command line with async functions
"""
# Running a pair in one process from command line
# Note the process interface is more complex to use, but more reliable.
# Goal is to provide a process interface (supporting death and rebirth)
# and internally using a usual python API.
# this __main__ file should provide all that's needed for the process interface to be used.
import click
import crypy.config
import crypy.euc
@click.group(name='bitmex', invoke_without_command=True)
@click.option('--test', default=True)
@click.pass_context
def bitmex(ctx, test=True):
    # --test (the default) targets the bitmex sandbox host.
    bitmex_host = 'testnet.bitmex.com' if test else 'bitmex.com'
    # TODO : command line options override config
    bitmex_config = dict(crypy.config.config().items(bitmex_host))
    ctx.bitmex = crypy.euc.ccxt.bitmex(bitmex_config)
    if ctx.invoked_subcommand is None:
        # Default command: show the balance.
        # BUGFIX: a click Command must be invoked through the context;
        # calling bitmex_balance(ctx) directly runs Command.main() with
        # ctx as the argv list and fails.
        ctx.invoke(bitmex_balance)
    # otherwise click invokes the specified subcommand itself (default behavior)
@bitmex.command(name='balance')
@click.pass_context
def bitmex_balance(ctx):
    """Balance from bitmex"""
    # ctx.bitmex is the ccxt exchange attached by the parent `bitmex` group
    # callback. NOTE(review): a child command's Context does not inherit
    # arbitrary attributes from its parent context -- confirm this resolves
    # when run as a subcommand (ctx.obj is the conventional slot for this).
    print(ctx.bitmex.fetch_balance())
@click.group(name='kraken', invoke_without_command=True)
# @click.option('--tier', default=3)
# @click.option('--retry', default=.5)
# @click.option('--crlsleep', default=5)
@click.pass_context
def kraken(ctx):
    # TODO : command line options override config
    kraken_config = dict(crypy.config.config().items('kraken.com'))
    ctx.kraken = crypy.euc.ccxt.kraken(kraken_config)
    if ctx.invoked_subcommand is None:
        # Default command: show the balance.
        # BUGFIX: a click Command must be invoked through the context;
        # calling kraken_balance(ctx) directly runs Command.main() with
        # ctx as the argv list and fails.
        ctx.invoke(kraken_balance)
    # otherwise click invokes the specified subcommand itself (default behavior)
@kraken.command(name='balance')
@click.pass_context
def kraken_balance(ctx):
    # Balance report for kraken; ctx.kraken is the ccxt exchange attached by
    # the parent `kraken` group callback. NOTE(review): child command contexts
    # do not inherit arbitrary attributes from the parent -- confirm this
    # resolves when run as a subcommand. (No docstring on purpose: click would
    # surface it as --help text.)
    print(ctx.kraken.fetch_balance())
# Root CLI. NOTE(review): CommandCollection merges the *subcommands* of its
# sources, so both groups' `balance` commands compete for the same name and
# the groups themselves are not addressable as `cli kraken ...` -- confirm
# this is the intended layout.
cli = click.CommandCollection(sources=[kraken, bitmex])
if __name__ == '__main__':
    cli()
|
from Jumpscale import j
def create_wallet(bot):
    """Chatflow step that creates a new Stellar wallet for the user.

    Selects the TEST network when the default explorer points at testnet or
    devnet, asks until an unused wallet name is given, funds TEST wallets via
    the friendbot, adds the known trustlines, and shows the new address.
    """
    explorer_url = j.clients.explorer.default.url
    on_test_network = "testnet" in explorer_url or "devnet" in explorer_url
    network = "TEST" if on_test_network else "STD"
    name = bot.string_ask("Please provide your wallet name")
    while j.clients.stellar.exists(name):
        name = bot.string_ask("Wallet name already exists please choose another name")
    wallet = j.clients.stellar.new(name=name, network=network)
    if network == "TEST":
        # TEST wallets can be activated for free through the friendbot.
        bot.md_show("Will register your wallet")
        wallet.activate_through_friendbot()
    bot.md_show("Will setup trustlines for TFT , FreeTFT and TFTA")
    for asset_code in ("TFT", "FreeTFT", "TFTA"):
        wallet.add_known_trustline(asset_code)
    res = f"""\
# Wallet has been created
Your address `{wallet.address}`
"""
    bot.md_show(j.core.text.strip(res))
def manage_wallet(bot, walletname):
    """Chatflow step showing a wallet's address (as a QR code) and balances."""
    wallet = j.clients.stellar.get(name=walletname)
    balances = wallet.get_balance()
    # Jinja template rendered below; `wallet` and `balances` are passed
    # through as template variables.
    template = """\
**Address:** `{{wallet.address}}`
**Balances:**
{% for balance in balances.balances -%}
- {{balance.balance}} {{balance.asset_code}}
{% endfor %}
"""
    rendered = j.tools.jinja2.template_render(text=template, wallet=wallet, balances=balances)
    bot.qrcode_show(
        data=wallet.address,
        title=f"Wallet info {wallet.name}",
        msg=rendered,
        scale=5,
    )
def chat(bot):
    """Entry point of the wallet chatflow: create a wallet or manage one."""
    user_info = bot.user_info()
    j.sal.reservation_chatflow.validate_user(user_info)
    # Map menu label -> wallet name for every existing wallet.
    wallet_by_label = {
        f"Manage wallet {wallet.name}": wallet.name
        for wallet in j.clients.stellar.find()
    }
    options = ["Create new wallet"] + list(wallet_by_label)
    selected = bot.single_choice("What would you like to do?", options)
    if selected == "Create new wallet":
        create_wallet(bot)
    else:
        manage_wallet(bot, wallet_by_label[selected])
|
"""Metadata objects in support of submissions."""
from typing import Optional, List
from arxiv.taxonomy import Category
from dataclasses import dataclass, asdict, field
@dataclass
class Classification:
    """A classification for a :class:`.domain.submission.Submission`."""

    # Category drawn from :mod:`arxiv.taxonomy`.
    category: Category
@dataclass
class License:
    """A license governing distribution of the submission.

    The ``uri`` identifies the license; the human-readable ``name`` is
    optional.
    """

    uri: str
    name: Optional[str] = None
|
from test_plus.test import TestCase
from nrlm.league.models import *
class PlayerModelTest(TestCase):
    """str() of a Player is its name."""

    def test_string_representation(self):
        subject = Player(name='Test User')
        self.assertEqual(str(subject), subject.name)
class EventModelTest(TestCase):
    """str() of an Event is its name."""

    def test_string_representation(self):
        subject = Event(name='Test Event')
        self.assertEqual(str(subject), subject.name)
class IdentityModelTest(TestCase):
    """str() of an Identity is its name."""

    def test_string_representation(self):
        subject = Identity(name='Test Identity')
        self.assertEqual(str(subject), subject.name)
class GameModelTest(TestCase):
    """Integrity rules enforced when creating a Game."""

    def test_cannot_save_same_players(self):
        # Fixtures: one player, two identities, one event.
        Player.objects.create(name='Test User 1')
        Identity.objects.create(name='Identity 1')
        Identity.objects.create(name='Identity 2')
        Event.objects.create(name='Test Event 1')
        with self.assertRaises(Exception) as cm:
            # Same player on both sides must be rejected.
            Game.objects.create(
                player=Player.objects.get(name='Test User 1'),
                identity=Identity.objects.get(name='Identity 1'),
                is_corp=False,
                played_against_player=Player.objects.get(name='Test User 1'),
                played_against_identity=Identity.objects.get(name='Identity 2'),
                points=0,
                round_num=1,
                event=Event.objects.get(name='Test Event 1'),
            )
        self.assertEqual(
            str(cm.exception),
            'Attempted to have same two players play each other')

    def test_cannot_save_same_identities(self):
        # Fixtures: two players, one shared identity, one event.
        Player.objects.create(name='Test User 1')
        Player.objects.create(name='Test User 2')
        Identity.objects.create(name='Problem')
        Event.objects.create(name='Test Event 1')
        with self.assertRaises(Exception) as cm:
            # Same identity on both sides must be rejected.
            Game.objects.create(
                player=Player.objects.get(name='Test User 1'),
                identity=Identity.objects.get(name='Problem'),
                is_corp=False,
                played_against_player=Player.objects.get(name='Test User 2'),
                played_against_identity=Identity.objects.get(name='Problem'),
                points=0,
                round_num=1,
                event=Event.objects.get(name='Test Event 1'),
            )
        self.assertEqual(
            str(cm.exception),
            'Attempted to have the same identities play each other')
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
    """Network REST API operation definition.

    AutoRest-generated msrest model; hand edits will be lost on regeneration
    (see file header).

    :param name: Operation name: {provider}/{resource}/{operation}
    :type name: str
    :param display: Display metadata associated with the operation.
    :type display: ~azure.mgmt.network.v2018_04_01.models.OperationDisplay
    :param origin: Origin of the operation.
    :type origin: str
    :param service_specification: Specification of the service.
    :type service_specification:
     ~azure.mgmt.network.v2018_04_01.models.OperationPropertiesFormatServiceSpecification
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    # The dotted 'properties.serviceSpecification' key flattens the nested
    # REST 'properties' object onto this model.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'OperationPropertiesFormatServiceSpecification'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword argument defaulting to None.
        super(Operation, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.display = kwargs.get('display', None)
        self.origin = kwargs.get('origin', None)
        self.service_specification = kwargs.get('service_specification', None)
|
#Zachary Weeden
#@zweed4u displays others whom have a similar schedule as you on RIT MyCourses
#Tested on Python 2.6.6
import mechanize
import urllib
import cookielib
from bs4 import BeautifulSoup
import html2text
import re
import sys
import StringIO
import getpass
from easygui import passwordbox
import collections
try:
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(False)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent', 'Chrome')]
# The site we will navigate into, handling it's session
br.open('https://mycourses.rit.edu/')
# Inspect name of the form
'''
for f in br.forms():
print f
'''
# Select the second (index one) form - the first form is a search query box
br.select_form(nr=0)
# User credentials
#####HANDLE LOGIN CHECKING#####
print " _ __ _ _ _ "
print " ___| | __ _ ___ ___ / _\ |_ __ _| | | _____ _ __ "
print " / __| |/ _` / __/ __|\ \| __/ _` | | |/ / _ \ '__|"
print " | (__| | (_| \__ \__ \_\ \ || (_| | | < __/ | "
print " \___|_|\__,_|___/___/\__/\__\__,_|_|_|\_\___|_| "
# print "\n"
print " _ ________ __ _ _ _ _ _"
print "| | _ |___ /\ \ / / | | || | | | | |"
print "| |__ _ _(_) / / \ \ /\ / /__ ___ __| | || |_| | | |"
print "| _ \| | | | / / \ \/ \/ / _ \/ _ \/ _` |__ _| | | |"
print "| |_) | |_| |_ / /__ \ /\ / __/ __/ (_| | | | | |__| |"
print "|_.__/ \__, (_) /_____| \/ \/ \___|\___|\__,_| |_| \____/ "
print " __/ | "
print " |___/ "
print "\n"
user = raw_input("What is your name shown as on MyCourses? ")
username = raw_input("Username: ")
print "Password: "
password = passwordbox("Password: ")
#password = getpass.getpass() #-> echos pass with IDLE
#password = raw_input("Password: ") -> echos pass
br.form['username'] = username
br.form['password'] = password
# Login
br.submit()
#Prints html of main page after login
#print(br.open('https://mycourses.rit.edu/d2l/lp/ouHome/defaultHome.d2l').read())
regex = '<a class="vui-link vui-outline d2l-link d2l-left" href="(.+?)" title="(.+?)">(.+?)</a>'
pattern = re.compile(regex)
regex2 = '<a class="vui-outline" onclick="EmailUser((.+?));;return false;" href="javascript://" title="(.+?)">(.+?)</a>'
pattern2 = re.compile(regex2)
###USE IN WHILE LOOP TO PRINT OUT STR
###PROMPT USER FOR HOW MANY CLASSES&&LABS ARE TAKEN AND USE THAT AS COUNTER VAR RATHER THAN i
htmltext = br.open('https://mycourses.rit.edu/d2l/lp/ouHome/defaultHome.d2l').read()
classes = re.findall(pattern,htmltext)
noClass = int(input("How Many Classes/Labs/Recitations are taken? "))
print "\n"
class1 = []
class2 = []
class3 = []
class4 = []
class5 = []
class6 = []
class7 = []
class8 = []
class9 = []
class10 = []
class11 = []
class12 = []
'''
class1Name = "Unavailable"
class2Name = "Unavailable"
class3Name = "Unavailable"
class4Name = "Unavailable"
class5Name = "Unavailable"
class6Name = "Unavailable"
class7Name = "Unavailable"
class8Name = "Unavailable"
class9Name = "Unavailable"
class10Name = "Unavailable"
class11Name = "Unavailable"
class12Name = "Unavailable"
'''
linkToList = []
#urls = []
i = 0
print "Classes that are being considered: \n"
while i < noClass: # 9 = 9 classes/labs
j = 0
course = str(classes[i]).split("', '")[1]
course = str(course).split("Enter ")[1]
'''
PRINTS HEADER FOR COURSE
'''
print course
print "================================================"
if i == 0:
class1.append(course)
class1Name = course
if i == 1:
class2.append(course)
class2Name = course
if i == 2:
class3.append(course)
class3Name = course
if i == 3:
class4.append(course)
class4Name = course
if i == 4:
class5.append(course)
class5Name = course
if i == 5:
class6.append(course)
class6Name = course
if i == 6:
class7.append(course)
class7Name = course
if i == 7:
class8.append(course)
class8Name = course
if i == 8:
class9.append(course)
class9Name = course
if i == 9:
class10.append(course)
class10Name = course
if i == 10:
class11.append(course)
class11Name = course
if i == 11:
class12.append(course)
class12Name = course
classLink = str(classes[i]).split("('")[1]
classLink = str(classLink).split("',")[0]
classId = str(classLink).split("=")[1]
listLink = "https://mycourses.rit.edu/d2l/lms/classlist/classlist.d2l?ou="+str(classId)
linkToList.append(listLink)
rosterPage = br.open(str(linkToList[i])).read()
rosterNames = re.findall(pattern2,rosterPage)
###WHILE LOOP NEEDED TO GET LEN OF CLASSLIST AND PRINT EACH
while j < len(rosterNames): # no of students
name = str(rosterNames[j]).split("Compose email to ")[1]
name = str(name).split("', ")[0]
if i == 0:
if name == str(user):
name=name
else:
class1.append(name)
if i == 1:
if name == str(user):
name=name
else:
class2.append(name)
if i == 2:
if name == str(user):
name=name
else:
class3.append(name)
if i == 3:
if name == str(user):
name=name
else:
class4.append(name)
if i == 4:
if name == str(user):
name=name
else:
class5.append(name)
if i == 5:
if name == str(user):
name=name
else:
class6.append(name)
if i == 6:
if name == str(user):
name=name
else:
class7.append(name)
if i == 7:
if name == str(user):
name=name
else:
class8.append(name)
if i == 8:
if name == str(user):
name=name
else:
class9.append(name)
if i == 9:
if name == str(user):
name=name
else:
class10.append(name)
if i == 10:
if name == str(user):
name=name
else:
class11.append(name)
if i == 11:
if name == str(user):
name=name
else:
class12.append(name)
#PRINTS INDIVIDUAL NAMES - INCLUDES SELF USER
#print str(name)
j+=1
#HELPS KEEP DISPLAY CLEAN
#print "\n"
i+=1
'''
#### PRINT ARRAYS OF CLASS ROSTERS ####
if not class1:
print "Class1 Array wasn't able to be populated."
else:
print class1Name
print class1
print "\n"
if not class2:
print "Class2 Array wasn't able to be populated."
else:
print class2Name
print class2
print "\n"
if not class3:
print "Class3 Array wasn't able to be populated."
else:
print class3Name
print class3
print "\n"
if not class4:
print "Class4 Array wasn't able to be populated."
else:
print class4Name
print class4
print "\n"
if not class5:
print "Class5 Array wasn't able to be populated."
else:
print class5Name
print class5
print "\n"
if not class6:
print "Class6 Array wasn't able to be populated."
else:
print class6Name
print class6
print "\n"
if not class7:
print "Class7 Array wasn't able to be populated."
else:
print class7Name
print class7
print "\n"
if not class8:
print "Class8 Array wasn't able to be populated."
else:
print class8Name
print class8
print "\n"
if not class9:
print "Class9 Array wasn't able to be populated."
else:
print class9Name
print class9
print "\n"
if not class10:
print "Class10 Array wasn't able to be populated."
else:
print class10Name
print class10
print "\n"
if not class11:
print "Class11 Array wasn't able to be populated."
else:
print class11Name
print class11
print "\n"
if not class12:
print "Class12 Array wasn't able to be populated."
else:
print class12Name
print class12
'''
print "\n"
'''
findname = raw_input("Search for person (eg. John Doe): ")
print "\n"
if findname in class1:
print str(findname) +" is in [" + str(class1Name) + "] with you."
if findname in class2:
print str(findname) +" is in [" + str(class2Name) + "] with you."
if findname in class3:
print str(findname) +" is in [" + str(class3Name) + "] with you."
if findname in class4:
print str(findname) +" is in [" + str(class4Name) + "] with you."
if findname in class5:
print str(findname) +" is in [" + str(class5Name) + "] with you."
if findname in class6:
print str(findname) +" is in [" + str(class6Name) + "] with you."
if findname in class7:
print str(findname) +" is in [" + str(class7Name) + "] with you."
if findname in class8:
print str(findname) +" is in [" + str(class8Name) + "] with you."
if findname in class9:
print str(findname) +" is in [" + str(class9Name) + "] with you."
if findname in class10:
print str(findname) +" is in [" + str(class10Name) + "] with you."
if findname in class11:
print str(findname) +" is in [" + str(class11Name) + "] with you."
if findname in class12:
print str(findname) +" is in [" + str(class12Name) + "] with you."
'''
allClassAndName = class1+class2+class3+class4+class5+class6+class7+class8+class9+class10+class11+class12
'''
if findname not in allClassAndName:
print "'"+str(findname) + "'" + " is not in any of your searched classes.\nTry inputting exact/full name as shown in MyCourses. (Case-sensitive)"
'''
print "\n"
#print allClassAndName
print "\n"
duplicates = []
duplicates = set([x for x in allClassAndName if allClassAndName.count(x) > 1])
dups = list(duplicates)
print "People you share more than one class with: "
print "============================================="
#print dups
p = 0
while p < len(dups):
#print list(duplicates)[s]
test = 0
while test < 12:
count = 0
if dups[p] in class1:
print dups[p] + " is in " + class1Name
count+=1
class1.remove(dups[p])
if dups[p] in class2:
print dups[p] + " is in " + class2Name
count+=1
class2.remove(dups[p])
if dups[p] in class3:
print dups[p] + " is in " + class3Name
count+=1
class3.remove(dups[p])
if dups[p] in class4:
print dups[p] + " is in " + class4Name
count+=1
class4.remove(dups[p])
if dups[p] in class5:
print dups[p] + " is in " + class5Name
count+=1
class5.remove(dups[p])
if dups[p] in class6:
print dups[p] + " is in " + class6Name
count+=1
class6.remove(dups[p])
if dups[p] in class7:
print dups[p] + " is in " + class7Name
count+=1
class7.remove(dups[p])
if dups[p] in class8:
print dups[p] + " is in " + class8Name
count+=1
class8.remove(dups[p])
if dups[p] in class9:
print dups[p] + " is in " + class9Name
count+=1
class9.remove(dups[p])
if dups[p] in class10:
print dups[p] + " is in " + class10Name
count+=1
class10.remove(dups[p])
if dups[p] in class11:
print dups[p] + " is in " + class11Name
count+=1
class11.remove(dups[p])
if dups[p] in class12:
print dups[p] + " is in " + class12Name
count+=1
class12.remove(dups[p])
print "[ " + dups[p] + " ] " + " is in " + str(count) + " of your classes"
break
test+=1
print "\n"
#dups.remove(dups[s])
p+=1
print "\n"
#print dups
### ********** TO BE DONE! REMOVE ALL OCCURENCES OF USER'S NAME FROM LIST###
####### Instead of going here, use unique 6 digit number and append to url
'''
classtext = br.open(str(urls[i])).read()
classList = re.findall(pattern2,classtext)
#print classList[2]
if i==8: # Conditonal neede because not all classes have classlist tab in same area
listLink = str(classList[0]).split("('")[1]
listLink = str(listLink).split("',")[0]
else:
listLink = str(classList[2]).split("('")[1]
listLink = str(listLink).split("',")[0]
listLink = "https://mycourses.rit.edu"+str(listLink)
linkToList.append(listLink)
print linkToList[i]
'''
except:
print "~~~Exception Thrown! Most likely incorrect login credentials.~~~"
|
import random
from common.helpers import drain_player_hunger_and_thirst, get_hunger_and_thirst_warnings
from common.world import get_tile_from
from models.item import Item
from models.player import Player
from models.player_inventory import PlayerInventory
def run_command(message, session):
    """Handle the forage command for the message author.

    On a forageable tile: pick a weighted random forageable item, drain the
    player's hunger/thirst, add the item to their inventory, and report the
    find.  Otherwise report failure.  Hunger/thirst warnings are appended to
    the response either way.
    """
    player = session.query(Player).filter(Player.id == message.author.id).first()
    tile = get_tile_from(player.x, player.y, session)
    if tile.can_forage:
        drop_rows = session.query(Item).filter(Item.forage_drop_chance > 0).all()
        # item id -> drop weight, fed straight into random.choices.
        weights_by_id = {row.id: row.forage_drop_chance for row in drop_rows}
        picked_id = random.choices(list(weights_by_id.keys()), weights_by_id.values())[0]
        foraged_item = session.query(Item).filter(Item.id == picked_id).first()
        drain_player_hunger_and_thirst(player)
        inventory_row = session.query(PlayerInventory).filter(PlayerInventory.player_id == message.author.id,
                                                              PlayerInventory.item_id == foraged_item.id).first()
        if inventory_row:
            inventory_row.item_amount += 1
        else:
            player.inventory.append(PlayerInventory(player_id=player.id, item_id=foraged_item.id, item_amount=1))
        response = ':mag_right: {} went foraging and found a {}'.format(player.name, foraged_item.name)
    else:
        response = 'You didn\'t find anything here, maybe try somewhere else.'
    return response + get_hunger_and_thirst_warnings(player)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from collections import Counter
from typing import Callable, Dict, Iterator, Optional, TypeVar
from torch.utils.data import IterDataPipe
T = TypeVar("T")
U = TypeVar("U")


class UnderSampler(IterDataPipe[T]):
    r""":class:`UnderSampler`.

    Base iterable-datapipe wrapper for under-sampling strategies.

    Args:
        datapipe: Iterable datapipe to undersample from.
        row_to_label: Function called over each item from datapipe to generate
            label/class.
        seed: Random seed for reproducibility.
    """

    def __init__(
        self,
        datapipe: IterDataPipe[T],
        row_to_label: Callable[[T], U],
        seed: Optional[int] = None,
    ) -> None:
        # Keep the raw seed for introspection; sampling uses a dedicated RNG
        # instance so it is isolated from the global random state.
        self.seed = seed
        self.rng = random.Random(seed)
        self.datapipe = datapipe
        self.row_to_label = row_to_label

    def __iter__(self) -> Iterator[T]:
        # Concrete sampling strategies are provided by subclasses.
        raise NotImplementedError
class DistributionUnderSampler(UnderSampler[T]):
    r""":class:`DistributionUnderSampler`.

    Rejection-sampling under-sampler for when the desired output label/class
    distribution is known.

    Args:
        datapipe: Iterable datapipe to undersample from.
        row_to_label: Function called over each item from datapipe to generate
            label/class.
        output_dist: Desired label/class distribution: class -> desired
            percentage.  Values need not be normalized to sum to 1.
        input_dist: Optional label/class distribution of the input: class ->
            percentage (not necessarily normalized).  When omitted, a running
            estimate is maintained while iterating; when given, it is used
            as-is and never updated.
        seed: Random seed for reproducibility.

    References:
        - https://www.wikiwand.com/en/Rejection_sampling

    NOTE: This class is adapted from https://github.com/MaxHalford/pytorch-resample.
    """

    def __init__(
        self,
        datapipe: IterDataPipe[T],
        row_to_label: Callable[[T], U],
        output_dist: Dict[U, float],
        input_dist: Optional[Dict[U, float]] = None,
        seed: Optional[int] = None,
    ) -> None:
        # Validate the requested distributions before doing anything else.
        if any(v < 0 for v in output_dist.values()):
            raise ValueError("Only non-negative values are allowed in output_dist.")
        if input_dist:
            if any(v <= 0 for v in input_dist.values()):
                raise ValueError("Only positive values are allowed in input_dist.")
            if not (output_dist.keys() <= input_dist.keys()):
                raise ValueError(
                    "All keys in output_dist must be present in input_dist."
                )
        super().__init__(datapipe, row_to_label, seed=seed)
        self.output_dist: Counter[U] = Counter(output_dist)
        # Counter(None) is an empty Counter, so this also covers input_dist=None.
        self.input_dist: Counter[U] = Counter(input_dist)  # pyre-ignore[6]
        self._update_input_dist: bool = not bool(input_dist)
        # The pivot is the class for which no undersampling is performed.
        self._pivot: Optional[U] = None

    def __iter__(self) -> Iterator[T]:
        target = self.output_dist
        observed = self.input_dist
        for item in self.datapipe:
            label = self.row_to_label(item)
            if self._update_input_dist:
                observed[label] += 1
            # Re-derive the pivot whenever the input estimate may have changed.
            if self._pivot is None or self._update_input_dist:
                self._pivot = max(
                    observed.keys(), key=lambda cls: target[cls] / observed[cls]
                )
            numerator = target[label] * observed[self._pivot]  # pyre-ignore[36]
            denominator = target[self._pivot] * observed[label]  # pyre-ignore[36]
            accept = (numerator / denominator) if denominator > 0 else 0
            if self.rng.random() < accept:
                yield item
class ProportionUnderSampler(UnderSampler[T]):
    r""":class:`ProportionUnderSampler`.

    Under-sampler driven by an explicit per-class keep probability.

    Args:
        datapipe: Iterable datapipe to undersample from.
        row_to_label: Function called over each item from datapipe to generate
            label/class.
        proportions: Fraction of rows to keep per label/class, e.g. a value of
            0.3 for class c keeps roughly 30% of rows labelled c.
        seed: Random seed for reproducibility.
    """

    def __init__(
        self,
        datapipe: IterDataPipe[T],
        row_to_label: Callable[[T], U],
        proportions: Dict[U, float],
        seed: Optional[int] = None,
    ) -> None:
        # Same predicate as the original `any(p < 0 or p > 1 ...)` check,
        # spelled as a loop (identical behavior, including for NaN).
        for fraction in proportions.values():
            if fraction < 0 or fraction > 1:
                raise ValueError("All proportions must be within 0 and 1.")
        super().__init__(datapipe, row_to_label, seed=seed)
        self.proportions = proportions

    def __iter__(self) -> Iterator[T]:
        keep_prob = self.proportions
        label_of = self.row_to_label
        draw = self.rng.random
        for item in self.datapipe:
            if draw() < keep_prob[label_of(item)]:
                yield item
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import util as distribution_util
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Short alias for the module under test, used throughout this file.
du = distribution_util
def try_import(name):  # pylint: disable=invalid-name
  """Import and return module `name`, or None (with a warning) if unavailable."""
  try:
    return importlib.import_module(name)
  except ImportError as e:
    tf_logging.warning("Could not import %s: %s" % (name, str(e)))
    return None
# Optional scipy.special; None when scipy is not installed (try_import warns).
special = try_import("scipy.special")
def _logit(x):
  """Map probabilities to log-odds, elementwise: log(x) - log1p(-x)."""
  arr = np.asarray(x)
  return np.log(arr) - np.log1p(-arr)
class AssertCloseTest(test.TestCase):
  """Graph-mode tests for du.assert_integer_form via fed placeholders."""

  @test_util.run_deprecated_v1
  def testAssertIntegerForm(self):
    # This should only be detected as an integer.
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.placeholder(dtypes.float32)
    # First component isn't less than float32.eps = 1e-7
    z = array_ops.placeholder(dtypes.float32)
    # This shouldn't be detected as an integer.
    w = array_ops.placeholder(dtypes.float32)
    # One shared feed_dict covers all four placeholders below.
    feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],
                 z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}
    with self.cached_session():
      # All-integral values pass the assertion.
      with ops.control_dependencies([du.assert_integer_form(x)]):
        array_ops.identity(x).eval(feed_dict=feed_dict)

      # Each remaining input has at least one non-integer component and
      # must trip the op-level assertion at eval time.
      with self.assertRaisesOpError("has non-integer components"):
        with ops.control_dependencies(
            [du.assert_integer_form(y)]):
          array_ops.identity(y).eval(feed_dict=feed_dict)

      with self.assertRaisesOpError("has non-integer components"):
        with ops.control_dependencies(
            [du.assert_integer_form(z)]):
          array_ops.identity(z).eval(feed_dict=feed_dict)

      with self.assertRaisesOpError("has non-integer components"):
        with ops.control_dependencies(
            [du.assert_integer_form(w)]):
          array_ops.identity(w).eval(feed_dict=feed_dict)
class MaybeGetStaticTest(test.TestCase):
  """Checks du.maybe_get_static_value on ints, ndarrays, constants and placeholders."""

  @test_util.run_in_graph_and_eager_modes
  def testGetStaticInt(self):
    value = 2
    self.assertEqual(value, du.maybe_get_static_value(value))
    self.assertAllClose(
        np.array(2.), du.maybe_get_static_value(value, dtype=np.float64))

  @test_util.run_in_graph_and_eager_modes
  def testGetStaticNumpyArray(self):
    value = np.array(2, dtype=np.int32)
    self.assertEqual(value, du.maybe_get_static_value(value))
    self.assertAllClose(
        np.array(2.), du.maybe_get_static_value(value, dtype=np.float64))

  @test_util.run_in_graph_and_eager_modes
  def testGetStaticConstant(self):
    tensor = constant_op.constant(2, dtype=dtypes.int32)
    self.assertEqual(
        np.array(2, dtype=np.int32), du.maybe_get_static_value(tensor))
    self.assertAllClose(
        np.array(2.), du.maybe_get_static_value(tensor, dtype=np.float64))

  @test_util.run_deprecated_v1
  def testGetStaticPlaceholder(self):
    # A fed placeholder has no static value, with or without a dtype hint.
    tensor = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
    self.assertEqual(None, du.maybe_get_static_value(tensor))
    self.assertEqual(None, du.maybe_get_static_value(tensor, dtype=np.float64))
class GetLogitsAndProbsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testImproperArguments(self):
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=None, probs=None)
with self.assertRaises(ValueError):
du.get_logits_and_probs(logits=[0.1], probs=[0.1])
@test_util.run_in_graph_and_eager_modes
def testLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = _logit(p)
new_logits, new_p = du.get_logits_and_probs(
logits=logits, validate_args=True)
self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)
self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)
@test_util.run_in_graph_and_eager_modes
def testLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
new_logits, new_p = du.get_logits_and_probs(
logits=logits, multidimensional=True, validate_args=True)
self.assertAllClose(self.evaluate(new_p), p)
self.assertAllClose(self.evaluate(new_logits), logits)
@test_util.run_in_graph_and_eager_modes
def testProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
new_logits, new_p = du.get_logits_and_probs(probs=p, validate_args=True)
self.assertAllClose(_logit(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
new_logits, new_p = du.get_logits_and_probs(
probs=p, multidimensional=True, validate_args=True)
self.assertAllClose(np.log(p), self.evaluate(new_logits))
self.assertAllClose(p, self.evaluate(new_p))
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
p2 = [-1, 0.2, 0.5, 0.3, .2]
# Component greater than 1.
p3 = [2, 0.2, 0.5, 0.3, .2]
_, prob = du.get_logits_and_probs(probs=p, validate_args=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = du.get_logits_and_probs(probs=p2, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(probs=p2, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs has components greater than 1"):
_, prob = du.get_logits_and_probs(probs=p3, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(probs=p3, validate_args=False)
self.evaluate(prob)
@test_util.run_in_graph_and_eager_modes
def testProbabilityValidateArgsMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component less than 0. Still sums to 1.
p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component greater than 1. Does not sum to 1.
p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
# Does not sum to 1.
p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)
_, prob = du.get_logits_and_probs(probs=p, multidimensional=True)
self.evaluate(prob)
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = du.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p2, multidimensional=True, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError(
"(probs has components greater than 1|probs does not sum to 1)"):
_, prob = du.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p3, multidimensional=True, validate_args=False)
self.evaluate(prob)
with self.assertRaisesOpError("probs does not sum to 1"):
_, prob = du.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=True)
self.evaluate(prob)
_, prob = du.get_logits_and_probs(
probs=p4, multidimensional=True, validate_args=False)
self.evaluate(prob)
  @test_util.run_deprecated_v1
  def testProbsMultidimShape(self):
    """`probs` with too many classes for the dtype must be rejected.

    2**11 + 1 classes triggers "Number of classes exceeds `dtype` precision"
    for float16 (presumably because its 11-bit significand cannot represent
    that many distinct class indices exactly — TODO confirm against
    `get_logits_and_probs`). Statically-known shapes raise ValueError at
    graph-construction time; placeholder shapes raise at run time.
    """
    with self.cached_session():
      with self.assertRaises(ValueError):
        p = array_ops.ones([int(2**11+1)], dtype=np.float16)
        du.get_logits_and_probs(
            probs=p, multidimensional=True, validate_args=True)
      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        # Placeholder: the shape is only known once the feed is supplied.
        p = array_ops.placeholder(dtype=dtypes.float16)
        _, prob = du.get_logits_and_probs(
            probs=p, multidimensional=True, validate_args=True)
        prob.eval(feed_dict={p: np.ones([int(2**11+1)])})
  @test_util.run_deprecated_v1
  def testLogitsMultidimShape(self):
    """Mirror of testProbsMultidimShape for the `logits` input path."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        l = array_ops.ones([int(2**11+1)], dtype=np.float16)
        du.get_logits_and_probs(
            logits=l, multidimensional=True, validate_args=True)
      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        # Placeholder: the shape is only known once the feed is supplied.
        l = array_ops.placeholder(dtype=dtypes.float16)
        logit, _ = du.get_logits_and_probs(
            logits=l, multidimensional=True, validate_args=True)
        logit.eval(feed_dict={l: np.ones([int(2**11+1)])})
class EmbedCheckCategoricalEventShapeTest(test.TestCase):
  """Tests for `du.embed_check_categorical_event_shape`."""
  @test_util.run_deprecated_v1
  def testTooSmall(self):
    """Rejects parameters with fewer than 2 categories."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        # Static shape: rejected at graph-construction time.
        param = array_ops.ones([1], dtype=np.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)
      with self.assertRaisesOpError(
          "must have at least 2 events"):
        # Dynamic shape: rejected at run time via the embedded assertion.
        param = array_ops.placeholder(dtype=dtypes.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)
        checked_param.eval(feed_dict={param: np.ones([1])})
  @test_util.run_deprecated_v1
  def testTooLarge(self):
    """Rejects more categories than the dtype can index exactly."""
    with self.cached_session():
      with self.assertRaises(ValueError):
        param = array_ops.ones([int(2**11+1)], dtype=dtypes.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)
      with self.assertRaisesOpError(
          "Number of classes exceeds `dtype` precision"):
        param = array_ops.placeholder(dtype=dtypes.float16)
        checked_param = du.embed_check_categorical_event_shape(
            param)
        checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})
  @test_util.run_in_graph_and_eager_modes
  def testUnsupportedDtype(self):
    """Quantized dtypes are not valid categorical parameters."""
    param = ops.convert_to_tensor(
        np.ones([2**11 + 1]).astype(dtypes.qint16.as_numpy_dtype),
        dtype=dtypes.qint16)
    with self.assertRaises(TypeError):
      du.embed_check_categorical_event_shape(param)
class EmbedCheckIntegerCastingClosedTest(test.TestCase):
  """Tests for `du.embed_check_integer_casting_closed`.

  Each case feeds a value that cannot be losslessly cast to the target
  integer dtype and expects the embedded run-time assertion to fire.
  """
  @test_util.run_deprecated_v1
  def testCorrectlyAssertsNonnegative(self):
    """Negative inputs are rejected when casting to an integer dtype."""
    with self.cached_session():
      with self.assertRaisesOpError("Elements must be non-negative"):
        x = array_ops.placeholder(dtype=dtypes.float16)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.int16)
        x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})
  @test_util.run_deprecated_v1
  def testCorrectlyAssertsIntegerForm(self):
    """Non-integral float inputs are rejected.

    Renamed from the misspelled `testCorrectlyAssersIntegerForm` so the
    name reads correctly in test reports; behavior is unchanged.
    """
    with self.cached_session():
      with self.assertRaisesOpError("Elements must be int16-equivalent."):
        x = array_ops.placeholder(dtype=dtypes.float16)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.int16)
        x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})
  @test_util.run_deprecated_v1
  def testCorrectlyAssertsLargestPossibleInteger(self):
    """Values above int16 max (32767) are rejected."""
    with self.cached_session():
      with self.assertRaisesOpError("Elements cannot exceed 32767."):
        x = array_ops.placeholder(dtype=dtypes.int32)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.int16)
        x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})
  @test_util.run_deprecated_v1
  def testCorrectlyAssertsSmallestPossibleInteger(self):
    """Values below uint16 min are rejected even with assert_nonnegative=False."""
    with self.cached_session():
      with self.assertRaisesOpError("Elements cannot be smaller than 0."):
        x = array_ops.placeholder(dtype=dtypes.int32)
        x_checked = du.embed_check_integer_casting_closed(
            x, target_dtype=dtypes.uint16, assert_nonnegative=False)
        x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})
@test_util.run_all_in_graph_and_eager_modes
class LogCombinationsTest(test.TestCase):
  """Tests for `du.log_combinations` (log multinomial coefficients)."""
  def testLogCombinationsBinomial(self):
    """Matches log(scipy.special.binom) on two-bucket (binomial) counts."""
    n = [2, 5, 12, 15]
    k = [1, 2, 4, 11]
    if not special:
      # `special` is presumably scipy.special, imported conditionally at
      # module top — skip when unavailable. TODO confirm against the import.
      return
    log_combs = np.log(special.binom(n, k))
    n = np.array(n, dtype=np.float32)
    # Each row [k, n-k] describes the same binomial split as (n, k) above.
    counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
    log_binom = du.log_combinations(n, counts)
    self.assertEqual([4], log_binom.get_shape())
    self.assertAllClose(log_combs, self.evaluate(log_binom))
  def testLogCombinationsShape(self):
    """The result drops the trailing counts dimension."""
    # Shape [2, 2]
    n = [[2, 5], [12, 15]]
    n = np.array(n, dtype=np.float32)
    # Shape [2, 2, 4]
    counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
    log_binom = du.log_combinations(n, counts)
    self.assertEqual([2, 2], log_binom.get_shape())
class DynamicShapeTest(test.TestCase):
  """Tests for `du.same_dynamic_shape`."""
  @test_util.run_deprecated_v1
  def testSameDynamicShape(self):
    """Compares scalars, vectors and matrices in all static/dynamic mixes.

    Placeholders carry only partially-known shapes, so the comparison must
    be resolved at run time from the values fed to `.eval`.
    """
    with self.cached_session():
      scalar = constant_op.constant(2.0)
      scalar1 = array_ops.placeholder(dtype=dtypes.float32)
      vector = [0.3, 0.4, 0.5]
      vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
      vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])
      multidimensional = [[0.3, 0.4], [0.2, 0.6]]
      multidimensional1 = array_ops.placeholder(
          dtype=dtypes.float32, shape=[None, None])
      multidimensional2 = array_ops.placeholder(
          dtype=dtypes.float32, shape=[None, None])
      # Scalar
      self.assertTrue(
          du.same_dynamic_shape(scalar, scalar1).eval({
              scalar1: 2.0
          }))
      # Vector
      self.assertTrue(
          du.same_dynamic_shape(vector, vector1).eval({
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertTrue(
          du.same_dynamic_shape(vector1, vector2).eval({
              vector1: [2.0, 3.0, 4.0],
              vector2: [2.0, 3.5, 6.0]
          }))
      # Multidimensional
      self.assertTrue(
          du.same_dynamic_shape(
              multidimensional, multidimensional1).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))
      self.assertTrue(
          du.same_dynamic_shape(
              multidimensional1, multidimensional2).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
                  multidimensional2: [[1.0, 3.5], [6.3, 2.3]]
              }))
      # Scalar, X — mismatched ranks must compare unequal.
      self.assertFalse(
          du.same_dynamic_shape(scalar, vector1).eval({
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(scalar1, vector1).eval({
              scalar1: 2.0,
              vector1: [2.0, 3.0, 4.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(scalar, multidimensional1).eval({
              multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
          }))
      self.assertFalse(
          du.same_dynamic_shape(scalar1, multidimensional1).eval(
              {
                  scalar1: 2.0,
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))
      # Vector, X — same rank but different length also compares unequal.
      self.assertFalse(
          du.same_dynamic_shape(vector, vector1).eval({
              vector1: [2.0, 3.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(vector1, vector2).eval({
              vector1: [2.0, 3.0, 4.0],
              vector2: [6.0]
          }))
      self.assertFalse(
          du.same_dynamic_shape(vector, multidimensional1).eval({
              multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
          }))
      self.assertFalse(
          du.same_dynamic_shape(vector1, multidimensional1).eval(
              {
                  vector1: [2.0, 3.0, 4.0],
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]]
              }))
      # Multidimensional, X
      self.assertFalse(
          du.same_dynamic_shape(
              multidimensional, multidimensional1).eval({
                  multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
              }))
      self.assertFalse(
          du.same_dynamic_shape(
              multidimensional1, multidimensional2).eval({
                  multidimensional1: [[2.0, 3.0], [3.0, 4.0]],
                  multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]
              }))
class RotateTransposeTest(test.TestCase):
  """Tests for `du.rotate_transpose`."""
  def _np_rotate_transpose(self, x, shift):
    """NumPy reference: cyclically roll the dimension order of *x* by *shift*."""
    if not isinstance(x, np.ndarray):
      x = np.array(x)
    return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
  @test_util.run_in_graph_and_eager_modes
  def testRollStatic(self):
    """Static shapes: result shape must equal np.roll of the input shape."""
    # The `None` rejection message differs between eager and graph mode.
    if context.executing_eagerly():
      error_message = r"Attempt to convert a value \(None\)"
    else:
      error_message = "None values not supported."
    with self.assertRaisesRegexp(ValueError, error_message):
      du.rotate_transpose(None, 1)
    for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
      for shift in np.arange(-5, 5):
        y = du.rotate_transpose(x, shift)
        self.assertAllEqual(
            self._np_rotate_transpose(x, shift), self.evaluate(y))
        self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())
  @test_util.run_deprecated_v1
  def testRollDynamic(self):
    """Placeholder inputs: rotation is resolved from values fed at run time."""
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32)
      shift = array_ops.placeholder(dtypes.int32)
      for x_value in (np.ones(
          1, dtype=x.dtype.as_numpy_dtype()), np.ones(
              (2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(
                  (3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
        for shift_value in np.arange(-5, 5):
          self.assertAllEqual(
              self._np_rotate_transpose(x_value, shift_value),
              sess.run(du.rotate_transpose(x, shift),
                       feed_dict={x: x_value,
                                  shift: shift_value}))
class PickVectorTest(test.TestCase):
  """Tests for `du.pick_vector`."""
  @test_util.run_deprecated_v1
  def testCorrectlyPicksVector(self):
    """Picks x when the predicate is true, y otherwise.

    With a statically-known constant predicate the chosen input is
    returned directly, so no session evaluation is required.
    """
    with self.cached_session():
      x = np.arange(10, 12)
      y = np.arange(15, 18)
      self.assertAllEqual(
          x, self.evaluate(du.pick_vector(math_ops.less(0, 5), x, y)))
      self.assertAllEqual(
          y, self.evaluate(du.pick_vector(math_ops.less(5, 0), x, y)))
      self.assertAllEqual(x,
                          du.pick_vector(
                              constant_op.constant(True), x, y))  # No eval.
      self.assertAllEqual(y,
                          du.pick_vector(
                              constant_op.constant(False), x, y))  # No eval.
class PreferStaticRankTest(test.TestCase):
  """Tests for `du.prefer_static_rank`.

  When the rank is statically known the result is a NumPy ndarray; when it
  is only known at run time the result is a Tensor to evaluate.
  """
  @test_util.run_deprecated_v1
  def testNonEmptyConstantTensor(self):
    """Static 3-D input yields rank 3 as an ndarray."""
    x = array_ops.zeros((2, 3, 4))
    rank = du.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(3, rank)
  @test_util.run_deprecated_v1
  def testEmptyConstantTensor(self):
    """An empty vector still has static rank 1."""
    x = constant_op.constant([])
    rank = du.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(1, rank)
  @test_util.run_deprecated_v1
  def testScalarTensor(self):
    """A scalar has static rank 0."""
    x = constant_op.constant(1.)
    rank = du.prefer_static_rank(x)
    self.assertIsInstance(rank, np.ndarray)
    self.assertEqual(0, rank)
  @test_util.run_deprecated_v1
  def testDynamicRankEndsUpBeingNonEmpty(self):
    """Unknown-shape placeholder: rank resolved at run time from the feed."""
    x = array_ops.placeholder(np.float64, shape=None)
    rank = du.prefer_static_rank(x)
    with self.cached_session():
      self.assertAllEqual(2, rank.eval(feed_dict={x: np.zeros((2, 3))}))
  @test_util.run_deprecated_v1
  def testDynamicRankEndsUpBeingEmpty(self):
    """Feeding an empty list resolves to rank 1."""
    x = array_ops.placeholder(np.int32, shape=None)
    rank = du.prefer_static_rank(x)
    with self.cached_session():
      self.assertAllEqual(1, rank.eval(feed_dict={x: []}))
  @test_util.run_deprecated_v1
  def testDynamicRankEndsUpBeingScalar(self):
    """Feeding a scalar resolves to rank 0."""
    x = array_ops.placeholder(np.int32, shape=None)
    rank = du.prefer_static_rank(x)
    with self.cached_session():
      self.assertAllEqual(0, rank.eval(feed_dict={x: 1}))
class PreferStaticShapeTest(test.TestCase):
  """Tests for `du.prefer_static_shape`.

  When the shape is statically known the result is a NumPy ndarray; when it
  is only known at run time the result is a Tensor to evaluate.
  """
  @test_util.run_deprecated_v1
  def testNonEmptyConstantTensor(self):
    """Static 3-D input yields its shape as an ndarray."""
    x = array_ops.zeros((2, 3, 4))
    shape = du.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([2, 3, 4]), shape)
  @test_util.run_deprecated_v1
  def testEmptyConstantTensor(self):
    """An empty vector has shape [0]."""
    x = constant_op.constant([])
    shape = du.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([0]), shape)
  @test_util.run_deprecated_v1
  def testScalarTensor(self):
    """A scalar has the empty shape []."""
    x = constant_op.constant(1.)
    shape = du.prefer_static_shape(x)
    self.assertIsInstance(shape, np.ndarray)
    self.assertAllEqual(np.array([]), shape)
  @test_util.run_deprecated_v1
  def testDynamicShapeEndsUpBeingNonEmpty(self):
    """Unknown-shape placeholder: shape resolved at run time from the feed."""
    x = array_ops.placeholder(np.float64, shape=None)
    shape = du.prefer_static_shape(x)
    with self.cached_session():
      self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))
  @test_util.run_deprecated_v1
  def testDynamicShapeEndsUpBeingEmpty(self):
    """Feeding an empty list resolves to shape [0]."""
    x = array_ops.placeholder(np.int32, shape=None)
    shape = du.prefer_static_shape(x)
    with self.cached_session():
      self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))
  @test_util.run_deprecated_v1
  def testDynamicShapeEndsUpBeingScalar(self):
    """Feeding a scalar resolves to the empty shape []."""
    x = array_ops.placeholder(np.int32, shape=None)
    shape = du.prefer_static_shape(x)
    with self.cached_session():
      self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))
class PreferStaticValueTest(test.TestCase):
  """Tests for `du.prefer_static_value`.

  When the value is statically known the result is a NumPy ndarray; when it
  is only known at run time the result is a Tensor to evaluate.
  """
  @test_util.run_deprecated_v1
  def testNonEmptyConstantTensor(self):
    """A constant tensor's value is returned as an ndarray."""
    x = array_ops.zeros((2, 3, 4))
    value = du.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.zeros((2, 3, 4)), value)
  @test_util.run_deprecated_v1
  def testEmptyConstantTensor(self):
    """An empty constant yields an empty ndarray."""
    x = constant_op.constant([])
    value = du.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.array([]), value)
  @test_util.run_deprecated_v1
  def testScalarTensor(self):
    """A scalar constant yields a 0-d ndarray."""
    x = constant_op.constant(1.)
    value = du.prefer_static_value(x)
    self.assertIsInstance(value, np.ndarray)
    self.assertAllEqual(np.array(1.), value)
  @test_util.run_deprecated_v1
  def testDynamicValueEndsUpBeingNonEmpty(self):
    """Placeholder value is only available at run time from the feed."""
    x = array_ops.placeholder(np.float64, shape=None)
    value = du.prefer_static_value(x)
    with self.cached_session():
      self.assertAllEqual(np.zeros((2, 3)),
                          value.eval(feed_dict={x: np.zeros((2, 3))}))
  @test_util.run_deprecated_v1
  def testDynamicValueEndsUpBeingEmpty(self):
    """Feeding an empty list resolves to an empty array."""
    x = array_ops.placeholder(np.int32, shape=None)
    value = du.prefer_static_value(x)
    with self.cached_session():
      self.assertAllEqual(np.array([]), value.eval(feed_dict={x: []}))
  @test_util.run_deprecated_v1
  def testDynamicValueEndsUpBeingScalar(self):
    """Feeding a scalar resolves to a 0-d array."""
    x = array_ops.placeholder(np.int32, shape=None)
    value = du.prefer_static_value(x)
    with self.cached_session():
      self.assertAllEqual(np.array(1), value.eval(feed_dict={x: 1}))
class FillTriangularTest(test.TestCase):
  """Tests for `du.fill_triangular`.

  `fill_triangular` packs a length-m vector (m = n(n+1)/2) into an n x n
  lower- (or upper-) triangular matrix; each case compares value and
  gradient against the NumPy reference below.
  """
  def setUp(self):
    # Fixed seed so the randomized inputs are reproducible.
    self._rng = np.random.RandomState(42)
  def _fill_triangular(self, x, upper=False):
    """Numpy implementation of `fill_triangular`."""
    x = np.asarray(x)
    # Formula derived by solving for n: m = n(n+1)/2.
    m = np.int32(x.shape[-1])
    n = np.sqrt(0.25 + 2. * m) - 0.5
    if n != np.floor(n):
      raise ValueError("Invalid shape.")
    n = np.int32(n)
    # We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle
    # `m == n == 1`. Hence, we do absolute indexing.
    x_tail = x[..., (m - (n * n - m)):]
    y = np.concatenate(
        [x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],
        axis=-1)
    y = y.reshape(np.concatenate([
        np.int32(x.shape[:-1]),
        np.int32([n, n]),
    ], axis=0))
    return np.triu(y) if upper else np.tril(y)
  def _run_test(self, x_, use_deferred_shape=False, **kwargs):
    """Compares du.fill_triangular (value and gradient) to the reference."""
    x_ = np.asarray(x_)
    with self.cached_session() as sess:
      static_shape = None if use_deferred_shape else x_.shape
      x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
      # Add `zeros_like(x)` such that x's value and gradient are identical. We
      # do this so we can ensure each gradient value is mapped to the right
      # gradient location. (Not doing this means the gradient wrt `x` is simple
      # `ones_like(x)`.)
      # Note:
      # zeros_like_x_pl == zeros_like(x_pl)
      # gradient(zeros_like_x_pl, x_pl) == x_pl - 1
      zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
                         - array_ops.stop_gradient(x_pl * (x_pl - 1.)))
      x = x_pl + zeros_like_x_pl
      actual = du.fill_triangular(x, **kwargs)
      grad_actual = gradients_impl.gradients(actual, x_pl)[0]
      [actual_, grad_actual_] = sess.run([actual, grad_actual],
                                         feed_dict={x_pl: x_})
      expected = self._fill_triangular(x_, **kwargs)
      if use_deferred_shape:
        # With a deferred shape nothing is known statically.
        self.assertEqual(None, actual.shape)
      else:
        self.assertAllEqual(expected.shape, actual.shape)
      self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)
      self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)
  @test_util.run_deprecated_v1
  def testCorrectlyMakes1x1TriLower(self):
    self._run_test(self._rng.randn(3, int(1*2/2)))
  @test_util.run_deprecated_v1
  def testCorrectlyMakesNoBatchTriLower(self):
    self._run_test(self._rng.randn(int(4*5/2)))
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatchTriLower(self):
    self._run_test(self._rng.randn(2, 3, int(3*4/2)))
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatchTriLowerUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(3*4/2)), use_deferred_shape=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)), use_deferred_shape=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatch7x7TriLower(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)))
  @test_util.run_deprecated_v1
  def testCorrectlyMakes1x1TriUpper(self):
    self._run_test(self._rng.randn(3, int(1*2/2)), upper=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesNoBatchTriUpper(self):
    self._run_test(self._rng.randn(int(4*5/2)), upper=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatchTriUpper(self):
    self._run_test(self._rng.randn(2, 2, int(3*4/2)), upper=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatchTriUpperUnknownShape(self):
    self._run_test(self._rng.randn(2, 2, int(3*4/2)),
                   use_deferred_shape=True,
                   upper=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)),
                   use_deferred_shape=True,
                   upper=True)
  @test_util.run_deprecated_v1
  def testCorrectlyMakesBatch7x7TriUpper(self):
    self._run_test(self._rng.randn(2, 3, int(7*8/2)), upper=True)
class FillTriangularInverseTest(FillTriangularTest):
  """Round-trip tests: fill_triangular_inverse(fill_triangular(x)) == x.

  Inherits every test case from FillTriangularTest; only the check made
  by `_run_test` is overridden.
  """
  def _run_test(self, x_, use_deferred_shape=False, **kwargs):
    """Asserts the inverse exactly recovers the packed input vector."""
    x_ = np.asarray(x_)
    with self.cached_session() as sess:
      static_shape = None if use_deferred_shape else x_.shape
      x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)
      # Same value-preserving zeros_like trick as in the parent class.
      zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)
                         - array_ops.stop_gradient(x_pl * (x_pl - 1.)))
      x = x_pl + zeros_like_x_pl
      actual = du.fill_triangular(x, **kwargs)
      inverse_actual = du.fill_triangular_inverse(actual, **kwargs)
      inverse_actual_ = sess.run(
          inverse_actual,
          feed_dict={x_pl: x_})
      if use_deferred_shape:
        self.assertEqual(None, inverse_actual.shape)
      else:
        self.assertAllEqual(x_.shape, inverse_actual.shape)
      self.assertAllEqual(x_, inverse_actual_)
class ReduceWeightedLogSumExp(test.TestCase):
  """Tests for `du.reduce_weighted_logsumexp` (signed weighted logsumexp)."""
  def _reduce_weighted_logsumexp(self, logx, w, axis, keep_dims=False):
    """NumPy reference: (log|sum_i w_i exp(logx_i)|, sign of the sum)."""
    # Standard max-shift for numerical stability.
    m = np.max(logx, axis=axis, keepdims=True)
    sum_ = np.sum(w * np.exp(logx - m), axis=axis, keepdims=keep_dims)
    sgn = np.sign(sum_)
    if not keep_dims:
      m = np.squeeze(m, axis=axis)
    return m + np.log(sgn * sum_), sgn
  @test_util.run_deprecated_v1
  def testNoWeights(self):
    """Without weights it must match reduce_logsumexp, values and gradients."""
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    with self.cached_session() as sess:
      logx = constant_op.constant(logx_)
      expected = math_ops.reduce_logsumexp(logx, axis=-1)
      grad_expected = gradients_impl.gradients(expected, logx)[0]
      actual, actual_sgn = du.reduce_weighted_logsumexp(
          logx, axis=-1, return_sign=True)
      grad_actual = gradients_impl.gradients(actual, logx)[0]
      [actual_, actual_sgn_, grad_actual_,
       expected_, grad_expected_] = sess.run([
           actual, actual_sgn, grad_actual,
           expected, grad_expected])
      self.assertAllEqual(expected_, actual_)
      self.assertAllEqual(grad_expected_, grad_actual_)
      # All implicit weights are +1, so every sign is positive.
      self.assertAllEqual([1., 1, 1], actual_sgn_)
  def testNegativeWeights(self):
    """Negative weights flip the returned sign where the sum is negative."""
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(logx_, w_, axis=-1)
    with self.cached_session() as sess:
      logx = constant_op.constant(logx_)
      w = constant_op.constant(w_)
      actual, actual_sgn = du.reduce_weighted_logsumexp(
          logx, w, axis=-1, return_sign=True)
      [actual_, actual_sgn_] = self.evaluate([actual, actual_sgn])
      self.assertAllEqual(expected, actual_)
      self.assertAllEqual([-1., -1, 1], actual_sgn_)
  def testKeepDims(self):
    """keep_dims=True retains the reduced axis with size 1."""
    logx_ = np.array([[0., -1, 1000.],
                      [0, 1, -1000.],
                      [-5, 0, 5]])
    w_ = np.array([[1., 1, -1],
                   [1, -2, 1],
                   [1, 0, 1]])
    expected, _ = self._reduce_weighted_logsumexp(
        logx_, w_, axis=-1, keep_dims=True)
    with self.cached_session() as sess:
      logx = constant_op.constant(logx_)
      w = constant_op.constant(w_)
      actual, actual_sgn = du.reduce_weighted_logsumexp(
          logx, w, axis=-1, return_sign=True, keep_dims=True)
      [actual_, actual_sgn_] = self.evaluate([actual, actual_sgn])
      self.assertAllEqual(expected, actual_)
      self.assertAllEqual([[-1.], [-1], [1]], actual_sgn_)
  def testDocString(self):
    """This test verifies the correctness of the docstring examples."""
    with self.cached_session():
      x = constant_op.constant([[0., 0, 0],
                                [0, 0, 0]])
      w = constant_op.constant([[-1., 1, 1],
                                [1, 1, 1]])
      self.assertAllClose(
          np.log(4), self.evaluate(du.reduce_weighted_logsumexp(x, w)))
      # One column's weights sum to 0, so its log is -inf; silence the warning.
      with np.errstate(divide="ignore"):
        self.assertAllClose(
            np.log([0, 2, 2]),
            self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=0)))
      self.assertAllClose(
          np.log([1, 3]),
          self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=1)))
      self.assertAllClose(
          np.log([[1], [3]]),
          self.evaluate(
              du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)))
      self.assertAllClose(
          np.log(4),
          self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=[0, 1])))
class GenNewSeedTest(test.TestCase):
  """Tests for `du.gen_new_seed` seed-propagation semantics."""
  def testOnlyNoneReturnsNone(self):
    """A concrete seed yields a new seed; a `None` seed stays `None`."""
    # assertIsNotNone/assertIsNone are the idiomatic unittest forms of
    # assertFalse(... is None)/assertTrue(... is None) and give clearer
    # failure messages.
    self.assertIsNotNone(du.gen_new_seed(0, "salt"))
    self.assertIsNone(du.gen_new_seed(None, "salt"))
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/kernel_tests/softplus_op_test.py
# once TF core is accepting new ops.
class SoftplusTest(test.TestCase):
  """Tests for `nn_ops.softplus` and `du.softplus_inverse`."""
  def _npSoftplus(self, np_features):
    """NumPy reference: softplus(x) = log(1 + exp(x)), computed stably."""
    np_features = np.asarray(np_features)
    zero = np.asarray(0).astype(np_features.dtype)
    return np.logaddexp(zero, np_features)
  def _testSoftplus(self, np_features, use_gpu=False):
    """Checks softplus against NumPy and round-trips softplus_inverse."""
    np_features = np.asarray(np_features)
    np_softplus = self._npSoftplus(np_features)
    with self.session(use_gpu=use_gpu) as sess:
      softplus = nn_ops.softplus(np_features)
      softplus_inverse = du.softplus_inverse(softplus)
      [tf_softplus, tf_softplus_inverse] = sess.run([
          softplus, softplus_inverse])
      self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
      rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
          str(np_features.dtype), 1e-6)
      # This will test that we correctly computed the inverse by verifying we
      # recovered the original input.
      self.assertAllCloseAccordingToType(
          np_features, tf_softplus_inverse,
          atol=0., rtol=rtol)
      # softplus is strictly positive and both results must be finite.
      self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                          tf_softplus > 0)
      self.assertShapeEqual(np_softplus, softplus)
      self.assertShapeEqual(np_softplus, softplus_inverse)
      self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                          np.isfinite(tf_softplus))
      self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
                          np.isfinite(tf_softplus_inverse))
  @test_util.run_deprecated_v1
  def testNumbers(self):
    """Exercises softplus over wide linspaces and eps-boundary values."""
    for t in [np.float16, np.float32, np.float64]:
      lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)
      upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=False)
      self._testSoftplus(
          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(
              [2, -1]),
          use_gpu=True)
      log_eps = np.log(np.finfo(t).eps)
      one = t(1)
      ten = t(10)
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=False)
      # Bug fix: the GPU list previously read `log_eps + ten - log_eps` — a
      # dropped comma fused two entries and skipped testing `-log_eps`,
      # leaving 9 values instead of the 10 used in the CPU case above.
      self._testSoftplus(
          [
              log_eps, log_eps - one, log_eps + one, log_eps - ten,
              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
              -log_eps - ten, -log_eps + ten
          ],
          use_gpu=True)
  @test_util.run_deprecated_v1
  def testGradient(self):
    """Numeric gradient of softplus must match the analytic gradient."""
    with self.cached_session():
      x = constant_op.constant(
          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
          shape=[2, 5],
          name="x")
      y = nn_ops.softplus(x, name="softplus")
      x_init = np.asarray(
          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
          dtype=np.float32,
          order="F")
      err = gradient_checker.compute_gradient_error(
          x, [2, 5], y, [2, 5], x_init_value=x_init)
      tf_logging.vlog(2, "softplus (float) gradient err = ", err)
      self.assertLess(err, 1e-4)
  @test_util.run_deprecated_v1
  def testInverseSoftplusGradientNeverNan(self):
    """softplus_inverse gradients stay NaN-free across zero and inf inputs."""
    with self.cached_session():
      # Note that this range contains both zero and inf.
      x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))
      y = du.softplus_inverse(x)
      grads = self.evaluate(gradients_impl.gradients(y, x)[0])
      # Equivalent to `assertAllFalse` (if it existed).
      self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))
  @test_util.run_deprecated_v1
  def testInverseSoftplusGradientFinite(self):
    """softplus_inverse gradients stay finite on a finite input range."""
    with self.cached_session():
      # This range of x is all finite, and so is 1 / x. So the
      # gradient and its approximations should be finite as well.
      x = constant_op.constant(np.logspace(-4.8, 4.5).astype(np.float16))
      y = du.softplus_inverse(x)
      grads = self.evaluate(gradients_impl.gradients(y, x)[0])
      # Equivalent to `assertAllTrue` (if it existed).
      self.assertAllEqual(
          np.ones_like(grads).astype(np.bool), np.isfinite(grads))
@test_util.run_all_in_graph_and_eager_modes
class ArgumentsTest(test.TestCase):
  """Tests for `du.parent_frame_arguments`.

  The cases below demonstrate that it returns the calling function's named
  arguments (positional, keyword and **kwargs entries) as a dict, while
  excluding *varargs and other local variables.
  """
  def testNoArguments(self):
    def foo():
      return du.parent_frame_arguments()
    self.assertEqual({}, foo())
  def testPositionalArguments(self):
    def foo(a, b, c, d):  # pylint: disable=unused-argument
      return du.parent_frame_arguments()
    self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, foo(1, 2, 3, 4))
    # Tests that it does not matter where this function is called, and
    # no other local variables are returned back.
    def bar(a, b, c):
      unused_x = a * b
      unused_y = c * 3
      return du.parent_frame_arguments()
    self.assertEqual({"a": 1, "b": 2, "c": 3}, bar(1, 2, 3))
  def testOverloadedArgumentValues(self):
    """Reassigned parameters are reported with their current values."""
    def foo(a, b, c):  # pylint: disable=unused-argument
      a = 42
      b = 31
      c = 42
      return du.parent_frame_arguments()
    self.assertEqual({"a": 42, "b": 31, "c": 42}, foo(1, 2, 3))
  def testKeywordArguments(self):
    def foo(**kwargs):  # pylint: disable=unused-argument
      return du.parent_frame_arguments()
    self.assertEqual({"a": 1, "b": 2, "c": 3, "d": 4}, foo(a=1, b=2, c=3, d=4))
  def testPositionalKeywordArgs(self):
    def foo(a, b, c, **kwargs):  # pylint: disable=unused-argument
      return du.parent_frame_arguments()
    self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(a=1, b=2, c=3))
    self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
                     foo(a=1, b=2, c=3, unicorn=None))
  def testNoVarargs(self):
    """*varargs values are never included in the result."""
    def foo(a, b, c, *varargs, **kwargs):  # pylint: disable=unused-argument
      return du.parent_frame_arguments()
    self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(a=1, b=2, c=3))
    self.assertEqual({"a": 1, "b": 2, "c": 3}, foo(1, 2, 3, *[1, 2, 3]))
    self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
                     foo(1, 2, 3, unicorn=None))
    self.assertEqual({"a": 1, "b": 2, "c": 3, "unicorn": None},
                     foo(1, 2, 3, *[1, 2, 3], unicorn=None))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
import collections
import collections.abc
import json
import sys

import psycopg2
def consulta(query):
    """Execute *query* on the module-level `cursor` and return all rows."""
    cursor.execute(query)
    return cursor.fetchall()
def constroi_consulta_lista(lista_tabelas):
    """Build a catalog query for the tables FK-referenced by *lista_tabelas*.

    Returns SQL selecting the distinct names of tables referenced by a
    foreign key of any table in the list, excluding self-references.

    NOTE(review): table names are interpolated directly into the SQL text;
    only trusted identifiers must be passed in.
    """
    # ",".join builds the quoted IN-list directly, replacing the old
    # concatenation loop plus the trailing-comma `.replace(",)", ")")` hack;
    # the resulting query string is identical.
    tabelas = ",".join("'" + tabela + "'" for tabela in lista_tabelas)
    return ("SELECT distinct cl2.relname AS ref_table FROM pg_constraint as co "
            "JOIN pg_class AS cl1 ON co.conrelid=cl1.oid "
            "JOIN pg_class AS cl2 ON co.confrelid=cl2.oid "
            "WHERE co.contype='f' AND cl1.relname in (" + tabelas + ") "
            "AND cl2.relname <> cl1.relname ORDER BY cl2.relname")
def constroi_consulta(tabela):
    """Build a catalog query for the tables FK-referenced by *tabela*."""
    quoted = "'" + tabela + "'"
    return (
        "SELECT distinct cl2.relname AS ref_table FROM pg_constraint as co "
        "JOIN pg_class AS cl1 ON co.conrelid=cl1.oid "
        "JOIN pg_class AS cl2 ON co.confrelid=cl2.oid "
        "WHERE co.contype='f' AND cl1.relname = "
        + quoted +
        " AND cl2.relname <> cl1.relname ORDER BY cl2.relname")
def convert_tupla_lista(lista_tupla):
    """Return the first element of each tuple in *lista_tupla* as a flat list.

    Used to unwrap single-column DB result rows (list of 1-tuples).
    """
    # Comprehension replaces the former quadratic `lista = lista + [x]` loop.
    return [tupla[0] for tupla in lista_tupla]
def consulta_tabelas_dependentes_lista(lista_tabelas, lista_tabelas_resultado):
    """Recursively collect every table transitively FK-referenced by *lista_tabelas*.

    Newly discovered table names are appended (deduplicated, insertion order
    preserved) to *lista_tabelas_resultado*, which is also returned once a
    level yields no referenced tables.

    NOTE(review): like the original, this recurses on every table found at
    each level, so circular FK chains could recurse indefinitely — confirm
    against the schema before relying on it.
    """
    # `if lista_tabelas` covers both None and the empty list, matching the
    # original `(x is not None) and (x)` check.
    if lista_tabelas:
        nivel_atual = convert_tupla_lista(
            consulta(constroi_consulta_lista(lista_tabelas=lista_tabelas)))
        # Plain loop replaces the former side-effecting list comprehension.
        for item in nivel_atual:
            if item not in lista_tabelas_resultado:
                lista_tabelas_resultado.append(item)
        return consulta_tabelas_dependentes_lista(
            lista_tabelas=nivel_atual,
            lista_tabelas_resultado=lista_tabelas_resultado)
    else:
        return lista_tabelas_resultado
def le_arquivo_json(filename):
    """Read a JSON-lines file and return the parsed rows as a list.

    Collapses doubled backslashes before parsing, matching the escaping of
    the export files this script consumes.
    """
    print(filename)  # progress trace, kept from the original
    lista = []
    # `with` guarantees the handle is closed (the old version leaked it).
    with open(filename, 'r') as f:
        for row in f:
            lista.append(json.loads(row.replace('\\\\', '\\')))
    return lista
class OrderedSet(collections.abc.MutableSet):
    """A mutable set that remembers insertion order.

    Backed by ``self.map`` (key -> node) plus a circular doubly linked list
    of ``[key, prev, next]`` nodes whose sentinel is ``self.end``. Fixed to
    subclass ``collections.abc.MutableSet``: the ``collections.MutableSet``
    alias was removed in Python 3.10.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}  # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        """Insert *key* at the end of the order if not already present."""
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def discard(self, key):
        """Remove *key* if present by unlinking its list node."""
        if key in self.map:
            # `nxt` avoids shadowing the builtin `next`.
            key, prev, nxt = self.map.pop(key)
            prev[2] = nxt
            nxt[1] = prev

    def __iter__(self):
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last (or first, if not *last*) element."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing two OrderedSets.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
'''
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_classe_judicial"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_assunto_trf"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_competencia"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_orgao_julgador"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_jurisdicao"], lista_tabelas_resultado=lista_tabelas_resultado))
'''
from upsert import Upsert
import traceback
def migra_tabela(tabela):
    """Upsert every row of *tabela* from its JSON dump files.

    Reads ``<tabela>_ids.json`` (key columns) and ``<tabela>.json`` (row
    data), pairing them line by line. Returns True on success, False on any
    error (the traceback is printed to stdout).
    """
    try:
        json_id_tabela = le_arquivo_json(tabela + "_ids.json")
        json_tabela = le_arquivo_json(tabela + ".json")
        upsert = Upsert(cursor, tabela)
        # Index-based loop keeps the original behavior of failing (and
        # returning False) when the two files have mismatched line counts.
        for i in range(len(json_id_tabela)):
            upsert.row(json_id_tabela[i], json_tabela[i])
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # still propagate instead of being swallowed.
        traceback.print_exc(file=sys.stdout)
        return False
def desabilita_triggers(tabela):
    """Disable every trigger on *tabela* (speeds up bulk migration)."""
    comando = "ALTER TABLE " + tabela + " DISABLE TRIGGER ALL;"
    cursor.execute(comando)
def habilita_triggers(tabela):
    """Re-enable every trigger on *tabela* after migration."""
    comando = "ALTER TABLE " + tabela + " ENABLE TRIGGER ALL;"
    cursor.execute(comando)
def migra_linha():
    """Upsert a single hard-coded tb_endereco row (manual smoke test)."""
    upsert = Upsert(cursor, "tb_endereco" )
    upsert.row({'id_endereco': 100054} , {'nm_logradouro': "RUA HERACLITO", 'id_cep': 365878})
def migra_tabelas(lista_tabelas):
    """Migrate each table in order; return the per-table success flags."""
    resultados = []
    for tabela in lista_tabelas:
        resultados.append(migra_tabela(tabela))
    return resultados
# --- Connection selection: exactly one of the blocks below is active; the
# --- others are kept as disabled alternatives in triple-quoted strings.
# NOTE(review): database credentials are hardcoded below; consider moving
# them to environment variables or a config file before sharing this script.
''' Conexao pjesup
pjesupconn = psycopg2.connect("dbname=pje user=pjeadmin password=pj3adm1n-TJMG host=linbdpje-5 port=5432")
pjesupcursor = pjesupconn.cursor()
cursor = pjesupcursor
'''
#Conexao pjetst
pjetstconn = psycopg2.connect("dbname=pje user=pjeadmin password=pj3adm1n-TJMG host=linbdpje-10 port=5432")
pjetstcursor = pjetstconn.cursor()
cursor = pjetstcursor
''' conexao pjetstcasa
pje_local_conn = psycopg2.connect("dbname=pje user=postgres password=123456 host=localhost port=5432")
pje_local_cursor = pje_local_conn.cursor()
cursor = pje_local_cursor
'''
''' # conexao pjetstlocal
pje_tstlocal_conn = psycopg2.connect("dbname=pjetst user=postgres password=Postgres1234 host=localhost port=5432")
pje_tstlocal_cursor = pje_tstlocal_conn.cursor()
cursor = pje_tstlocal_cursor
'''
# Make the PJe schemas visible and defer FK checks for the bulk upserts.
cursor.execute("set search_path = public, acl, core, client, criminal, jt; SET CONSTRAINTS ALL DEFERRED;")
lista_tabelas = ['tb_classe_judicial','tb_assunto_trf','tb_competencia','tb_orgao_julgador']
# Reversed, presumably so referenced (parent) tables load before their
# dependents — TODO confirm against the FK graph.
lista_tabelas.reverse()
migra_tabelas(lista_tabelas)
#migra_tabela("tb_classe_judicial")
|
import heffte
import numpy as np
from numba import cuda as gpu
import mpi4py
def make_reference(num_entries, dtype, scale):
    """Build the expected transform output: -512 in slot 0, zeros
    elsewhere, rescaled to match the requested heFFTe scaling mode."""
    expected = np.zeros((num_entries,), dtype)
    expected[0] = -512.0
    if scale == heffte.scale.symmetric:
        expected = expected / np.sqrt(float(2 * num_entries))
    elif scale == heffte.scale.full:
        expected = expected / float(2 * num_entries)
    return expected
comm = mpi4py.MPI.COMM_WORLD
me = comm.Get_rank()
# This test is hard-wired for exactly two MPI ranks.
assert comm.Get_size() == 2
# Split the global index box into two slabs along z (one per rank).
box = (heffte.box3d([0, 0, 0], [3, 3, 1])
       if me == 0 else
       heffte.box3d([0, 0, 2], [3, 3, 3]))
fft = heffte.fft3d(heffte.backend.cufft,
                   box, box, comm)
assert fft.size_inbox() == 32
assert fft.size_outbox() == 32
# Each row: (input dtype, output dtype, forward scale, backward scale,
# tolerance used for the round-trip comparison).
test_types = [[np.float32, np.complex64, heffte.scale.none, heffte.scale.full, 1.E-3],
              [np.complex64, np.complex64, heffte.scale.symmetric, heffte.scale.symmetric, 1.E-3],
              [np.float64, np.complex128, heffte.scale.full, heffte.scale.none, 1.E-11],
              [np.complex128, np.complex128, heffte.scale.none, heffte.scale.full, 1.E-11],
              ]
for tt in test_types:
    in_array = np.array(range(fft.size_inbox()), tt[0])
    out_array = np.empty(fft.size_outbox(), tt[1])
    gpu.select_device(0)
    gpu_in = gpu.to_device(in_array)
    gpu_out = gpu.to_device(out_array)
    fft.forward(gpu_in, gpu_out, tt[2])
    reference = make_reference(fft.size_outbox(), tt[1], tt[2])  # num_entries, type, scale
    # Forward result is only validated on rank 1.
    if me == 1:
        assert np.max(np.abs(reference - gpu_out.copy_to_host())) < tt[4]
    # Round trip: backward(forward(x)) must reproduce x within tolerance.
    in_array = np.zeros((fft.size_inbox(),), tt[0])
    gpu_in = gpu.to_device(in_array)
    fft.backward(gpu_out, gpu_in, tt[3])
    assert np.max(np.abs(gpu_in.copy_to_host() - np.array(range(fft.size_inbox()), tt[0]))) < tt[4]
    if me == 0:
        print(" pass ",tt[0]," -> ",tt[1])
|
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"feature",
"flag_group",
"flag_set",
"tool_path",
"with_feature_set",
)
def _impl(ctx):
    """Build the CcToolchainConfigInfo for the Emscripten/WASM toolchain.

    All tool paths, flag sets, include directories and the sysroot are
    hard-coded against the emsdk tree symlinked in by emsdk_configure
    (see below).
    """

    # Compiler and linker both go through the emcc wrapper; the archiver
    # through emar. Tools that must never run are routed to false.sh or a
    # NOT_USED placeholder.
    tool_paths = [
        tool_path(
            name = "gcc",
            path = "emcc.sh",
        ),
        tool_path(
            name = "ld",
            path = "emcc.sh",
        ),
        tool_path(
            name = "ar",
            path = "emar.sh",
        ),
        tool_path(
            name = "cpp",
            path = "false.sh",
        ),
        tool_path(
            name = "gcov",
            path = "false.sh",
        ),
        tool_path(
            name = "nm",
            path = "NOT_USED",
        ),
        tool_path(
            name = "objdump",
            path = "false.sh",
        ),
        tool_path(
            name = "strip",
            path = "NOT_USED",
        ),
    ]

    # Actions that run the preprocessor — these receive -DNDEBUG in opt
    # mode (see crosstool_default_flag_sets).
    preprocessor_compile_actions = [
        ACTION_NAMES.c_compile,
        ACTION_NAMES.cpp_compile,
        ACTION_NAMES.linkstamp_compile,
        ACTION_NAMES.preprocess_assemble,
        ACTION_NAMES.cpp_header_parsing,
        ACTION_NAMES.cpp_module_compile,
        ACTION_NAMES.clif_match,
    ]

    all_link_actions = [
        ACTION_NAMES.cpp_link_executable,
        ACTION_NAMES.cpp_link_dynamic_library,
        ACTION_NAMES.cpp_link_nodeps_dynamic_library,
    ]

    all_compile_actions = [
        ACTION_NAMES.c_compile,
        ACTION_NAMES.cpp_compile,
        ACTION_NAMES.linkstamp_compile,
        ACTION_NAMES.assemble,
        ACTION_NAMES.preprocess_assemble,
        ACTION_NAMES.cpp_header_parsing,
        ACTION_NAMES.cpp_module_compile,
        ACTION_NAMES.cpp_module_codegen,
        ACTION_NAMES.clif_match,
        ACTION_NAMES.lto_backend,
    ]

    toolchain_include_directories_feature = feature(
        name = "toolchain_include_directories",
        enabled = True,
        flag_sets = [
            flag_set(
                actions = all_compile_actions,
                flag_groups = [
                    flag_group(
                        flags = [
                            # The clang compiler comes with a definition of
                            # max_align_t struct in $emsdk/upstream/lib/clang/13.0.0/include/__stddef_max_align_t.h.
                            # It conflicts with the one defined in
                            # $emsdk/upstream/emscripten/cache/sysroot/include/bits/alltypes.h.
                            # We need both include paths to make things work.
                            #
                            # To workaround this, we are defining the following
                            # symbol through compiler flag so that the max_align_t
                            # defined in clang's header file will be skipped.
                            "-D",
                            "__CLANG_MAX_ALIGN_T_DEFINED",
                            # We are using emscripten 2.0.15 for this build. It
                            # comes with clang 13.0.0. Future emscripten release
                            # might change the clang version number below.
                            #
                            # Also need to change the version number in
                            # cxx_cxx_builtin_include_directories below.
                            "-isystem",
                            "external/emsdk/emsdk/upstream/lib/clang/13.0.0/include",
                        ],
                    ),
                ],
            ),
        ],
    )

    # Per-build-mode compiler/linker flags, selected by the opt/dbg/
    # fastbuild features declared further down.
    crosstool_default_flag_sets = [
        # Optimized (opt)
        flag_set(
            actions = preprocessor_compile_actions,
            flag_groups = [flag_group(flags = ["-DNDEBUG"])],
            with_features = [with_feature_set(features = ["opt"])],
        ),
        # Overriding to use -O2 instead of -O3 because asmjs breaks.
        flag_set(
            actions = all_compile_actions + all_link_actions,
            flag_groups = [flag_group(flags = ["-g0", "-O2"])],
            with_features = [with_feature_set(features = ["opt"])],
        ),
        # Fastbuild (fastbuild)
        flag_set(
            actions = all_compile_actions + all_link_actions,
            flag_groups = [flag_group(flags = ["-O2"])],
            with_features = [with_feature_set(features = ["fastbuild"])],
        ),
        # Debug (dbg)
        flag_set(
            actions = all_compile_actions + all_link_actions,
            flag_groups = [flag_group(flags = ["-g2", "-O0"])],
            with_features = [with_feature_set(features = ["dbg"])],
        ),
    ]

    features = [
        toolchain_include_directories_feature,
        # These 3 features will be automatically enabled by blaze in the
        # corresponding build mode.
        feature(
            name = "opt",
            provides = ["variant:crosstool_build_mode"],
        ),
        feature(
            name = "dbg",
            provides = ["variant:crosstool_build_mode"],
        ),
        feature(
            name = "fastbuild",
            provides = ["variant:crosstool_build_mode"],
        ),
        feature(
            name = "crosstool_default_flags",
            enabled = True,
            flag_sets = crosstool_default_flag_sets,
        ),
    ]

    cxx_builtin_include_directories = [
        "external/emsdk/emsdk/upstream/emscripten/cache/sysroot/include/c++/v1",
        "external/emsdk/emsdk/upstream/emscripten/cache/sysroot/include/compat",
        "external/emsdk/emsdk/upstream/emscripten/cache/sysroot/include",
        # We are using emscripten 2.0.15 for this build. It comes with clang
        # 13.0.0. Future emscripten release might change the clang version
        # number below.
        #
        # Also need to change the version number in
        # toolchain_include_directories_feature above.
        "external/emsdk/emsdk/upstream/lib/clang/13.0.0/include",
    ]

    builtin_sysroot = "external/emsdk/emsdk/upstream/emscripten/cache/sysroot"

    return cc_common.create_cc_toolchain_config_info(
        ctx = ctx,
        toolchain_identifier = "wasm-toolchain",
        host_system_name = "i686-unknown-linux-gnu",
        target_system_name = "wasm-unknown-emscripten",
        target_cpu = "wasm",
        target_libc = "musl/js",
        compiler = "emscripten",
        abi_version = "emscripten_syscalls",
        abi_libc_version = "default",
        tool_paths = tool_paths,
        features = features,
        builtin_sysroot = builtin_sysroot,
        cxx_builtin_include_directories = cxx_builtin_include_directories,
    )
# Rule exposing the Emscripten/WASM toolchain configuration above.
# No attributes: everything is hard-coded inside _impl.
cc_toolchain_config = rule(
    implementation = _impl,
    attrs = {},
    provides = [CcToolchainConfigInfo],
)
def _emsdk_impl(ctx):
    """Repository rule impl: symlink the local emsdk installation
    (located via the EMSDK environment variable, set by emsdk_env.sh)
    into the external repository and expose its files as :all.
    """
    if "EMSDK" not in ctx.os.environ or ctx.os.environ["EMSDK"].strip() == "":
        fail("The environment variable EMSDK is not found. " +
             "Did you run source ./emsdk_env.sh ?")
    path = ctx.os.environ["EMSDK"]
    ctx.symlink(path, "emsdk")
    # Generated BUILD file for the repository; glob publishes the whole
    # symlinked SDK tree.
    ctx.file("BUILD", """
filegroup(
    name = "all",
    srcs = glob(["emsdk/**"]),
    visibility = ["//visibility:public"],
)
""")
# local = True: re-evaluated whenever the workspace is set up, since it
# depends on the host machine's EMSDK environment variable.
emsdk_configure = repository_rule(
    implementation = _emsdk_impl,
    local = True,
)
|
#
# https://github.com/richardkiss/pycoinnet/
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Kiss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import binascii
from collections import namedtuple
from pycoin.block import Block
from spruned.dependencies.pycoinnet.pycoin.make_parser_and_packer import (
make_parser_and_packer, standard_messages,
standard_message_post_unpacks, standard_streamer, standard_parsing_functions
)
from pycoin.tx.Tx import Tx
# Bundle of everything needed to talk to one bitcoin-family network:
# identifying code, wire-protocol magic bytes, DNS seeds, port, and the
# message packer/parser pair.
Network = namedtuple(
    'Network', (
        'code',
        'magic_header', 'dns_bootstrap', 'default_port', 'pack_from_data',
        'parse_from_data'
    )
)

streamer = standard_streamer(standard_parsing_functions(Block, Tx))
# Shared parser/packer for the standard bitcoin wire-protocol messages.
btc_parser, btc_packer = make_parser_and_packer(
    streamer, standard_messages(), standard_message_post_unpacks(streamer))

MAINNET = Network(
    'BTC', binascii.unhexlify('F9BEB4D9'), [
        "seed.bitcoin.sipa.be", "dnsseed.bitcoin.dashjr.org",
        "bitseed.xf2.org", "dnsseed.bluematt.me",
    ],
    8333,
    btc_packer,
    btc_parser,
)
TESTNET = Network(
    'XTC', binascii.unhexlify('0B110907'), [
        "testnet-seed.bitcoin.jonasschnelli.ch"
    ],
    18333,
    btc_packer,
    btc_parser,
)
# NOTE(review): REGTEST shares the 'XTC' code with TESTNET — confirm no
# caller distinguishes networks by code alone. Regtest has no DNS seeds.
REGTEST = Network(
    'XTC', binascii.unhexlify('fabfb5da'), [],
    18444,
    btc_packer,
    btc_parser,
)
NETWORKS = [MAINNET, TESTNET, REGTEST]
|
# Package metadata — presumably read by setup.py; confirm before renaming.
version = '0.0.7'
author = 'XESS Corp.'
email = 'info@xess.com'
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the referenced model late (avoids circular imports) and
    publish it into module globals so the type tuple returned by
    openapi_types resolves by name."""
    from datadog_api_client.v1.model.formula_and_function_event_query_group_by_sort import (
        FormulaAndFunctionEventQueryGroupBySort,
    )

    globals()["FormulaAndFunctionEventQueryGroupBySort"] = FormulaAndFunctionEventQueryGroupBySort
class FormulaAndFunctionEventQueryGroupBy(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained or validated properties on this model.
    allowed_values = {}

    validations = {}

    # Closed schema: unknown JSON keys are not accepted as extra properties.
    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            "facet": (str,),  # noqa: E501
            "limit": (int,),  # noqa: E501
            "sort": (FormulaAndFunctionEventQueryGroupBySort,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # Not a polymorphic model.
        return None

    # Python attribute name -> JSON key (identical for this model).
    attribute_map = {
        "facet": "facet",  # noqa: E501
        "limit": "limit",  # noqa: E501
        "sort": "sort",  # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes assigned directly in __init__
    # (never treated as OpenAPI data properties).
    required_properties = set(
        [
            "_data_store",
            "_check_type",
            "_spec_property_naming",
            "_path_to_item",
            "_configuration",
            "_visited_composed_classes",
        ]
    )

    @convert_js_args_to_python_args
    def __init__(self, facet, *args, **kwargs):  # noqa: E501
        """FormulaAndFunctionEventQueryGroupBy - a model defined in OpenAPI
        Args:
            facet (str): Event facet.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            limit (int): Number of groups to return.. [optional] # noqa: E501
            sort (FormulaAndFunctionEventQueryGroupBySort): [optional] # noqa: E501
        """
        _check_type = kwargs.pop("_check_type", True)
        _spec_property_naming = kwargs.pop("_spec_property_naming", False)
        _path_to_item = kwargs.pop("_path_to_item", ())
        _configuration = kwargs.pop("_configuration", None)
        _visited_composed_classes = kwargs.pop("_visited_composed_classes", ())

        # Positional arguments beyond 'facet' are always a caller error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
                % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.facet = facet
        for var_name, var_value in kwargs.items():
            if (
                var_name not in self.attribute_map
                and self._configuration is not None
                and self._configuration.discard_unknown_keys
                and self.additional_properties_type is None
            ):
                # discard variable.
                continue
            setattr(self, var_name, var_value)
|
import telebot
import os
# Fails fast (KeyError) when TELEGRAM_BOT_TOKEN is unset; parse_mode
# makes HTML the default markup for all outgoing messages.
bot = telebot.TeleBot(
    os.environ['TELEGRAM_BOT_TOKEN'],
    parse_mode='HTML'
)
|
import numpy as np
import physics
import storage
import learning
import visualization
import multiprocessing
from itertools import repeat
import uuid
# The dimensions of storage in this project are as follows:
# Example: velocity
# Dimension 0: storage. [v_x, v_y, vz]
# Dimension 1: body nr [data_of_mass_0, data_1, data_2, ...]
# Dimension 2: time [data_t_0, data_t_1, data_t_2, ...]
# Module-level problem/integrator defaults; the alternatives are kept
# commented out for quick experimentation.
# Adjust the initial conditions to whatever problem you want to solve
# problem = physics.system.AlphaCentauriSystem()
# problem = physics.system.EearthSunSystem()
# problem = physics.system.EearthSunMoonSystem()
# problem = physics.system.RandomNbodySystem(N=3, mass_dev=0., mass_mean=1.)
problem = physics.system.BreenSystem(N=3)
# Adjust to compare different integrators
# integrator = physics.integrator.OdeIntegrator()
# integrator = physics.integrator.SciPyIvpIntegrator(method='LSODA')
# integrator = physics.integrator.HamiltonianIntegrator()
# integrator = physics.integrator.SymplecticIntegrator()
integrator = physics.integrator.BrutusIntegrator()
def run_and_save_simulation(i, output_folder):
    """Integrate one fresh random Breen three-body problem and persist it.

    Runs inside a worker process; NumPy is reseeded first so forked
    workers do not all draw identical random initial conditions.
    """
    print(f"\n### Executing simulation {i}")
    np.random.seed()
    sim_problem = physics.system.BreenSystem(N=3)

    trajectory, velocities, times = integrator.integrate(10_000, sim_problem)

    # Energy conservation is the quality metric for the integration.
    total_energy = physics.energy.system_total(
        trajectory, velocities, sim_problem.m)
    worst_error = physics.analytics.get_relative_error(total_energy).max()
    print(f"Max relative error: {worst_error:.1E}")

    print("Saving solution")
    storage.store_simulation(sim_problem.m,
                             sim_problem.r_init,
                             sim_problem.v_init,
                             times,
                             trajectory, velocities,
                             filename=f"{uuid.uuid1()}",
                             folder=output_folder)
    print("Saved solution")
def create_data_set(num=1000, output_folder="output2"):
    """Generate *num* simulations in parallel and store them in *output_folder*."""
    with multiprocessing.Pool() as pool:
        pool.starmap(run_and_save_simulation,
                     ((index, output_folder) for index in range(num)))
def newton_vs_the_machine(problem,
                          integrator,
                          model: learning.BaseModel,
                          num_periods=5,
                          set_timespan=0.05):
    """Run the learned model and the numerical integrator on the same
    problem; return (r_ml, v_ml, r, v) for side-by-side comparison."""
    prediction = model.predict_timesteps(problem.N, problem.m,
                                         problem.r_init, problem.v_init,
                                         num_timesteps=num_periods)
    r_ml, v_ml = model.get_data_from_output(prediction, problem.N)

    # Match the integrator's total time span to the model's horizon.
    integrator.T = num_periods * set_timespan
    r, v, t = integrator.integrate(10_000, problem)
    return r_ml, v_ml, r, v
def main():
    """Driver: generate a training dataset, (optionally, currently
    commented out) train and compare a model, then integrate a single
    problem and animate it."""
    integrator.T = 2
    # Create the dataset for training the model with
    # dataset_folder = f"output/equal_mass_t{integrator.T}_rcm0_pcm0"
    dataset_folder = f"output/breen_t{integrator.T}_rcm0_pcm0"
    # NOTE(review): generates 100k simulations — a long-running step.
    create_data_set(num=100000, output_folder=dataset_folder)

    # Train a model
    # sets = storage.load_sets_from_folder(dataset_folder)
    layers = 10
    nodes = 128
    model_folder = f"output/2D{layers}Layers{nodes}Nodes{integrator.T}Timestep"
    # if not learning.TwoDimensionalModel.exists(model_folder):
    #     model = learning.TwoDimensionalModel.new(N=3,
    #                                              num_layers_hidden=layers, num_nodes=nodes,
    #                                              storage_folder=model_folder)
    # else:
    #     model = learning.TwoDimensionalModel.load(path=model_folder)
    #
    # resume_fitting(model, sets)
    # model.fit_model(sets)
    #
    # # Execute Newton vs The Machine: See who performs better
    # r_ml, v_ml, r, v = newton_vs_the_machine(problem, integrator,
    #                                          model,
    #                                          num_periods=10, set_timespan=integrator.T)

    # Integrate a single problem
    r, v, t = integrator.integrate(10_000, problem)
    m = problem.m
    N = problem.N
    physics.analytics.assess(problem.m, problem.r_init, problem.v_init, r[-1], v[-1])

    # Visualisation of results
    # vis = visualization.show_trajectory(r, v, problem.N, show=False)
    # visualization.show_trajectory(r_ml, v_ml, problem.N, *vis,
    #                               show=True, alpha=0.45, linestyle='--')
    # visualization.show_energy(r, v, m)
    visualization.animate_trajectory_2d(r, v, N, m)


if __name__ == "__main__":
    main()
|
# Various utility functions
from __future__ import print_function
from distutils.version import LooseVersion
import contextlib
import os
import random
import sys
import vim # pylint: disable=F0401
import tasklib
from taskwiki.errors import TaskWikiException
from taskwiki import regexp
# Detect if command AnsiEsc is available
# exists() returns '2' only when a full command name matches, i.e. the
# AnsiEsc plugin is really loaded (not just a command with that prefix).
ANSI_ESC_AVAILABLE = vim.eval('exists(":AnsiEsc")') == '2'
NEOVIM = (vim.eval('has("nvim")') == "1")
# Terminal support: any Neovim, or Vim 8.0 and newer.
HAS_TERMINAL = (NEOVIM or (int(vim.eval("v:version")) >= 800))
def tw_modstring_to_args(line):
    """Split a taskwarrior modification string into argument tokens.

    Single/double quotes group words into one token (the quotes
    themselves are stripped); a backslash escapes the following
    character. Unquoted whitespace separates tokens and empty tokens
    are dropped.
    """
    tokens = []
    current = ''
    quote_char = None   # quote character we are currently inside, if any
    escaped = False     # previous character was an unconsumed backslash

    for char in line.strip():
        if escaped:
            # Whatever follows a backslash is taken literally.
            current += char
            escaped = False
        elif char == '\\':
            escaped = True
        elif char in ('"', "'"):
            if quote_char == char:
                # Closing the quote we opened earlier.
                quote_char = None
            elif quote_char is None:
                # Opening quote.
                quote_char = char
            else:
                # The other quote kind inside a quoted run: literal.
                current += char
        elif char == ' ' and quote_char is None:
            # Unquoted separator — flush the token if non-empty.
            if current:
                tokens.append(current)
                current = ''
        else:
            current += char

    if current:
        tokens.append(current)

    return tokens
def tw_modstring_to_kwargs(line):
    """Parse a taskwarrior modification string straight into a kwargs dict."""
    return tw_args_to_kwargs(tw_modstring_to_args(line))
def tw_args_to_kwargs(args):
    """Convert taskwarrior argument tokens into a kwargs dict.

    'key:value' pairs become entries (an empty value maps to None) and
    '+tag' tokens are collected under 'tags'. Attribute-modifier keys
    (anything non-alphabetic, e.g. 'due.before') and virtual tags
    (all-uppercase) are skipped; tag removals are ignored entirely.
    """
    result = dict()
    for token in args:
        if ':' in token:
            key, _, value = token.partition(':')
            # Only plain alphabetic keys; skips attribute modifiers.
            if key.isalpha():
                result[key] = value or None
        elif token.startswith('+'):
            tag = token[1:]
            # Virtual tags (PENDING, OVERDUE, ...) are all uppercase.
            if not tag.isupper():
                result.setdefault('tags', []).append(tag)
        # Tag removal and bare words fall through unhandled.
    return result
def get_input(prompt="Enter: ", allow_empty=False, completion=None):
    """Prompt the user inside vim and return the entered string.

    completion -- optional vim completion type passed to input()
    Raises TaskWikiException when the input is empty and allow_empty
    is False.
    """
    if completion is not None:
        # NOTE(review): prompt/completion are spliced into the vim
        # expression unescaped — a double quote in either would break it.
        value = vim.eval('input("%s", "", "%s")' % (prompt, completion))
    else:
        value = vim.eval('input("%s")' % prompt)
    # Clear the prompt leftovers from the command line.
    vim.command('redraw')

    # Check for empty value and bail out if not allowed
    if not value and not allow_empty:
        raise TaskWikiException("Input must be provided.")

    return value
def get_current_window():
    """
    Returns a current window number. Provides a workaround for Neovim.
    """
    try:
        # window.number is 1-based; normalize to a 0-based index
        # suitable for vim.windows[...].
        return vim.current.window.number - 1
    except AttributeError:
        # Hosts without window.number: fall back to evaluating winnr().
        return int(vim.eval('winnr()')) - 1
def get_buffer(number):
    """
    Returns a buffer with specified number. Provides a workaround for Neovim.
    Note that vim.buffers may not contain all buffers with sequential numbers.
    """
    # Scan rather than index, since buffer numbers can be sparse.
    buffers = [buffer for buffer in vim.buffers if buffer.number == number]
    # Exactly one buffer must carry the requested number.
    assert len(buffers) == 1
    return buffers[0]
def convert_colorstring_for_vim(string):
    """Convert a TaskWarrior color string (see 'task color') into the
    cterm arguments understood by vim's :highlight command.

    The first recognized color becomes the foreground, the second the
    background; the 'bold' effect maps to cterm=bold. Other words
    (e.g. the separator 'on') are ignored.
    """
    # The eight basic ANSI terminal colors. Fixed: the original list
    # contained the typo "magneta" and listed "yellow" twice in place
    # of "cyan", so magenta/cyan specs were silently dropped.
    BASIC_COLORS = [
        "blue", "yellow", "green", "red",
        "magenta", "cyan", "white", "black"
    ]
    EFFECTS = ['bold']

    def is_color(c):
        # A color is 'colorN', a 'rgbXYZ' cube spec, or a basic name.
        return any([
            c.startswith('color'),
            c.startswith('rgb'),
            c in BASIC_COLORS
        ])

    def parse_color(c):
        if c.startswith('color'):
            # 'colorN' -> direct 256-color palette index N.
            return c[5:]
        elif c.startswith('rgb'):
            # TaskWarrior color cube notation, see 'task color'
            red = int(c[3])
            green = int(c[4])
            blue = int(c[5])
            # The 6x6x6 color cube starts at palette index 16.
            index = 16 + red * 36 + green * 6 + blue
            return str(index)
        else:
            return c

    foreground = None
    background = None
    effect = None

    for part in string.split():
        if is_color(part) and foreground is None:
            foreground = parse_color(part)
        elif is_color(part) and background is None:
            background = parse_color(part)
        elif part in EFFECTS:
            effect = part

    # cterm/ctermfg fragments carry a trailing space; ctermbg does not.
    result = ''.join([
        'cterm={0} '.format(effect) if effect else '',
        'ctermfg={0} '.format(foreground) if foreground else '',
        'ctermbg={0}'.format(background) if background else '',
    ])

    return result
def get_buffer_shortname():
    """Return the short (as-typed) name of the current buffer."""
    return vim.eval('expand("%")')


def get_absolute_filepath():
    """Return the absolute path of the file in the current buffer."""
    return vim.eval('expand("%:p")')


def get_current_line_number():
    """Return the cursor's line number, converted to 0-based."""
    row, column = vim.current.window.cursor
    return row - 1


def get_current_column_number():
    """Return the cursor's column number as reported by vim."""
    row, column = vim.current.window.cursor
    return column


def get_valid_tabpage_buffers(tabpage):
    """Return the still-valid buffers shown in *tabpage*'s windows."""
    return [win.buffer for win in tabpage.windows if win.buffer.valid]


def buffer_shortname(buffer):
    """Return the basename of *buffer*'s full path."""
    return os.path.basename(buffer.name)


def selected_line_numbers():
    """Return the 0-based line numbers of the current range/selection."""
    return range(vim.current.range.start, vim.current.range.end + 1)


def get_lines_above(including_current=True):
    """Yield buffer lines upwards, starting at (or just above) the cursor."""
    # Add 1 to the current line number if we want to include this line
    bonus = 1 if including_current else 0
    for line in reversed(range(0, get_current_line_number() + bonus)):
        yield vim.current.buffer[line]


def strip_ansi_escape_sequence(string):
    """Remove ANSI escape sequences (color codes) from *string*."""
    return regexp.ANSI_ESCAPE_SEQ.sub("", string)
def show_in_split(lines, size=None, position="belowright", vertical=False,
                  name="taskwiki", replace_opened=True,
                  activate_cursorline=False):
    """Display *lines* in a freshly created read-only scratch split.

    size -- height (width when vertical) of the split; computed from the
            content when None, capped by taskwiki_split_max_height/width
    position -- vim split position modifier (e.g. 'belowright')
    vertical -- open a vertical split instead of a horizontal one
    name -- base name for the scratch buffer; a random suffix is added
    replace_opened -- wipe same-named buffers in this tabpage first
    activate_cursorline -- enable 'cursorline' while the split is open
    """
    # If there is no output, bail
    if not lines:
        print("No output.", file=sys.stderr)
        return

    # Sanitize the output
    lines = [l.rstrip() for l in lines]

    # If the multiple buffers with this name are not desired
    # close all the old ones in this tabpage
    if replace_opened:
        for buf in get_valid_tabpage_buffers(vim.current.tabpage):
            shortname = buffer_shortname(buf)
            if shortname.startswith(name):
                vim.command('bwipe {0}'.format(shortname))

    # Generate a random suffix for the buffer name
    # This is needed since AnsiEsc saves the buffer name inside
    # s: scoped variables. Also lowers the probability of clash with
    # a real file.
    random_suffix = random.randint(1,100000)
    name = '{0}.{1}'.format(name, random_suffix)

    # Compute the size of the split
    if size is None:
        if vertical:
            # Maximum number of columns used + small offset
            # Strip the color codes, since they do not show up in the split
            size = max([len(strip_ansi_escape_sequence(l)) for l in lines]) + 1

            # If absolute maximum width was set, do not exceed it
            if get_var('taskwiki_split_max_width'):
                size = min(size, get_var('taskwiki_split_max_width'))

        else:
            # Number of lines
            size = len(lines)

            # If absolute maximum height was set, do not exceed it
            if get_var('taskwiki_split_max_height'):
                size = min(size, get_var('taskwiki_split_max_height'))

    # Set cursorline in the window
    cursorline_activated_in_window = None

    if activate_cursorline and not vim.current.window.options['cursorline']:
        vim.current.window.options['cursorline'] = True
        cursorline_activated_in_window = get_current_window()

    # Call 'vsplit' for vertical, otherwise 'split'
    vertical_prefix = 'v' if vertical else ''
    vim.command("{0} {1}{2}split".format(position, size, vertical_prefix))
    vim.command("edit {0}".format(name))

    # For some weird reason, edit does not work for some users, but
    # enew + file <name> does. Use as fallback.
    if get_buffer_shortname() != name:
        vim.command("enew")
        vim.command("file {0}".format(name))

    # If we were still unable to open the buffer, bail out
    if get_buffer_shortname() != name:
        print("Unable to open a new buffer with name: {0}".format(name))
        return

    # We're good to go!
    vim.command("setlocal noswapfile")
    vim.command("setlocal modifiable")
    vim.current.buffer.append(lines, 0)

    vim.command("setlocal readonly")
    vim.command("setlocal nomodifiable")
    vim.command("setlocal buftype=nofile")
    vim.command("setlocal nowrap")
    vim.command("setlocal nonumber")

    # Keep window size fixed despite resizing
    vim.command("setlocal winfixheight")
    vim.command("setlocal winfixwidth")

    # Make the split easily closable
    vim.command("nnoremap <silent> <buffer> q :bwipe<CR>")
    vim.command("nnoremap <silent> <buffer> <enter> :bwipe<CR>")

    # Remove cursorline in original window if it was this split which set it
    if cursorline_activated_in_window is not None:
        vim.command("au BufLeave,BufDelete,BufWipeout <buffer> "
                    + get_var('taskwiki_py') +
                    " vim.windows[{0}].options['cursorline']=False"
                    .format(cursorline_activated_in_window))

    if ANSI_ESC_AVAILABLE:
        vim.command("AnsiEsc")
def tw_execute_colorful(tw, *args, **kwargs):
    """Run a task command with color and window-size overrides suitable
    for rendering inside a split."""
    config = kwargs.setdefault('config_override', {})
    want_maxwidth = kwargs.pop('maxwidth', False)
    want_maxheight = kwargs.pop('maxheight', False)

    # Only force colors when the AnsiEsc plugin can render them.
    if ANSI_ESC_AVAILABLE:
        config['_forcecolor'] = "yes"

    if want_maxheight:
        config['defaultheight'] = vim.current.window.height
    if want_maxwidth:
        config['defaultwidth'] = vim.current.window.width

    return tw_execute_safely(tw, *args, **kwargs)
def tw_execute_safely(tw, *args, **kwargs):
    """Execute a taskwarrior command without raising on failure.

    Returns the stdout lines on success; on a non-zero exit prints the
    last stderr line and returns None implicitly.
    """
    kwargs['allow_failure'] = False
    kwargs['return_all'] = True
    out, err, rc = tw.execute_command(*args, **kwargs)

    if rc == 0:
        return out
    else:
        # In case of failure, print everything as os output
        # Left for debug mode
        # for line in itertools.chain(out, err[:-1]):
        #     print(line)
        # Display the last line as failure
        if err:
            print(err[-1], file=sys.stderr)
@contextlib.contextmanager
def current_line_highlighted():
    """Temporarily enable 'cursorline' in the current window.

    The original setting is restored on exit even when the wrapped block
    raises, and even if the current window changed meanwhile.
    """
    original_value = vim.current.window.options['cursorline']
    original_window_number = get_current_window()
    vim.current.window.options['cursorline'] = True
    # Redraw so the highlight is visible before the body runs.
    vim.command('redraw')
    try:
        yield
    finally:
        # Restore on the window we started in, not the current one.
        original_window = vim.windows[original_window_number]
        original_window.options['cursorline'] = original_value
@contextlib.contextmanager
def current_line_preserved():
    """
    Make sure the current line is preserved during the operation by marking it
    first and later restoring it with the :number command, i.e. :42.
    """
    current_line = get_current_line_number() + 1
    try:
        yield
    finally:
        # Restore even when the wrapped block raises; previously an
        # exception in the body skipped the restore entirely.
        vim.command('{0}'.format(current_line))
def enforce_dependencies(cache):
    """Raise TaskWikiException unless the minimum tasklib and taskwarrior
    versions are installed."""
    # Vim version is already checked in vimscript file
    # This is done so that we avoid problems with +python
    TASKLIB_VERSION = '2.2.1'
    TASKWARRIOR_VERSION = '2.4.0'

    # Check tasklib version
    # Releases without __version__ are treated as 2.2.0 (too old).
    tasklib_module_version = getattr(tasklib, '__version__', '2.2.0')
    tasklib_installed_version = LooseVersion(tasklib_module_version)
    tasklib_required_version = LooseVersion(TASKLIB_VERSION)

    if tasklib_required_version > tasklib_installed_version:
        raise TaskWikiException("Tasklib version at least %s is required."
                                % TASKLIB_VERSION)

    # Check taskwarrior version
    tw = cache.warriors['default']
    taskwarrior_installed_version = LooseVersion(tw.version)
    taskwarrior_required_version = LooseVersion(TASKWARRIOR_VERSION)

    if taskwarrior_required_version > taskwarrior_installed_version:
        raise TaskWikiException("Taskwarrior version at least %s is required."
                                % TASKWARRIOR_VERSION)
def decode_bytes(var):
    """
    Data structures obtained from vim under python3 will return bytestrings.
    Neovim under python3 will return str.
    Make sure we can handle that.

    Recursively decodes bytes inside lists and (vim) dictionaries.
    """
    if NEOVIM:
        # Neovim already hands over str; nothing to convert.
        return var

    if isinstance(var, bytes):
        return var.decode()

    if isinstance(var, list):
        # Redundant list() wrapper around the comprehension removed.
        return [decode_bytes(element) for element in var]

    if isinstance(var, dict) or 'vim.dictionary' in str(type(var)):
        return {
            decode_bytes(key): decode_bytes(value)
            for key, value in var.items()
        }

    return var
def get_var(name, default=None, vars_obj=None):
    """
    Provide a layer for getting a variable value out of vim, consistent over
    vim+py2/vim+py3/neovim combinations.
    Params:
        default - default value, returned when variable is not found
        vars_obj - used vars object, defaults to vim.vars
    """
    source = vars_obj or vim.vars
    raw_value = source.get(name)
    # Missing variables come back as None -> substitute the default;
    # anything else is decoded for py3/bytes compatibility.
    return default if raw_value is None else decode_bytes(raw_value)
def is_midnight(dt):
    """
    Determines if given datetime object is set to midnight.
    """
    # Compare the whole time-of-day triple at once.
    return (dt.hour, dt.minute, dt.second) == (0, 0, 0)
|
from test import support
from test.support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest
import unittest
import operator
import sys
import functools
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, but don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEqual or similar. It's a
#   lengthy operation and the error message will be utterly useless due to
#   its size. To check whether a result has the right contents, better
#   to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - While the bigmemtest decorator speaks of 'minsize', all tests will
# actually be called with a much smaller number too, in the normal
# test run (5Kb currently.) This is so the tests themselves get frequent
# testing. Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
# BEWARE: it seems that one failing test can yield other subsequent tests to
# fail as well. I do not know whether it is due to memory fragmentation
# issues, or other specifics of the platform malloc() routine.
# 4 bytes per character on wide (UCS-4) builds where code points above
# the BMP exist, otherwise 2 (narrow builds).
character_size = 4 if sys.maxunicode > 0xFFFF else 2
class BaseStrTest:
    """Bigmem tests shared by str, bytes and bytearray.

    Concrete subclasses provide ``from_latin1()``, which converts a
    latin-1 ``str`` literal into the string-like type under test.  The
    tests themselves only exercise methods common to all three types.
    """

    @bigmemtest(minsize=_2G, memuse=2)
    def test_capitalize(self, size):
        _ = self.from_latin1
        SUBSTR = self.from_latin1(' abc def ghi')
        s = _('-') * size + SUBSTR
        caps = s.capitalize()
        self.assertEqual(caps[-len(SUBSTR):],
                         SUBSTR.capitalize())
        self.assertEqual(caps.lstrip(_('-')), SUBSTR)

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_center(self, size):
        SUBSTR = self.from_latin1(' abc def ghi')
        s = SUBSTR.center(size)
        self.assertEqual(len(s), size)
        lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
        if len(s) % 2:
            lpadsize += 1
        self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_count(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = _('.') * size + SUBSTR
        self.assertEqual(s.count(_('.')), size)
        s += _('.')
        self.assertEqual(s.count(_('.')), size + 1)
        self.assertEqual(s.count(_(' ')), 3)
        self.assertEqual(s.count(_('i')), 1)
        self.assertEqual(s.count(_('j')), 0)

    @bigmemtest(minsize=_2G, memuse=2)
    def test_endswith(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = _('-') * size + SUBSTR
        self.assertTrue(s.endswith(SUBSTR))
        self.assertTrue(s.endswith(s))
        s2 = _('...') + s
        self.assertTrue(s2.endswith(s))
        self.assertFalse(s.endswith(_('a') + SUBSTR))
        self.assertFalse(SUBSTR.endswith(s))

    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_expandtabs(self, size):
        _ = self.from_latin1
        s = _('-') * size
        tabsize = 8
        self.assertEqual(s.expandtabs(), s)
        del s
        slen, remainder = divmod(size, tabsize)
        s = _(' \t') * slen
        s = s.expandtabs(tabsize)
        self.assertEqual(len(s), size - remainder)
        self.assertEqual(len(s.strip(_(' '))), 0)

    @bigmemtest(minsize=_2G, memuse=2)
    def test_find(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.find(_(' ')), 0)
        self.assertEqual(s.find(SUBSTR), 0)
        self.assertEqual(s.find(_(' '), sublen), sublen + size)
        self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size)
        self.assertEqual(s.find(_('i')), SUBSTR.find(_('i')))
        self.assertEqual(s.find(_('i'), sublen),
                         sublen + size + SUBSTR.find(_('i')))
        self.assertEqual(s.find(_('i'), size),
                         sublen + size + SUBSTR.find(_('i')))
        self.assertEqual(s.find(_('j')), -1)

    @bigmemtest(minsize=_2G, memuse=2)
    def test_index(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.index(_(' ')), 0)
        self.assertEqual(s.index(SUBSTR), 0)
        self.assertEqual(s.index(_(' '), sublen), sublen + size)
        self.assertEqual(s.index(SUBSTR, sublen), sublen + size)
        self.assertEqual(s.index(_('i')), SUBSTR.index(_('i')))
        self.assertEqual(s.index(_('i'), sublen),
                         sublen + size + SUBSTR.index(_('i')))
        self.assertEqual(s.index(_('i'), size),
                         sublen + size + SUBSTR.index(_('i')))
        self.assertRaises(ValueError, s.index, _('j'))

    @bigmemtest(minsize=_2G, memuse=2)
    def test_isalnum(self, size):
        _ = self.from_latin1
        SUBSTR = _('123456')
        s = _('a') * size + SUBSTR
        self.assertTrue(s.isalnum())
        s += _('.')
        self.assertFalse(s.isalnum())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_isalpha(self, size):
        _ = self.from_latin1
        SUBSTR = _('zzzzzzz')
        s = _('a') * size + SUBSTR
        self.assertTrue(s.isalpha())
        s += _('.')
        self.assertFalse(s.isalpha())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_isdigit(self, size):
        _ = self.from_latin1
        SUBSTR = _('123456')
        s = _('9') * size + SUBSTR
        self.assertTrue(s.isdigit())
        s += _('z')
        self.assertFalse(s.isdigit())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_islower(self, size):
        _ = self.from_latin1
        chars = _(''.join(
            chr(c) for c in range(255) if not chr(c).isupper()))
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.assertTrue(s.islower())
        s += _('A')
        self.assertFalse(s.islower())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_isspace(self, size):
        _ = self.from_latin1
        whitespace = _(' \f\n\r\t\v')
        repeats = size // len(whitespace) + 2
        s = whitespace * repeats
        self.assertTrue(s.isspace())
        s += _('j')
        self.assertFalse(s.isspace())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_istitle(self, size):
        _ = self.from_latin1
        SUBSTR = _('123456')
        s = _('').join([_('A'), _('a') * size, SUBSTR])
        self.assertTrue(s.istitle())
        s += _('A')
        self.assertTrue(s.istitle())
        s += _('aA')
        self.assertFalse(s.istitle())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_isupper(self, size):
        _ = self.from_latin1
        chars = _(''.join(
            chr(c) for c in range(255) if not chr(c).islower()))
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.assertTrue(s.isupper())
        s += _('a')
        self.assertFalse(s.isupper())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_join(self, size):
        _ = self.from_latin1
        s = _('A') * size
        x = s.join([_('aaaaa'), _('bbbbb')])
        self.assertEqual(x.count(_('a')), 5)
        self.assertEqual(x.count(_('b')), 5)
        self.assertTrue(x.startswith(_('aaaaaA')))
        self.assertTrue(x.endswith(_('Abbbbb')))

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_ljust(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = SUBSTR.ljust(size)
        self.assertTrue(s.startswith(SUBSTR + _(' ')))
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_lower(self, size):
        _ = self.from_latin1
        s = _('A') * size
        s = s.lower()
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(_('a')), size)

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_lstrip(self, size):
        _ = self.from_latin1
        SUBSTR = _('abc def ghi')
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.lstrip(), SUBSTR.lstrip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        # Type-specific optimization: stripping nothing returns the
        # original object for immutable types.
        if isinstance(s, (str, bytes)):
            stripped = s.lstrip()
            self.assertIs(stripped, s)

    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_replace(self, size):
        _ = self.from_latin1
        replacement = _('a')
        s = _(' ') * size
        s = s.replace(_(' '), replacement)
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(replacement), size)
        s = s.replace(replacement, _(' '), size - 4)
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(replacement), 4)
        self.assertEqual(s[-10:], _('      aaaa'))

    @bigmemtest(minsize=_2G, memuse=2)
    def test_rfind(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.rfind(_(' ')), sublen + size + SUBSTR.rfind(_(' ')))
        self.assertEqual(s.rfind(SUBSTR), sublen + size)
        self.assertEqual(s.rfind(_(' '), 0, size), SUBSTR.rfind(_(' ')))
        self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0)
        self.assertEqual(s.rfind(_('i')), sublen + size + SUBSTR.rfind(_('i')))
        self.assertEqual(s.rfind(_('i'), 0, sublen), SUBSTR.rfind(_('i')))
        self.assertEqual(s.rfind(_('i'), 0, sublen + size),
                         SUBSTR.rfind(_('i')))
        self.assertEqual(s.rfind(_('j')), -1)

    @bigmemtest(minsize=_2G, memuse=2)
    def test_rindex(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        sublen = len(SUBSTR)
        s = _('').join([SUBSTR, _('-') * size, SUBSTR])
        self.assertEqual(s.rindex(_(' ')),
                         sublen + size + SUBSTR.rindex(_(' ')))
        self.assertEqual(s.rindex(SUBSTR), sublen + size)
        self.assertEqual(s.rindex(_(' '), 0, sublen + size - 1),
                         SUBSTR.rindex(_(' ')))
        self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0)
        self.assertEqual(s.rindex(_('i')),
                         sublen + size + SUBSTR.rindex(_('i')))
        self.assertEqual(s.rindex(_('i'), 0, sublen), SUBSTR.rindex(_('i')))
        self.assertEqual(s.rindex(_('i'), 0, sublen + size),
                         SUBSTR.rindex(_('i')))
        self.assertRaises(ValueError, s.rindex, _('j'))

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_rjust(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = SUBSTR.ljust(size)
        self.assertTrue(s.startswith(SUBSTR + _(' ')))
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_rstrip(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.rstrip(), SUBSTR.rstrip())
        del s
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        # Type-specific optimization: stripping nothing returns the
        # original object for immutable types.
        if isinstance(s, (str, bytes)):
            stripped = s.rstrip()
            self.assertIs(stripped, s)

    # The test takes about size bytes to build a string, and then about
    # sqrt(size) substrings of sqrt(size) in size and a list to
    # hold sqrt(size) items. It's close but just over 2x size.
    @bigmemtest(minsize=_2G, memuse=2.1)
    def test_split_small(self, size):
        _ = self.from_latin1
        # Crudely calculate an estimate so that the result of s.split won't
        # take up an inordinate amount of memory
        chunksize = int(size ** 0.5 + 2)
        SUBSTR = _('a') + _(' ') * chunksize
        s = SUBSTR * chunksize
        l = s.split()
        self.assertEqual(len(l), chunksize)
        expected = _('a')
        for item in l:
            self.assertEqual(item, expected)
        del l
        l = s.split(_('a'))
        self.assertEqual(len(l), chunksize + 1)
        expected = _(' ') * chunksize
        for item in filter(None, l):
            self.assertEqual(item, expected)

    # Allocates a string of twice size (and briefly two) and a list of
    # size. Because of internal affairs, the s.split() call produces a
    # list of size times the same one-character string, so we only
    # suffer for the list size. (Otherwise, it'd cost another 48 times
    # size in bytes!) Nevertheless, a list of size takes
    # 8*size bytes.
    @bigmemtest(minsize=_2G + 5, memuse=10)
    def test_split_large(self, size):
        _ = self.from_latin1
        s = _(' a') * size + _(' ')
        l = s.split()
        self.assertEqual(len(l), size)
        self.assertEqual(set(l), set([_('a')]))
        del l
        l = s.split(_('a'))
        self.assertEqual(len(l), size + 1)
        self.assertEqual(set(l), set([_(' ')]))

    @bigmemtest(minsize=_2G, memuse=2.1)
    def test_splitlines(self, size):
        _ = self.from_latin1
        # Crudely calculate an estimate so that the result of s.split won't
        # take up an inordinate amount of memory
        chunksize = int(size ** 0.5 + 2) // 2
        SUBSTR = _(' ') * chunksize + _('\n') + _(' ') * chunksize + _('\r\n')
        s = SUBSTR * chunksize
        l = s.splitlines()
        self.assertEqual(len(l), chunksize * 2)
        expected = _(' ') * chunksize
        for item in l:
            self.assertEqual(item, expected)

    @bigmemtest(minsize=_2G, memuse=2)
    def test_startswith(self, size):
        _ = self.from_latin1
        SUBSTR = _(' abc def ghi')
        s = _('-') * size + SUBSTR
        self.assertTrue(s.startswith(s))
        self.assertTrue(s.startswith(_('-') * size))
        self.assertFalse(s.startswith(SUBSTR))

    @bigmemtest(minsize=_2G, memuse=1)
    def test_strip(self, size):
        _ = self.from_latin1
        SUBSTR = _('   abc def ghi   ')
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())

    @bigmemtest(minsize=_2G, memuse=2)
    def test_swapcase(self, size):
        _ = self.from_latin1
        SUBSTR = _("aBcDeFG12.'\xa9\x00")
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.swapcase()
        self.assertEqual(len(s), sublen * repeats)
        self.assertEqual(s[:sublen * 3], SUBSTR.swapcase() * 3)
        self.assertEqual(s[-sublen * 3:], SUBSTR.swapcase() * 3)

    @bigmemtest(minsize=_2G, memuse=2)
    def test_title(self, size):
        _ = self.from_latin1
        SUBSTR = _('SpaaHAaaAaham')
        s = SUBSTR * (size // len(SUBSTR) + 2)
        s = s.title()
        self.assertTrue(s.startswith((SUBSTR * 3).title()))
        self.assertTrue(s.endswith(SUBSTR.lower() * 3))

    @bigmemtest(minsize=_2G, memuse=2)
    def test_translate(self, size):
        _ = self.from_latin1
        SUBSTR = _('aZz.z.Aaz.')
        if isinstance(SUBSTR, str):
            trans = {
                ord(_('.')): _('-'),
                ord(_('a')): _('!'),
                ord(_('Z')): _('$'),
            }
        else:
            trans = bytes.maketrans(b'.aZ', b'-!$')
        sublen = len(SUBSTR)
        repeats = size // sublen + 2
        s = SUBSTR * repeats
        s = s.translate(trans)
        self.assertEqual(len(s), repeats * sublen)
        self.assertEqual(s[:sublen], SUBSTR.translate(trans))
        self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
        self.assertEqual(s.count(_('.')), 0)
        self.assertEqual(s.count(_('!')), repeats * 2)
        self.assertEqual(s.count(_('z')), repeats * 3)

    @bigmemtest(minsize=_2G + 5, memuse=2)
    def test_upper(self, size):
        _ = self.from_latin1
        s = _('a') * size
        s = s.upper()
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(_('A')), size)

    @bigmemtest(minsize=_2G + 20, memuse=1)
    def test_zfill(self, size):
        _ = self.from_latin1
        SUBSTR = _('-568324723598234')
        s = SUBSTR.zfill(size)
        self.assertTrue(s.endswith(_('0') + SUBSTR[1:]))
        self.assertTrue(s.startswith(_('-0')))
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(_('0')), size - len(SUBSTR))

    # This test is meaningful even with size < 2G, as long as the
    # doubled string is > 2G (but it tests more if both are > 2G :)
    @bigmemtest(minsize=_1G + 2, memuse=3)
    def test_concat(self, size):
        _ = self.from_latin1
        s = _('.') * size
        self.assertEqual(len(s), size)
        s = s + s
        self.assertEqual(len(s), size * 2)
        self.assertEqual(s.count(_('.')), size * 2)

    # This test is meaningful even with size < 2G, as long as the
    # repeated string is > 2G (but it tests more if both are > 2G :)
    @bigmemtest(minsize=_1G + 2, memuse=3)
    def test_repeat(self, size):
        _ = self.from_latin1
        s = _('.') * size
        self.assertEqual(len(s), size)
        s = s * 2
        self.assertEqual(len(s), size * 2)
        self.assertEqual(s.count(_('.')), size * 2)

    @bigmemtest(minsize=_2G + 20, memuse=2)
    def test_slice_and_getitem(self, size):
        _ = self.from_latin1
        SUBSTR = _('0123456789')
        sublen = len(SUBSTR)
        s = SUBSTR * (size // sublen)
        stepsize = len(s) // 100
        stepsize = stepsize - (stepsize % sublen)
        for i in range(0, len(s) - stepsize, stepsize):
            self.assertEqual(s[i], SUBSTR[0])
            self.assertEqual(s[i:i + sublen], SUBSTR)
            self.assertEqual(s[i:i + sublen:2], SUBSTR[::2])
            if i > 0:
                self.assertEqual(s[i + sublen - 1:i - 1:-3],
                                 SUBSTR[sublen::-3])
        # Make sure we do some slicing and indexing near the end of the
        # string, too.
        self.assertEqual(s[len(s) - 1], SUBSTR[-1])
        self.assertEqual(s[-1], SUBSTR[-1])
        self.assertEqual(s[len(s) - 10], SUBSTR[0])
        self.assertEqual(s[-sublen], SUBSTR[0])
        self.assertEqual(s[len(s):], _(''))
        self.assertEqual(s[len(s) - 1:], SUBSTR[-1:])
        self.assertEqual(s[-1:], SUBSTR[-1:])
        self.assertEqual(s[len(s) - sublen:], SUBSTR)
        self.assertEqual(s[-sublen:], SUBSTR)
        self.assertEqual(len(s[:]), len(s))
        self.assertEqual(len(s[:len(s) - 5]), len(s) - 5)
        self.assertEqual(len(s[5:-5]), len(s) - 10)
        self.assertRaises(IndexError, operator.getitem, s, len(s))
        self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
        # NOTE: this previously read "len(s) + 1<<31", which groups as
        # "(len(s) + 1) << 31" because + binds tighter than <<.  The
        # intent is an index just past 2**31.
        self.assertRaises(IndexError, operator.getitem, s, len(s) + (1 << 31))

    @bigmemtest(minsize=_2G, memuse=2)
    def test_contains(self, size):
        _ = self.from_latin1
        SUBSTR = _('0123456789')
        edge = _('-') * (size // 2)
        s = _('').join([edge, SUBSTR, edge])
        del edge
        self.assertIn(SUBSTR, s)
        self.assertNotIn(SUBSTR * 2, s)
        self.assertIn(_('-'), s)
        self.assertNotIn(_('a'), s)
        s += _('a')
        self.assertIn(_('a'), s)

    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_compare(self, size):
        _ = self.from_latin1
        s1 = _('-') * size
        s2 = _('-') * size
        self.assertEqual(s1, s2)
        del s2
        s2 = s1 + _('a')
        self.assertFalse(s1 == s2)
        del s2
        s2 = _('.') * size
        self.assertFalse(s1 == s2)

    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_hash(self, size):
        # Not sure if we can do any meaningful tests here... Even if we
        # start relying on the exact algorithm used, the result will be
        # different depending on the size of the C 'long int'. Even this
        # test is dodgy (there's no *guarantee* that the two things should
        # have a different hash, even if they, in the current
        # implementation, almost always do.)
        _ = self.from_latin1
        s = _('\x00') * size
        h1 = hash(s)
        del s
        s = _('\x00') * (size + 1)
        self.assertFalse(h1 == hash(s))
class StrTest(unittest.TestCase, BaseStrTest):
    """Bigmem tests for ``str``, plus str-only cases (encode, repr, %)."""

    def from_latin1(self, s):
        # str is the native literal type; no conversion needed.
        return s

    def basic_encode_test(self, size, enc, c='.', expectedsize=None):
        """Encode a `size`-char string of `c` and check the byte length."""
        if expectedsize is None:
            expectedsize = size
        s = c * size
        self.assertEqual(len(s.encode(enc)), expectedsize)

    def setUp(self):
        # HACK: adjust memory use of tests inherited from BaseStrTest
        # according to character size.
        self._adjusted = {}
        for name in dir(BaseStrTest):
            if not name.startswith('test_'):
                continue
            meth = getattr(type(self), name)
            try:
                memuse = meth.memuse
            except AttributeError:
                continue
            meth.memuse = character_size * memuse
            self._adjusted[name] = memuse

    def tearDown(self):
        # Restore the original memuse values so other classes that share
        # BaseStrTest see the unscaled numbers.
        for name, memuse in self._adjusted.items():
            getattr(type(self), name).memuse = memuse

    @bigmemtest(minsize=_2G + 2, memuse=character_size + 1)
    def test_encode(self, size):
        return self.basic_encode_test(size, 'utf-8')

    @precisionbigmemtest(size=_4G // 6 + 2, memuse=character_size + 1)
    def test_encode_raw_unicode_escape(self, size):
        try:
            return self.basic_encode_test(size, 'raw_unicode_escape')
        except MemoryError:
            pass # acceptable on 32-bit

    @precisionbigmemtest(size=_4G // 5 + 70, memuse=character_size + 1)
    def test_encode_utf7(self, size):
        try:
            return self.basic_encode_test(size, 'utf7')
        except MemoryError:
            pass # acceptable on 32-bit

    @precisionbigmemtest(size=_4G // 4 + 5, memuse=character_size + 4)
    def test_encode_utf32(self, size):
        try:
            # UTF-32 adds a 4-byte BOM, hence the +4 on the expected size.
            return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
        except MemoryError:
            pass # acceptable on 32-bit

    @precisionbigmemtest(size=_2G - 1, memuse=character_size + 1)
    def test_encode_ascii(self, size):
        return self.basic_encode_test(size, 'ascii', c='A')

    @precisionbigmemtest(size=_4G // 5, memuse=character_size * (6 + 1))
    def test_unicode_repr_overflow(self, size):
        try:
            s = "\uAAAA"*size
            r = repr(s)
        except MemoryError:
            pass # acceptable on 32-bit
        else:
            self.assertTrue(s == eval(r))

    @bigmemtest(minsize=_2G + 10, memuse=character_size * 2)
    def test_format(self, size):
        s = '-' * size
        sf = '%s' % (s,)
        self.assertEqual(s, sf)
        del sf
        sf = '..%s..' % (s,)
        self.assertEqual(len(sf), len(s) + 4)
        self.assertTrue(sf.startswith('..-'))
        self.assertTrue(sf.endswith('-..'))
        del s, sf
        size //= 2
        edge = '-' * size
        s = ''.join([edge, '%s', edge])
        del edge
        s = s % '...'
        self.assertEqual(len(s), size * 2 + 3)
        self.assertEqual(s.count('.'), 3)
        self.assertEqual(s.count('-'), size * 2)

    @bigmemtest(minsize=_2G + 10, memuse=character_size * 2)
    def test_repr_small(self, size):
        s = '-' * size
        s = repr(s)
        self.assertEqual(len(s), size + 2)
        self.assertEqual(s[0], "'")
        self.assertEqual(s[-1], "'")
        self.assertEqual(s.count('-'), size)
        del s
        # repr() will create a string four times as large as this 'binary
        # string', but we don't want to allocate much more than twice
        # size in total. (We do extra testing in test_repr_large())
        size = size // 5 * 2
        s = '\x00' * size
        s = repr(s)
        self.assertEqual(len(s), size * 4 + 2)
        self.assertEqual(s[0], "'")
        self.assertEqual(s[-1], "'")
        self.assertEqual(s.count('\\'), size)
        self.assertEqual(s.count('0'), size * 2)

    @bigmemtest(minsize=_2G + 10, memuse=character_size * 5)
    def test_repr_large(self, size):
        s = '\x00' * size
        s = repr(s)
        self.assertEqual(len(s), size * 4 + 2)
        self.assertEqual(s[0], "'")
        self.assertEqual(s[-1], "'")
        self.assertEqual(s.count('\\'), size)
        self.assertEqual(s.count('0'), size * 2)

    # Use floor division: a float minsize would make the harness compute
    # a non-integer test size.
    @bigmemtest(minsize=2**32 // 5, memuse=character_size * 7)
    def test_unicode_repr(self, size):
        s = "\uAAAA" * size
        for f in (repr, ascii):
            r = f(s)
            self.assertTrue(len(r) > size)
            self.assertTrue(r.endswith(r"\uaaaa'"), r[-10:])
            del r

    # The character takes 4 bytes even in UCS-2 builds because it will
    # be decomposed into surrogates.
    @bigmemtest(minsize=2**32 // 5, memuse=4 + character_size * 9)
    def test_unicode_repr_wide(self, size):
        s = "\U0001AAAA" * size
        for f in (repr, ascii):
            r = f(s)
            self.assertTrue(len(r) > size)
            self.assertTrue(r.endswith(r"\U0001aaaa'"), r[-12:])
            del r
class BytesTest(unittest.TestCase, BaseStrTest):
    """Bigmem tests for ``bytes``, plus a bytes-only decode test."""

    def from_latin1(self, s):
        # Convert the latin-1 str literal into a bytes object.
        return s.encode("latin1")

    @bigmemtest(minsize=_2G + 2, memuse=1 + character_size)
    def test_decode(self, size):
        s = self.from_latin1('.') * size
        self.assertEqual(len(s.decode('utf-8')), size)
class BytearrayTest(unittest.TestCase, BaseStrTest):
    """Bigmem tests for ``bytearray``.

    Hashing is unsupported for bytearray and test_split_large mutates
    shared data assumptions, so both inherited tests are disabled.
    """

    def from_latin1(self, s):
        # Convert the latin-1 str literal into a mutable bytearray.
        return bytearray(s.encode("latin1"))

    @bigmemtest(minsize=_2G + 2, memuse=1 + character_size)
    def test_decode(self, size):
        s = self.from_latin1('.') * size
        self.assertEqual(len(s.decode('utf-8')), size)

    # Disable inapplicable inherited tests.
    test_hash = None
    test_split_large = None
class TupleTest(unittest.TestCase):
    """Bigmem tests for tuples.

    Tuples have a small, fixed-sized head and an array of pointers to
    data. Since we're testing 64-bit addressing, we can assume that the
    pointers are 8 bytes, and that thus that the tuples take up 8 bytes
    per size.

    As a side-effect of testing long tuples, these tests happen to test
    having more than 2<<31 references to any given object. Hence the
    use of different types of objects as contents in different tests.
    """

    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_compare(self, size):
        t1 = ('',) * size
        t2 = ('',) * size
        self.assertEqual(t1, t2)
        del t2
        t2 = ('',) * (size + 1)
        self.assertFalse(t1 == t2)
        del t2
        t2 = (1,) * size
        self.assertFalse(t1 == t2)

    # Test concatenating into a single tuple of more than 2G in length,
    # and concatenating a tuple of more than 2G in length separately, so
    # the smaller test still gets run even if there isn't memory for the
    # larger test (but we still let the tester know the larger test is
    # skipped, in verbose mode.)
    def basic_concat_test(self, size):
        t = ((),) * size
        self.assertEqual(len(t), size)
        t = t + t
        self.assertEqual(len(t), size * 2)

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_concat_small(self, size):
        return self.basic_concat_test(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_concat_large(self, size):
        return self.basic_concat_test(size)

    @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
    def test_contains(self, size):
        t = (1, 2, 3, 4, 5) * size
        self.assertEqual(len(t), size * 5)
        self.assertIn(5, t)
        self.assertNotIn((1, 2, 3, 4, 5), t)
        self.assertNotIn(0, t)

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_hash(self, size):
        t1 = (0,) * size
        h1 = hash(t1)
        del t1
        t2 = (0,) * (size + 1)
        self.assertFalse(h1 == hash(t2))

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_index_and_slice(self, size):
        t = (None,) * size
        self.assertEqual(len(t), size)
        self.assertEqual(t[-1], None)
        self.assertEqual(t[5], None)
        self.assertEqual(t[size - 1], None)
        self.assertRaises(IndexError, operator.getitem, t, size)
        self.assertEqual(t[:5], (None,) * 5)
        self.assertEqual(t[-5:], (None,) * 5)
        self.assertEqual(t[20:25], (None,) * 5)
        self.assertEqual(t[-25:-20], (None,) * 5)
        self.assertEqual(t[size - 5:], (None,) * 5)
        self.assertEqual(t[size - 5:size], (None,) * 5)
        self.assertEqual(t[size - 6:size - 2], (None,) * 4)
        self.assertEqual(t[size:size], ())
        self.assertEqual(t[size:size+5], ())

    # Like test_concat, split in two.
    def basic_test_repeat(self, size):
        t = ('',) * size
        self.assertEqual(len(t), size)
        t = t * 2
        self.assertEqual(len(t), size * 2)

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_repeat_small(self, size):
        return self.basic_test_repeat(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_repeat_large(self, size):
        return self.basic_test_repeat(size)

    @bigmemtest(minsize=_1G - 1, memuse=12)
    def test_repeat_large_2(self, size):
        return self.basic_test_repeat(size)

    @precisionbigmemtest(size=_1G - 1, memuse=9)
    def test_from_2G_generator(self, size):
        try:
            t = tuple(range(size))
        except MemoryError:
            pass # acceptable on 32-bit
        else:
            count = 0
            for item in t:
                self.assertEqual(item, count)
                count += 1
            self.assertEqual(count, size)

    @precisionbigmemtest(size=_1G - 25, memuse=9)
    def test_from_almost_2G_generator(self, size):
        try:
            t = tuple(range(size))
            count = 0
            for item in t:
                self.assertEqual(item, count)
                count += 1
            self.assertEqual(count, size)
        except MemoryError:
            pass # acceptable, expected on 32-bit

    # Like test_concat, split in two.
    def basic_test_repr(self, size):
        t = (0,) * size
        s = repr(t)
        # The repr of a tuple of 0's is exactly three times the tuple length.
        self.assertEqual(len(s), size * 3)
        self.assertEqual(s[:5], '(0, 0')
        self.assertEqual(s[-5:], '0, 0)')
        self.assertEqual(s.count('0'), size)

    @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
    def test_repr_small(self, size):
        return self.basic_test_repr(size)

    @bigmemtest(minsize=_2G + 2, memuse=8 + 3)
    def test_repr_large(self, size):
        return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
    """Bigmem tests for lists.

    Like tuples, lists have a small, fixed-sized head and an array of
    pointers to data, so 8 bytes per size. Also like tuples, we make the
    lists hold references to various objects to test their refcount
    limits.
    """

    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_compare(self, size):
        l1 = [''] * size
        l2 = [''] * size
        self.assertEqual(l1, l2)
        del l2
        l2 = [''] * (size + 1)
        self.assertFalse(l1 == l2)
        del l2
        l2 = [2] * size
        self.assertFalse(l1 == l2)

    # Test concatenating into a single list of more than 2G in length,
    # and concatenating a list of more than 2G in length separately, so
    # the smaller test still gets run even if there isn't memory for the
    # larger test (but we still let the tester know the larger test is
    # skipped, in verbose mode.)
    def basic_test_concat(self, size):
        l = [[]] * size
        self.assertEqual(len(l), size)
        l = l + l
        self.assertEqual(len(l), size * 2)

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_concat_small(self, size):
        return self.basic_test_concat(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_concat_large(self, size):
        return self.basic_test_concat(size)

    def basic_test_inplace_concat(self, size):
        l = [sys.stdout] * size
        l += l
        self.assertEqual(len(l), size * 2)
        self.assertIs(l[0], l[-1])
        self.assertIs(l[size - 1], l[size + 1])

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_inplace_concat_small(self, size):
        return self.basic_test_inplace_concat(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_inplace_concat_large(self, size):
        return self.basic_test_inplace_concat(size)

    @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
    def test_contains(self, size):
        l = [1, 2, 3, 4, 5] * size
        self.assertEqual(len(l), size * 5)
        self.assertIn(5, l)
        self.assertNotIn([1, 2, 3, 4, 5], l)
        self.assertNotIn(0, l)

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_hash(self, size):
        l = [0] * size
        self.assertRaises(TypeError, hash, l)

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_index_and_slice(self, size):
        l = [None] * size
        self.assertEqual(len(l), size)
        self.assertEqual(l[-1], None)
        self.assertEqual(l[5], None)
        self.assertEqual(l[size - 1], None)
        self.assertRaises(IndexError, operator.getitem, l, size)
        self.assertEqual(l[:5], [None] * 5)
        self.assertEqual(l[-5:], [None] * 5)
        self.assertEqual(l[20:25], [None] * 5)
        self.assertEqual(l[-25:-20], [None] * 5)
        self.assertEqual(l[size - 5:], [None] * 5)
        self.assertEqual(l[size - 5:size], [None] * 5)
        self.assertEqual(l[size - 6:size - 2], [None] * 4)
        self.assertEqual(l[size:size], [])
        self.assertEqual(l[size:size+5], [])

        l[size - 2] = 5
        self.assertEqual(len(l), size)
        self.assertEqual(l[-3:], [None, 5, None])
        self.assertEqual(l.count(5), 1)
        self.assertRaises(IndexError, operator.setitem, l, size, 6)
        self.assertEqual(len(l), size)

        l[size - 7:] = [1, 2, 3, 4, 5]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5])

        l[:7] = [1, 2, 3, 4, 5]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None])

        del l[size - 1]
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-1], 4)

        del l[-2:]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[-1], 2)

        del l[0]
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[0], 2)

        del l[:2]
        size -= 2
        self.assertEqual(len(l), size)
        self.assertEqual(l[0], 4)

    # Like test_concat, split in two.
    def basic_test_repeat(self, size):
        # [] * size is always empty, regardless of size.
        l = [] * size
        self.assertFalse(l)
        l = [''] * size
        self.assertEqual(len(l), size)
        l = l * 2
        self.assertEqual(len(l), size * 2)

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_repeat_small(self, size):
        return self.basic_test_repeat(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_repeat_large(self, size):
        return self.basic_test_repeat(size)

    def basic_test_inplace_repeat(self, size):
        l = ['']
        l *= size
        self.assertEqual(len(l), size)
        self.assertIs(l[0], l[-1])
        del l
        l = [''] * size
        l *= 2
        self.assertEqual(len(l), size * 2)
        self.assertIs(l[size - 1], l[-1])

    @bigmemtest(minsize=_2G // 2 + 2, memuse=16)
    def test_inplace_repeat_small(self, size):
        return self.basic_test_inplace_repeat(size)

    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_inplace_repeat_large(self, size):
        return self.basic_test_inplace_repeat(size)

    def basic_test_repr(self, size):
        l = [0] * size
        s = repr(l)
        # The repr of a list of 0's is exactly three times the list length.
        self.assertEqual(len(s), size * 3)
        self.assertEqual(s[:5], '[0, 0')
        self.assertEqual(s[-5:], '0, 0]')
        self.assertEqual(s.count('0'), size)

    @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
    def test_repr_small(self, size):
        return self.basic_test_repr(size)

    @bigmemtest(minsize=_2G + 2, memuse=8 + 3)
    def test_repr_large(self, size):
        return self.basic_test_repr(size)

    # list overallocates ~1/8th of the total size (on first expansion) so
    # the single list.append call puts memuse at 9 bytes per size.
    @bigmemtest(minsize=_2G, memuse=9)
    def test_append(self, size):
        l = [object()] * size
        l.append(object())
        self.assertEqual(len(l), size+1)
        self.assertIs(l[-3], l[-2])
        self.assertFalse(l[-2] is l[-1])

    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_count(self, size):
        l = [1, 2, 3, 4, 5] * size
        self.assertEqual(l.count(1), size)
        self.assertEqual(l.count("1"), 0)

    def basic_test_extend(self, size):
        l = [object] * size
        l.extend(l)
        self.assertEqual(len(l), size * 2)
        self.assertIs(l[0], l[-1])
        self.assertIs(l[size - 1], l[size + 1])

    @bigmemtest(minsize=_2G // 2 + 2, memuse=16)
    def test_extend_small(self, size):
        return self.basic_test_extend(size)

    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_extend_large(self, size):
        return self.basic_test_extend(size)

    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_index(self, size):
        l = [1, 2, 3, 4, 5] * size
        size *= 5
        self.assertEqual(l.index(1), 0)
        self.assertEqual(l.index(5, size - 5), size - 1)
        self.assertEqual(l.index(5, size - 5, size), size - 1)
        self.assertRaises(ValueError, l.index, 1, size - 4, size)
        self.assertRaises(ValueError, l.index, 6)

    # This tests suffers from overallocation, just like test_append.
    @bigmemtest(minsize=_2G + 10, memuse=9)
    def test_insert(self, size):
        l = [1.0] * size
        l.insert(size - 1, "A")
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-3:], [1.0, "A", 1.0])

        # Inserting past the end appends.
        l.insert(size + 1, "B")
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-3:], ["A", 1.0, "B"])

        l.insert(1, "C")
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[:3], [1.0, "C", 1.0])
        self.assertEqual(l[size - 3:], ["A", 1.0, "B"])

    @bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
    def test_pop(self, size):
        l = ["a", "b", "c", "d", "e"] * size
        size *= 5
        self.assertEqual(len(l), size)

        item = l.pop()
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(item, "e")
        self.assertEqual(l[-2:], ["c", "d"])

        item = l.pop(0)
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(item, "a")
        self.assertEqual(l[:2], ["b", "c"])

        item = l.pop(size - 2)
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(item, "c")
        self.assertEqual(l[-2:], ["b", "d"])

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_remove(self, size):
        l = [10] * size
        self.assertEqual(len(l), size)

        l.remove(10)
        size -= 1
        self.assertEqual(len(l), size)

        # Because of the earlier l.remove(), this append doesn't trigger
        # a resize.
        l.append(5)
        size += 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-2:], [10, 5])
        l.remove(5)
        size -= 1
        self.assertEqual(len(l), size)
        self.assertEqual(l[-2:], [10, 10])

    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_reverse(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.reverse()
        self.assertEqual(len(l), size * 5)
        self.assertEqual(l[-5:], [5, 4, 3, 2, 1])
        self.assertEqual(l[:5], [5, 4, 3, 2, 1])

    @bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
    def test_sort(self, size):
        l = [1, 2, 3, 4, 5] * size
        l.sort()
        self.assertEqual(len(l), size * 5)
        self.assertEqual(l.count(1), size)
        self.assertEqual(l[:10], [1] * 10)
        self.assertEqual(l[-10:], [5] * 10)
def test_main():
    """Run every bigmem test case defined in this module."""
    all_cases = (StrTest, BytesTest, BytearrayTest, TupleTest, ListTest)
    support.run_unittest(*all_cases)
if __name__ == '__main__':
    # An optional command-line argument sets the memory limit for the
    # bigmem tests (e.g. "4G"); without it only small sizes are exercised.
    if len(sys.argv) > 1:
        support.set_memlimit(sys.argv[1])
    test_main()
|
# -*- coding: utf-8 -*-
import asyncio
import logging
import os
import random
import re
import shutil
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from urllib.parse import quote
import discord
from ...core.app import App
from ...sources import crawler_list
from ...utils.uploader import upload
from .config import max_workers, public_ip, public_path
logger = logging.getLogger(__name__)
# Ebook output formats users may request (see display_output_selection).
available_formats = [
    'epub',
    'text',
    'web',
    'mobi',
    'pdf',
    'fb2',
]
# Set DISCORD_DISABLE_SEARCH=true to restrict the bot to direct novel URLs.
disable_search = os.getenv('DISCORD_DISABLE_SEARCH') == 'true'
class MessageHandler:
    """Per-user conversation state machine for the Discord crawler bot.

    Each incoming message is dispatched on a thread pool; ``self.state``
    holds the callable that handles the *next* message, so the handler
    walks through: novel URL -> (search) -> chapter range -> output
    formats -> download/upload.
    """
    def __init__(self, client):
        # One crawler App instance per user session.
        self.app = App()
        self.client = client
        # Callable invoked for the next message; None until first message.
        self.state = None
        self.executor = ThreadPoolExecutor(max_workers)
        # Used by external watchdogs to expire idle sessions — TODO confirm.
        self.last_activity = datetime.now()
        self.closed = False
    # end def
    def process(self, message):
        """Queue *message* for handling on the worker pool."""
        self.last_activity = datetime.now()
        self.executor.submit(self.handle_message, message)
    # end def
    def destroy(self):
        """Tear down the session and notify the user.

        NOTE(review): relies on ``self.user`` which is only set in
        handle_message — destroy before the first message would raise
        (caught by the except below).
        """
        try:
            self.client.handlers.pop(str(self.user.id))
            self.send_sync('Closing current session...')
            self.executor.shutdown(wait=False)
            self.app.destroy()
            # shutil.rmtree(self.app.output_path, ignore_errors=True)
        except Exception:
            logger.exception('While destroying MessageHandler')
        finally:
            self.send_sync('Session closed. Send *start* to start over')
            self.closed = True
        # end try
    # end def
    def handle_message(self, message):
        """Entry point for one message: record it and run the current state."""
        self.message = message
        self.user = message.author
        if not self.state:
            self.state = self.get_novel_url
        # end if
        try:
            self.state()
        except Exception as ex:
            logger.exception('Failed to process state')
            self.send_sync('Something went wrong!\n`%s`' % str(ex))
            self.executor.submit(self.destroy)
        # end try
    # end def
    # ---------------------------------------------------------------------- #
    def wait_for(self, async_coroutine):
        """Run *async_coroutine* on the client loop and block until done."""
        asyncio.run_coroutine_threadsafe(
            async_coroutine,
            self.client.loop
        ).result()
    # end def
    async def send(self, *contents):
        """Send each non-empty text in *contents* as a DM to the user."""
        if self.closed:
            return
        self.last_activity = datetime.now()
        async with self.user.typing():
            for text in contents:
                if not text:
                    continue
                # end if
                await self.user.send(text)
            # end for
        # end with
    # end def
    def send_sync(self, *contents):
        """Blocking wrapper around send() for worker threads."""
        self.wait_for(self.send(*contents))
    # end def
    def busy_state(self):
        """State used while a long task runs; only honors !cancel."""
        text = self.message.content.strip()
        if text == '!cancel':
            self.executor.submit(self.destroy)
            return
        # end if
        self.send_sync(random.choice([
            'Send !cancel to stop this session.',
            'Please wait...',
            'Processing, give me more time...',
            'I am just a bot. Please be patient...',
            'Waiting for more RAM...',
            'A little bit longer...',
            'I\'ll be with you in a bit...',
            'Patience! This is difficult, you know...',
        ]))
    # end def
    # ---------------------------------------------------------------------- #
    def get_novel_url(self):
        """Prompt for a novel URL (or a search query when search is enabled)."""
        self.state = self.busy_state
        if disable_search:
            self.send_sync(
                'Send me an URL of novel info page with chapter list!'
            )
        else:
            self.send_sync(
                'I recognize these two categories:\n'
                '- Profile page url of a lightnovel.\n'
                '- A query to search your lightnovel.',
                'What are you looking for?'
            )
        # end if
        self.state = self.handle_novel_url
    # end def
    def handle_novel_url(self):
        """Interpret the reply as either a crawlable URL or a search query."""
        self.state = self.busy_state
        text = self.message.content.strip()
        if text == '!cancel':
            self.executor.submit(self.destroy)
            return
        # end if
        try:
            self.app.user_input = self.message.content.strip()
            self.app.init_search()
        except Exception:
            self.send_sync('\n'.join([
                'Sorry! I do not recognize this sources yet.',
                'See list of supported sources here:',
                'https://github.com/dipu-bd/lightnovel-crawler#c3-supported-sources',
            ]))
            self.get_novel_url()
            # NOTE(review): no return here — execution falls through to the
            # checks below even after re-prompting. Confirm intended.
        # end try
        if self.app.crawler:
            self.send_sync('Got your page link')
            self.get_novel_info()
        elif len(self.app.user_input) < 4:
            self.send_sync('Your query is too short')
            self.state = self.handle_novel_url
            self.get_novel_url()
        else:
            if disable_search:
                self.send_sync(
                    'Sorry! I can not do searching.\n'
                    'Please use Google to find your novel first'
                )
                self.get_novel_url()
            else:
                self.send_sync(
                    'Searching %d sources for "%s"\n' % (
                        len(self.app.crawler_links), self.app.user_input),
                )
                self.display_novel_selection()
            # end if
        # end if
    # end def
    # ------------------------------------------------------------ #
    #  SEARCHING -- skips if DISCORD_DISABLE_SEARCH is 'true'
    # ------------------------------------------------------------ #
    def display_novel_selection(self):
        """Run the search and show a numbered list of matching novels."""
        self.app.search_novel()
        if self.closed:
            return
        if len(self.app.search_results) == 0:
            self.send_sync('No novels found for "%s"' % self.app.user_input)
            self.state = self.handle_novel_url
        elif len(self.app.search_results) == 1:
            # Single hit: skip the selection round-trip.
            self.selected_novel = self.app.search_results[0]
            self.display_sources_selection()
        else:
            self.send_sync('\n'.join([
                'Found %d novels:' % len(self.app.search_results)
            ] + [
                '%d. **%s** `%d sources`' % (
                    i + 1,
                    item['title'],
                    len(item['novels'])
                ) for i, item in enumerate(self.app.search_results)
            ] + [
                '',
                'Enter name or index of your novel.',
                'Send `!cancel` to stop this session.'
            ]))
            self.state = self.handle_novel_selection
        # end if
    # end def
    def handle_novel_selection(self):
        """Resolve the user's reply (index or title substring) to one novel."""
        self.state = self.busy_state
        text = self.message.content.strip()
        if text.startswith('!cancel'):
            self.get_novel_url()
            return
        # end if
        match_count = 0
        selected = None
        for i, res in enumerate(self.app.search_results):
            if str(i + 1) == text:
                # Exact 1-based index match.
                selected = res
                match_count += 1
            elif text.isdigit() or len(text) < 3:
                # Numeric but not an index, or too short for substring match.
                pass
            elif res['title'].lower().find(text) != -1:
                selected = res
                match_count += 1
            # end if
        # end for
        if match_count != 1:
            self.send_sync(
                'Sorry! You should select *one* novel from the list (%d selected).' % match_count)
            self.display_novel_selection()
            return
        # end if
        self.selected_novel = selected
        self.display_sources_selection()
    # end def
    def display_sources_selection(self):
        """List the sources hosting the selected novel, 10 per message."""
        novel_list = self.selected_novel['novels']
        self.send_sync('**%s** is found in %d sources:\n' %
                       (self.selected_novel['title'], len(novel_list)))
        for j in range(0, len(novel_list), 10):
            self.send_sync('\n'.join([
                '%d. <%s> %s' % (
                    (j + i + 1),
                    item['url'],
                    item['info'] if 'info' in item else ''
                ) for i, item in enumerate(novel_list[j:j+10])
            ]))
        # end for
        self.send_sync('\n'.join([
            '',
            'Enter index or name of your source.',
            'Send `!cancel` to stop this session.',
        ]))
        self.state = self.handle_sources_to_search
    # end def
    def handle_sources_to_search(self):
        """Resolve the user's reply (index or URL substring) to one source."""
        self.state = self.busy_state
        if len(self.selected_novel['novels']) == 1:
            # Only one source: pick it without asking.
            novel = self.selected_novel['novels'][0]
            return self.handle_search_result(novel)
        # end if
        text = self.message.content.strip()
        if text.startswith('!cancel'):
            return self.get_novel_url()
        # end if
        match_count = 0
        selected = None
        for i, res in enumerate(self.selected_novel['novels']):
            if str(i + 1) == text:
                selected = res
                match_count += 1
            elif text.isdigit() or len(text) < 3:
                pass
            elif res['url'].lower().find(text) != -1:
                selected = res
                match_count += 1
            # end if
        # end for
        if match_count != 1:
            self.send_sync(
                'Sorry! You should select *one* source from the list (%d selected).' % match_count)
            return self.display_sources_selection()
        # end if
        self.handle_search_result(selected)
    # end def
    def handle_search_result(self, novel):
        """Initialize the crawler for the chosen source URL."""
        self.send_sync('Selected: %s' % novel['url'])
        self.app.init_crawler(novel['url'])
        self.get_novel_info()
    # end def
    # ---------------------------------------------------------------------- #
    def get_novel_info(self):
        """Kick off metadata download on a worker thread."""
        # TODO: Handle login here
        self.send_sync('Getting information about your novel...')
        self.executor.submit(self.download_novel_info)
    # end def
    def download_novel_info(self):
        """Fetch novel metadata, then prepare a per-user output directory."""
        self.state = self.busy_state
        try:
            self.app.get_novel_info()
            if self.closed:
                return
        except Exception as ex:
            logger.exception('Failed to get novel info')
            self.send_sync('Failed to get novel info.\n`%s`' % str(ex))
            self.executor.submit(self.destroy)
            # NOTE(review): no return here — the path setup below still runs
            # after a failure. Confirm intended.
        # end try
        # Setup output path
        root = os.path.abspath('.discord_bot_output')
        if public_path and os.path.exists(public_path):
            root = os.path.abspath(public_path)
        # end if
        good_name = os.path.basename(self.app.output_path)
        output_path = os.path.join(root, str(self.user.id), good_name)
        if os.path.exists(output_path):
            # Start fresh for repeated downloads of the same novel.
            shutil.rmtree(output_path, ignore_errors=True)
        # end if
        os.makedirs(output_path, exist_ok=True)
        self.app.output_path = output_path
        self.display_range_selection()
    # end def
    def display_range_selection(self):
        """Explain the chapter-range commands and show volume/chapter counts."""
        self.send_sync('\n'.join([
            'Now you choose what to download:',
            '- Send `!cancel` to stop this session.',
            '- Send `all` to download all chapters',
            '- Send `last 20` to download last 20 chapters. Choose any number you want.',
            '- Send `first 10` for first 10 chapters. Choose any number you want.',
            '- Send `volume 2 5` to download download volume 2 and 5. Pass as many numbers you need.',
            '- Send `chapter 110 120` to download chapter 110 to 120. Only two numbers are accepted.',
        ]))
        self.send_sync(
            '**It has `%d` volumes and `%d` chapters.**' % (
                len(self.app.crawler.volumes),
                len(self.app.crawler.chapters)
            )
        )
        self.state = self.handle_range_selection
    # end def
    def handle_range_selection(self):
        """Parse the range command and populate self.app.chapters."""
        self.state = self.busy_state
        text = self.message.content.strip().lower()
        if text == '!cancel':
            self.executor.submit(self.destroy)
            return
        # end if
        if text == 'all':
            self.app.chapters = self.app.crawler.chapters[:]
        elif re.match(r'^first(\s\d+)?$', text):
            # "first" alone defaults to 50 chapters.
            text = text[len('first'):].strip()
            n = int(text) if text.isdigit() else 50
            n = 50 if n < 0 else n
            self.app.chapters = self.app.crawler.chapters[: n]
        elif re.match(r'^last(\s\d+)?$', text):
            text = text[len('last'):].strip()
            n = int(text) if text.isdigit() else 50
            n = 50 if n < 0 else n
            self.app.chapters = self.app.crawler.chapters[-n:]
        elif re.match(r'^volume(\s\d+)+$', text):
            text = text[len('volume'):].strip()
            selected = re.findall(r'\d+', text)
            self.send_sync(
                'Selected volumes: ' + ', '.join(selected),
            )
            selected = [int(x) for x in selected]
            self.app.chapters = [
                chap for chap in self.app.crawler.chapters
                if selected.count(chap['volume']) > 0
            ]
        elif re.match(r'^chapter(\s\d+)+$', text):
            text = text[len('chapter'):].strip()
            pair = text.split(' ')
            if len(pair) == 2:
                def resolve_chapter(name):
                    # Accept either a chapter number or a chapter-name lookup.
                    cid = 0
                    if name.isdigit():
                        cid = int(name)
                    else:
                        cid = self.app.crawler.get_chapter_index_of(name)
                    # end if
                    return cid - 1
                # end def
                first = resolve_chapter(pair[0])
                second = resolve_chapter(pair[1])
                if first > second:
                    second, first = first, second
                # end if
                # NOTE(review): `or` accepts a partially invalid range, and the
                # slice end excludes chapter `second + 1` — confirm intended.
                if first >= 0 or second < len(self.app.crawler.chapters):
                    self.app.chapters = self.app.crawler.chapters[first: second]
                # end if
            # end if
            if len(self.app.chapters) == 0:
                self.send_sync('Chapter range is not valid. Please try again')
                self.state = self.handle_range_selection
                return
            # end if
        else:
            self.send_sync(
                'Sorry! I did not recognize your input. Please try again')
            self.state = self.handle_range_selection
            return
        # end if
        if len(self.app.chapters) == 0:
            self.send_sync(
                'You have not selected any chapters. Please select at least one')
            self.state = self.handle_range_selection
            return
        # end if
        self.send_sync('Got your range selection')
        self.display_output_selection()
    # end def
    def display_output_selection(self):
        """Explain the output-format commands."""
        self.state = self.busy_state
        self.send_sync('\n'.join([
            'Now you can choose book formats to download:',
            '- Send `!cancel` to stop.',
            # '- Send `!all` to download all formats _(it may take a very very long time!)_',
            'To select specific output formats:',
            '- Send `pdf` to download only pdf format **(Large number of chapters may exceed the RAM and process will be terminated without any warning. Pls use `epub`)**',
            '- Send `epub pdf` to download both epub and pdf formats. **(This May also exceed the RAM. Pls use `epub` only)**',
            '- Send `{space separated format names}` for multiple formats **(consider using only a single format)**',
            'Available formats: `' + '` `'.join(available_formats) + '`',
        ]))
        self.state = self.handle_output_selection
    # end def
    def handle_output_selection(self):
        """Parse requested formats; default to all formats if none matched."""
        self.state = self.busy_state
        text = self.message.content.strip()
        if text.startswith('!cancel'):
            self.get_novel_url()
            return
        # end if
        output_format = set(re.findall(
            '|'.join(available_formats), text.lower()))
        if not len(output_format):
            output_format = set(available_formats)
            self.send_sync('Sorry! I did not recognize your input. ' +
                           'By default, I shall generate in (%s) formats.' % (', ' .join(output_format)))
        # end if
        self.app.output_formats = {x: (x in output_format)
                                   for x in available_formats}
        self.send_sync('I will generate e-book in (%s) format' %
                       (', ' .join(output_format)))
        self.send_sync('\n'.join([
            'Starting download...',
            'Send anything to view status.',
            'Send `!cancel` to stop it.',
        ]))
        self.executor.submit(self.start_download)
    # end def
    # ---------------------------------------------------------------------- #
    def start_download(self):
        """Download chapters, bind books, compress, then publish or upload."""
        self.app.pack_by_volume = False
        try:
            self.send_sync(
                '**%s**' % self.app.crawler.novel_title,
                'Downloading %d chapters...' % len(self.app.chapters),
            )
            self.app.start_download()
            self.send_sync('Download complete.')
            if self.closed:
                return
            self.send_sync('Binding books...')
            self.app.bind_books()
            self.send_sync('Book binding completed.')
            if self.closed:
                return
            self.send_sync('Compressing output folder...')
            self.app.compress_books()
            self.send_sync('Compressed output folder.')
            if self.closed:
                return
            if public_ip and public_path and os.path.exists(public_path):
                # Files are served from a public web root — just link them.
                self.send_sync('Publishing files...')
                self.publish_files()
            else:
                self.send_sync('Uploading files...')
                for archive in self.app.archived_outputs:
                    self.upload_file(archive)
                # end for
            # end if
        except Exception as ex:
            logger.exception('Failed to download')
            self.send_sync('Download failed!\n`%s`' % str(ex))
            # NOTE(review): destroy is submitted here AND in finally, so it
            # runs twice on failure — confirm intended.
            self.executor.submit(self.destroy)
        finally:
            self.executor.submit(self.destroy)
        # end try
    # end def
    def publish_files(self):
        """Send a URL to the user's folder under the public web root."""
        try:
            download_url = '%s/%s/%s' % (public_ip.strip('/'),
                                         quote(str(self.user.id)),
                                         quote(os.path.basename(self.app.output_path)))
            self.send_sync('Download files from:\n' + download_url)
        except Exception:
            logger.exception('Fail to publish')
        # end try
    # end def
    def upload_file(self, archive):
        """Attach *archive* to a DM, or push to Google Drive if over ~8MB."""
        # Check file size
        file_size = os.stat(archive).st_size
        if file_size > 7.99 * 1024 * 1024:
            # Discord attachment limit — fall back to Google Drive.
            self.send_sync(
                'File %s exceeds 8MB. Uploading To Google Drive.' % os.path.basename(archive))
            description = 'Generated By : Discord Bot Ebook Smelter'
            link_id = upload(archive, description)
            if link_id:
                self.send_sync('https://drive.google.com/open?id=%s' % link_id)
            else:
                self.send_sync('Failed to upload to google drive')
            # end if
        else:
            # Upload small files to discord directly
            # Scale size to a human unit (B/KB/MB/GB) for the status message.
            k = 0
            while(file_size > 1024 and k < 3):
                k += 1
                file_size /= 1024.0
            # end while
            self.send_sync(
                'Uploading %s [%d%s] ...' % (
                    os.path.basename(archive),
                    int(file_size * 100) / 100.0,
                    ['B', 'KB', 'MB', 'GB'][k]
                )
            )
            self.wait_for(
                self.user.send(
                    file=discord.File(
                        open(archive, 'rb'),
                        os.path.basename(archive)
                    )
                )
            )
        # end if
    # end def
# end class
|
import tensorflow as tf
class MLPTopic(object):
    """Review-based rating prediction model (TF1 graph-mode).

    Builds CNN encoders over user and item reviews, attention over the
    per-review features, and an NCF-style interaction layer to predict a
    scalar rating.

    Fixes over the original:
    - ``tf.concat`` was called with the pre-TF-1.0 argument order
      ``tf.concat(axis, values)``; the rest of the graph uses the TF1 API
      (placeholders, etc.), so it now uses ``tf.concat(values, axis)``.
    - The ``input_reiid`` placeholder was (copy-paste) named
      ``'input_reuid'``; it is now named ``'input_reiid'``.
    """
    def __init__(
            self, review_num_u, review_num_i, review_len_u, review_len_i, user_num, item_num, num_classes,
            user_vocab_size, item_vocab_size, n_latent, embedding_id, attention_size,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # ---- Graph inputs ------------------------------------------------
        # Word-id tensors of the reviews written by a user / about an item.
        self.input_u = tf.placeholder(tf.int32, [None, review_num_u, review_len_u], name="input_u")
        self.input_i = tf.placeholder(tf.int32, [None, review_num_i, review_len_i], name="input_i")
        # Ids of the items (resp. users) each review refers to.
        self.input_reuid = tf.placeholder(tf.int32, [None, review_num_u], name='input_reuid')
        self.input_reiid = tf.placeholder(tf.int32, [None, review_num_i], name='input_reiid')
        self.input_y = tf.placeholder(tf.float32, [None, 1], name="input_y")
        self.input_uid = tf.placeholder(tf.int32, [None, 1], name="input_uid")
        self.input_iid = tf.placeholder(tf.int32, [None, 1], name="input_iid")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        self.drop0 = tf.placeholder(tf.float32, name="dropout0")
        # Id-embedding tables used by the attention layers (+2 slack rows
        # — presumably for padding/unknown ids; TODO confirm).
        iidW = tf.Variable(tf.random_uniform([item_num + 2, embedding_id], -0.1, 0.1), name="iidW")
        uidW = tf.Variable(tf.random_uniform([user_num + 2, embedding_id], -0.1, 0.1), name="uidW")
        l2_loss = tf.constant(0.0)
        # ---- Word embeddings --------------------------------------------
        with tf.name_scope("user_embedding"):
            self.W1 = tf.Variable(
                tf.random_uniform([user_vocab_size, embedding_size], -1.0, 1.0),
                name="W1")
            self.embedded_user = tf.nn.embedding_lookup(self.W1, self.input_u)
            self.embedded_users = tf.expand_dims(self.embedded_user, -1)
        with tf.name_scope("item_embedding"):
            self.W2 = tf.Variable(
                tf.random_uniform([item_vocab_size, embedding_size], -1.0, 1.0),
                name="W2")
            self.embedded_item = tf.nn.embedding_lookup(self.W2, self.input_i)
            self.embedded_items = tf.expand_dims(self.embedded_item, -1)
        # ---- CNN over user reviews --------------------------------------
        pooled_outputs_u = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("user_conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                # Collapse (batch, review) into one dim so each review is a
                # separate convolution instance.
                self.embedded_users = tf.reshape(self.embedded_users, [-1, review_len_u, embedding_size, 1])
                conv = tf.nn.conv2d(
                    self.embedded_users,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, review_len_u - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs_u.append(pooled)
        num_filters_total = num_filters * len(filter_sizes)
        # TF1 argument order: tf.concat(values, axis).
        self.h_pool_u = tf.concat(pooled_outputs_u, 3)
        self.h_pool_flat_u = tf.reshape(self.h_pool_u, [-1, review_num_u, num_filters_total])
        # ---- CNN over item reviews --------------------------------------
        pooled_outputs_i = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("item_conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                self.embedded_items = tf.reshape(self.embedded_items, [-1, review_len_i, embedding_size, 1])
                conv = tf.nn.conv2d(
                    self.embedded_items,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, review_len_i - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs_i.append(pooled)
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool_i = tf.concat(pooled_outputs_i, 3)
        self.h_pool_flat_i = tf.reshape(self.h_pool_i, [-1, review_num_i, num_filters_total])
        with tf.name_scope("dropout"):
            # keep_prob=1.0 here: real dropout is applied after attention.
            self.h_drop_u = tf.nn.dropout(self.h_pool_flat_u, 1.0)
            self.h_drop_i = tf.nn.dropout(self.h_pool_flat_i, 1.0)
        # ---- Attention over reviews -------------------------------------
        with tf.name_scope("attention"):
            Wau = tf.Variable(tf.random_uniform([num_filters_total, attention_size],
                                                -0.1,
                                                0.1),
                              name='Wau')
            Wru = tf.Variable(tf.random_uniform([embedding_id, attention_size],
                                                -0.1,
                                                0.1),
                              name='Wru')
            Wpu = tf.Variable(tf.random_uniform([attention_size, 1],
                                                -0.1,
                                                0.1),
                              name='Wpu')
            bau = tf.Variable(tf.constant(0.1, shape=[attention_size]), name="bau")
            bbu = tf.Variable(tf.constant(0.1, shape=[1]), name="bbu")
            self.iid_a = tf.nn.relu(tf.nn.embedding_lookup(iidW, self.input_reuid))
            # Additive attention: score each user review against the id
            # embedding of the item it talks about.
            self.u_j = tf.einsum('ajk,kl->ajl',
                                 tf.nn.relu(tf.einsum('ajk,kl->ajl',
                                                      self.h_drop_u,
                                                      Wau)
                                            + tf.einsum('ajk,kl->ajl',
                                                        self.iid_a,
                                                        Wru)
                                            + bau),
                                 Wpu) + bbu  # None*u_len*1
            self.u_a = tf.nn.softmax(self.u_j, 1)  # none*u_len*1
            Wai = tf.Variable(tf.random_uniform([num_filters_total, attention_size],
                                                -0.1,
                                                0.1),
                              name='Wai')
            Wri = tf.Variable(tf.random_uniform([embedding_id, attention_size],
                                                -0.1,
                                                0.1),
                              name='Wri')
            Wpi = tf.Variable(tf.random_uniform([attention_size, 1],
                                                -0.1,
                                                0.1),
                              name='Wpi')
            bai = tf.Variable(tf.constant(0.1,
                                          shape=[attention_size]),
                              name="bai")
            bbi = tf.Variable(tf.constant(0.1,
                                          shape=[1]),
                              name="bbi")
            self.uid_a = tf.nn.relu(tf.nn.embedding_lookup(uidW, self.input_reiid))
            self.i_j = tf.einsum('ajk,kl->ajl', tf.nn.relu(
                tf.einsum('ajk,kl->ajl', self.h_drop_i, Wai) + tf.einsum('ajk,kl->ajl', self.uid_a, Wri) + bai),
                Wpi) + bbi
            self.i_a = tf.nn.softmax(self.i_j, 1)  # none*len*1
            l2_loss += tf.nn.l2_loss(Wau)
            l2_loss += tf.nn.l2_loss(Wru)
            l2_loss += tf.nn.l2_loss(Wri)
            l2_loss += tf.nn.l2_loss(Wai)
        # ---- Attention-weighted review aggregation ----------------------
        with tf.name_scope("add_reviews"):
            self.u_feas = tf.reduce_sum(tf.multiply(self.u_a, self.h_drop_u), 1)
            self.u_feas = tf.nn.dropout(self.u_feas, self.dropout_keep_prob)
            self.i_feas = tf.reduce_sum(tf.multiply(self.i_a, self.h_drop_i), 1)
            self.i_feas = tf.nn.dropout(self.i_feas, self.dropout_keep_prob)
        # ---- Merge review features with id embeddings -------------------
        with tf.name_scope("get_fea"):
            iidmf = tf.Variable(tf.random_uniform([item_num + 2, embedding_id], -0.1, 0.1), name="iidmf")
            uidmf = tf.Variable(tf.random_uniform([user_num + 2, embedding_id], -0.1, 0.1), name="uidmf")
            self.uid = tf.nn.embedding_lookup(uidmf, self.input_uid)
            self.iid = tf.nn.embedding_lookup(iidmf, self.input_iid)
            self.uid = tf.reshape(self.uid, [-1, embedding_id])
            self.iid = tf.reshape(self.iid, [-1, embedding_id])
            Wu = tf.Variable(tf.random_uniform([num_filters_total, n_latent],
                                               -0.1,
                                               0.1),
                             name='Wu')
            bu = tf.Variable(tf.constant(0.1, shape=[n_latent]), name="bu")
            self.u_feas = tf.matmul(self.u_feas, Wu) + self.uid + bu
            Wi = tf.Variable(
                tf.random_uniform([num_filters_total, n_latent], -0.1, 0.1), name='Wi')
            bi = tf.Variable(tf.constant(0.1, shape=[n_latent]), name="bi")
            self.i_feas = tf.matmul(self.i_feas, Wi) + self.iid + bi
        # ---- Interaction layer and biased prediction --------------------
        with tf.name_scope('ncf'):
            self.FM = tf.multiply(self.u_feas, self.i_feas)
            self.FM = tf.nn.relu(self.FM)
            self.FM = tf.nn.dropout(self.FM, self.dropout_keep_prob)
            Wmul = tf.Variable(
                tf.random_uniform([n_latent, 1], -0.1, 0.1), name='wmul')
            self.mul = tf.matmul(self.FM, Wmul)
            self.score = tf.reduce_sum(self.mul, 1, keep_dims=True)
            # Per-user and per-item rating biases plus a global bias.
            self.uidW2 = tf.Variable(tf.constant(0.1, shape=[user_num + 2]), name="uidW2")
            self.iidW2 = tf.Variable(tf.constant(0.1, shape=[item_num + 2]), name="iidW2")
            self.u_bias = tf.gather(self.uidW2, self.input_uid)
            self.i_bias = tf.gather(self.iidW2, self.input_iid)
            self.Feature_bias = self.u_bias + self.i_bias
            self.bised = tf.Variable(tf.constant(0.1), name='bias')
            self.predictions = self.score + self.Feature_bias + self.bised
        with tf.name_scope("loss"):
            # Squared-error loss plus L2 regularization on attention weights.
            losses = tf.nn.l2_loss(tf.subtract(self.predictions, self.input_y))
            self.loss = losses + l2_reg_lambda * l2_loss
        with tf.name_scope("accuracy"):
            # MAE and RMSE reported as evaluation metrics.
            self.mae = tf.reduce_mean(tf.abs(tf.subtract(self.predictions, self.input_y)))
            self.accuracy = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.predictions, self.input_y))))
|
from tqdm import tqdm
import torch
from torch.optim import Adam, SGD
from torch.cuda.amp import autocast, GradScaler
from runtime.distributed_utils import get_rank, reduce_tensor, get_world_size
from runtime.inference import evaluate
from runtime.logging import mllog_event, mllog_start, mllog_end, CONSTANTS
def get_optimizer(params, flags):
    """Build the optimizer named by ``flags.optimizer``.

    Supported names: ``"adam"``, ``"sgd"`` (Nesterov momentum), ``"lamb"``
    (apex FusedLAMB, imported lazily). Raises ValueError for anything else.
    """
    name = flags.optimizer
    if name == "adam":
        return Adam(params, lr=flags.learning_rate, weight_decay=flags.weight_decay)
    if name == "sgd":
        return SGD(params, lr=flags.learning_rate, momentum=flags.momentum,
                   nesterov=True, weight_decay=flags.weight_decay)
    if name == "lamb":
        # apex is only required when LAMB is actually requested.
        import apex
        return apex.optimizers.FusedLAMB(params, lr=flags.learning_rate,
                                         betas=flags.lamb_betas,
                                         weight_decay=flags.weight_decay)
    raise ValueError("Optimizer {} unknown.".format(name))
def lr_warmup(optimizer, init_lr, lr, current_epoch, warmup_epochs):
    """Linearly interpolate every param group's LR from init_lr toward lr.

    At current_epoch == warmup_epochs the learning rate reaches ``lr``.
    """
    fraction = current_epoch / warmup_epochs
    warmed_lr = init_lr + (lr - init_lr) * fraction
    for group in optimizer.param_groups:
        group['lr'] = warmed_lr
def train(flags, model, train_loader, val_loader, loss_fn, score_fn, device, callbacks, is_distributed):
    """Train *model* until it reaches flags.quality_threshold, diverges, or
    flags.epochs elapse.

    Supports DDP, AMP (GradScaler), gradient accumulation (flags.ga_steps),
    linear LR warmup, MultiStepLR decay, and periodic evaluation starting at
    flags.start_eval_at every flags.evaluate_every epochs. MLPerf-style
    events are logged throughout.
    """
    rank = get_rank()
    world_size = get_world_size()
    torch.backends.cudnn.benchmark = flags.cudnn_benchmark
    torch.backends.cudnn.deterministic = flags.cudnn_deterministic
    optimizer = get_optimizer(model.parameters(), flags)
    if flags.lr_decay_epochs:
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                         milestones=flags.lr_decay_epochs,
                                                         gamma=flags.lr_decay_factor)
    scaler = GradScaler()
    model.to(device)
    loss_fn.to(device)
    if is_distributed:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[flags.local_rank],
                                                          output_device=flags.local_rank)
    is_successful = False
    diverged = False
    next_eval_at = flags.start_eval_at
    model.train()
    for callback in callbacks:
        callback.on_fit_start()
    for epoch in range(1, flags.epochs + 1):
        cumulative_loss = []
        # Warmup overrides the base LR for the first lr_warmup_epochs epochs.
        if epoch <= flags.lr_warmup_epochs and flags.lr_warmup_epochs > 0:
            lr_warmup(optimizer, flags.init_learning_rate, flags.learning_rate, epoch, flags.lr_warmup_epochs)
        mllog_start(key=CONSTANTS.BLOCK_START, sync=False,
                    metadata={CONSTANTS.FIRST_EPOCH_NUM: epoch, CONSTANTS.EPOCH_COUNT: 1})
        mllog_start(key=CONSTANTS.EPOCH_START, metadata={CONSTANTS.EPOCH_NUM: epoch}, sync=False)
        if is_distributed:
            # Reshuffle shards differently each epoch.
            train_loader.sampler.set_epoch(epoch)
        loss_value = None
        optimizer.zero_grad()
        for iteration, batch in enumerate(tqdm(train_loader, disable=(rank != 0) or not flags.verbose)):
            image, label = batch
            image, label = image.to(device), label.to(device)
            for callback in callbacks:
                callback.on_batch_start()
            with autocast(enabled=flags.amp):
                output = model(image)
                loss_value = loss_fn(output, label)
                # Normalize so accumulated gradients average over ga_steps.
                loss_value /= flags.ga_steps
            if flags.amp:
                scaler.scale(loss_value).backward()
            else:
                loss_value.backward()
            # Step the optimizer only every ga_steps micro-batches.
            if (iteration + 1) % flags.ga_steps == 0:
                if flags.amp:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()
                optimizer.zero_grad()
            loss_value = reduce_tensor(loss_value, world_size).detach().cpu().numpy()
            cumulative_loss.append(loss_value)
        mllog_end(key=CONSTANTS.EPOCH_STOP, sync=False,
                  metadata={CONSTANTS.EPOCH_NUM: epoch, 'current_lr': optimizer.param_groups[0]['lr']})
        if flags.lr_decay_epochs:
            scheduler.step()
        if epoch == next_eval_at:
            next_eval_at += flags.evaluate_every
            # Free the last activation tensor before evaluation.
            # NOTE(review): raises NameError if train_loader was empty.
            del output
            mllog_start(key=CONSTANTS.EVAL_START, value=epoch, metadata={CONSTANTS.EPOCH_NUM: epoch}, sync=False)
            eval_metrics = evaluate(flags, model, val_loader, loss_fn, score_fn, device, epoch)
            eval_metrics["train_loss"] = sum(cumulative_loss) / len(cumulative_loss)
            mllog_event(key=CONSTANTS.EVAL_ACCURACY,
                        value=eval_metrics["mean_dice"],
                        metadata={CONSTANTS.EPOCH_NUM: epoch},
                        sync=False)
            mllog_end(key=CONSTANTS.EVAL_STOP, metadata={CONSTANTS.EPOCH_NUM: epoch}, sync=False)
            for callback in callbacks:
                callback.on_epoch_end(epoch=epoch, metrics=eval_metrics, model=model, optimizer=optimizer)
            # evaluate() may have switched to eval mode.
            model.train()
            if eval_metrics["mean_dice"] >= flags.quality_threshold:
                is_successful = True
            elif eval_metrics["mean_dice"] < 1e-6:
                print("MODEL DIVERGED. ABORTING.")
                diverged = True
        mllog_end(key=CONSTANTS.BLOCK_STOP, sync=False,
                  metadata={CONSTANTS.FIRST_EPOCH_NUM: epoch, CONSTANTS.EPOCH_COUNT: 1})
        if is_successful or diverged:
            break
    mllog_end(key=CONSTANTS.RUN_STOP, sync=True,
              metadata={CONSTANTS.STATUS: CONSTANTS.SUCCESS if is_successful else CONSTANTS.ABORTED})
    for callback in callbacks:
        callback.on_fit_end()
|
# -*- coding: utf-8 -*-
# @Author: Zengjq
# @Date: 2019-02-20 17:07:27
# @Last Modified by: Zengjq
# @Last Modified time: 2019-02-20 17:19:05
# 99%
class Solution:
    def isPalindrome(self, x: 'int') -> 'bool':
        """Return True iff the decimal digits of x read the same both ways.

        Negative numbers are never palindromes (the sign breaks symmetry).
        """
        if x < 0:
            return False
        digits = str(x)
        return digits == digits[::-1]
# Quick manual check; expected output: True, False, False.
test_cases = (121, -123, 120)
solution = Solution()
for test_case in test_cases:
    print(solution.isPalindrome(test_case))
|
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class PaymentTerms(GenericTypeCode):
    """
    v3.PaymentTerms
    From: http://terminology.hl7.org/ValueSet/v3-PaymentTerms in v3-codesystems.xml
    Describes payment terms for a financial transaction, used in an invoice. This
    is typically expressed as a responsibility of the acceptor or payor of an
    invoice.
    """
    def __init__(self, value: AutoMapperTextInputType):
        """Wrap *value* as a v3 PaymentTerms code."""
        super().__init__(value=value)
    """
    http://terminology.hl7.org/CodeSystem/v3-PaymentTerms
    """
    # Canonical URI of the code system defining these codes.
    codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-PaymentTerms"
class PaymentTermsValues:
    """
    Payment in full for products and/or services is required as soon as the
    service is performed or goods delivered.
    From: http://terminology.hl7.org/CodeSystem/v3-PaymentTerms in v3-codesystems.xml
    """
    CashOnDelivery = PaymentTerms("COD")  # due on delivery
    """
    Payment in full for products and/or services is required 30 days from the time
    the service is performed or goods delivered.
    From: http://terminology.hl7.org/CodeSystem/v3-PaymentTerms in v3-codesystems.xml
    """
    Net30Days = PaymentTerms("N30")  # net 30 days
    """
    Payment in full for products and/or services is required 60 days from the time
    the service is performed or goods delivered.
    From: http://terminology.hl7.org/CodeSystem/v3-PaymentTerms in v3-codesystems.xml
    """
    Net60Days = PaymentTerms("N60")  # net 60 days
    """
    Payment in full for products and/or services is required 90 days from the time
    the service is performed or goods delivered.
    From: http://terminology.hl7.org/CodeSystem/v3-PaymentTerms in v3-codesystems.xml
    """
    Net90Days = PaymentTerms("N90")  # net 90 days
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from scipy.cluster import hierarchy
from idpflex import cnextend as cnx
from idpflex.properties import ScalarProperty
class TestClusterNodeX(object):
    """Unit tests for ClusterNodeX property storage and tree navigation.

    The ``benchmark`` and ``small_tree`` arguments are pytest fixtures —
    presumably defined in a conftest.py; verify against the test suite.
    """
    def test_property(self):
        """Dict-style access: set/get properties, None for missing keys."""
        n = cnx.ClusterNodeX(0)
        n.property_group['prop'] = True
        assert n['prop'] is True
        assert n['not_a_key'] is None
        prop = ScalarProperty(name='some_prop', y=np.array([1, 2, 3]))
        n[prop.name] = prop
        assert_array_equal(n[prop.name].y, prop.y)
        # Assigning a property also back-links it to the node.
        assert prop.node is n
        # Only property-class instances may be assigned.
        with pytest.raises(AttributeError):
            n['not_a_property'] = 'not a property class'
    def test_property_group_features(self):
        """Feature vector/weights aggregate scalar properties in order."""
        n = cnx.ClusterNodeX(0)
        prop = ScalarProperty(name='some_prop', y=4)
        n[prop.name] = prop
        prop2 = ScalarProperty(name='some_prop2', y=2)
        n[prop2.name] = prop2
        fv = n.property_group.feature_vector()
        assert_array_equal(fv, np.array([4, 2]))
        ws = n.property_group.feature_weights()
        assert_array_equal(ws, np.array([1, 1]))
    def test_leafs(self, benchmark):
        """Leafs of a small cluster and of the root."""
        t = benchmark['tree']
        cluster = t[benchmark['nleafs']]  # first cluster that is not a leaf
        assert [n.id for n in cluster.leafs] == [19167, 19168]
        cluster = t.root
        assert cluster.leafs == t.leafs
    def test_distance_submatrix(self, small_tree):
        """Condensed distance submatrix restricted to a cluster's leafs."""
        t = small_tree['tree']
        a_cluster = t[-4]  # leafs have indexes 6, 7, 8
        dist_submat = a_cluster.distance_submatrix(small_tree['dist_mat'])
        reference = np.array([1, 4, 1])
        assert_array_equal(dist_submat, reference)
    def test_representative(self, small_tree):
        """Representative is the leaf closest to the cluster centroid."""
        t = small_tree['tree']
        a_cluster = t[-4]
        r = a_cluster.representative(small_tree['dist_mat'])
        assert r.id == 7
class TestTree(object):
    """Unit tests for Tree construction, iteration, and depth queries.

    ``benchmark`` is a pytest fixture — presumably defined in a
    conftest.py; verify against the test suite.
    """
    def test_from_linkage_matrix(self, benchmark):
        """Build from a linkage matrix with both node classes."""
        t = cnx.Tree()
        # Plain scipy ClusterNode has no parent back-links.
        t.from_linkage_matrix(benchmark['z'], node_class=hierarchy.ClusterNode)
        r = t.root
        assert hasattr(r, 'parent') is False
        # ClusterNodeX adds a parent attribute (None for the root).
        t.from_linkage_matrix(benchmark['z'], node_class=cnx.ClusterNodeX)
        r = t.root
        assert r.parent is None
        assert len(t) == benchmark['nnodes']
    def test_leafs(self, benchmark):
        """Number of leafs matches the fixture."""
        t = benchmark['tree']
        assert len(t.leafs) == benchmark['nleafs']
    def test_iter(self, benchmark):
        """Iteration yields nodes in descending id order."""
        t = benchmark['tree']
        ids = sorted(range(benchmark['nnodes']), reverse=True)
        assert ids == list(node.id for node in t)
    def test_getitem(self, benchmark):
        """Negative indexing reaches the root; slicing yields leading nodes."""
        t = benchmark['tree']
        assert t[-1] is t.root
        assert list(n.id for n in t[:3]) == list(range(3))
    def test_clusters_above_depth(self, benchmark):
        """All nodes at depth <= 3."""
        t = benchmark['tree']
        ids = [n.id for n in t.nodes_above_depth(depth=3)]
        assert ids == [44732, 44748, 44752, 44753, 44754, 44755, 44756]
    def test_clusters_at_depth(self, benchmark):
        """Only the nodes exactly at depth 3."""
        t = benchmark['tree']
        ids = [n.id for n in t.nodes_at_depth(depth=3)]
        assert ids == [44732, 44748, 44752, 44753]
def test_random_distance_tree():
    """random_distance_tree: the first partition under the root must not
    contain both of the two leaves with the biggest mutual distance."""
    out = cnx.random_distance_tree(9)
    dm = out.distance_matrix
    # Indexes of the two leaves with the biggest mutual distance.
    idx = set(np.unravel_index(np.argmax(dm), dm.shape))
    # BUG FIX: the original line read `idx not in set(...)` as a bare
    # expression -- its result was discarded, so nothing was ever checked
    # (and membership of a set in a set is not the intended subset test).
    # Assert that the root's first partition does not contain BOTH leaves.
    assert not idx.issubset(set(out.tree[-2].leaf_ids))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    pytest.main()
|
###############################################################################
# Copyright (c) 2018, Lawrence Livermore National Security, LLC.
#
# Produced at the Lawrence Livermore National Laboratory
#
# Written by K. Humbird (humbird1@llnl.gov), L. Peterson (peterson76@llnl.gov).
#
# LLNL-CODE-754815
#
# All rights reserved.
#
# This file is part of DJINN.
#
# For details, see github.com/LLNL/djinn.
#
# For details about use and distribution, please read DJINN/LICENSE .
###############################################################################
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except:
import tensorflow as tf
import numpy as np
try:
import cPickle
except:
import _pickle as cPickle
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import MinMaxScaler
from .djinn_fns import tree_to_nn_weights, tf_dropout_regression, \
get_hyperparams, tf_continue_training
def load(model_name, model_path="./"):
    """Reload a pickled DJINN model and relaunch its tensorflow sessions.

    Args:
        model_name (str): Name of model.
        model_path (str): Path to model.

    Returns:
        Object: djinn regressor model, or an empty tuple if loading failed
        (preserved legacy behavior so truthiness checks by callers work).
    """
    try:
        with open("%s%s.pkl" % (model_path, model_name), "rb") as f:
            model = cPickle.load(f)
        # Restore the per-tree tensorflow sessions for the unpickled model.
        model.load_model(model_name, model_path)
        return model
    except Exception as err:
        # Catch Exception instead of a bare `except:` so KeyboardInterrupt /
        # SystemExit still propagate, and report the cause rather than
        # failing silently.
        print("Error loading model: %s" % err)
        return ()
class DJINN_Regressor():
    """DJINN regression model.

    Args:
        n_trees (int): Number of trees in random forest
            (equal to the number of neural networks).
        max_tree_depth (int): Maximum depth of decision tree.
            Neural network will have max_tree_depth-1 hidden layers.
        dropout_keep_prob (float): Probability of keeping a neuron
            in dropout layers.
    """
    def __init__(self, n_trees=1, max_tree_depth=4, dropout_keep_prob=1.0):
        # Name-mangled "private" attributes; DJINN_Classifier reaches them
        # via their mangled names (self._DJINN_Regressor__*).
        self.__n_trees = n_trees
        self.__tree_max_depth = max_tree_depth
        self.__dropout_keep_prob = dropout_keep_prob
        # MinMax scalers for targets/inputs; created lazily when fitting.
        self.__yscale = None
        self.__xscale = None
        # Dict of per-tree tensorflow sessions, populated by load_model.
        self.__sess = None
        # Subclasses (classification) flip this flag to False.
        self.__regression = True
        self.modelname = None
        self.modelpath = None
    def get_hyperparameters(self, X, Y, weight_reg=1.0e-8, random_state=None):
        """Automatic selection of djinn hyper-parameters.

        Returns learning rate, number of epochs, batch size.

        Args:
            X (ndarray): Input parameters for training data.
            Y (ndarray): Target parameters for training data.
            weight_reg (float): Multiplier for L2 penalty on weights.
            random_state (int): Set the random seed.

        Raises:
            Exception: if decision tree cannot be built from the data.

        Returns:
            dictionary: Dictionary with batch size,
                learning rate, number of epochs
        """
        if (X.ndim == 1):
            print('Please reshape single-input data to a one-column array')
            return
        single_output = False
        if (Y.ndim == 1):
            single_output = True
            Y = Y.reshape(-1,1)
        # Scale the data
        self.__xscale = MinMaxScaler().fit(X)
        if self.__regression == True:
            self.__yscale = MinMaxScaler().fit(Y)
        # Train the random forest
        rfr = RandomForestRegressor(self.__n_trees, max_depth=self.__tree_max_depth,
                                    bootstrap=True, random_state=random_state)
        if self.__regression == True:
            if (single_output == True):
                rfr.fit(self.__xscale.transform(X), self.__yscale.transform(Y).flatten())
            else:
                rfr.fit(self.__xscale.transform(X), self.__yscale.transform(Y))
        else:
            # Classification: fit on raw labels, no target scaling.
            if (single_output == True):
                rfr.fit(self.__xscale.transform(X), Y.flatten())
            else:
                rfr.fit(self.__xscale.transform(X), Y)
        if(rfr.estimators_[0].tree_.max_depth < 1):
            raise Exception("Error: Cannot build decision tree.")
        # Map trees to initialized neural networks
        tree_to_network = tree_to_nn_weights(self.__regression, X, Y, self.__n_trees, rfr, random_state)
        print('Finding optimal hyper-parameters...')
        # Run auto-djinn
        nn_batch_size, learnrate, nn_epochs = get_hyperparams(self.__regression,
                tree_to_network, self.__xscale, self.__yscale, X, Y,
                self.__dropout_keep_prob, weight_reg, random_state=random_state)
        return({'batch_size':nn_batch_size, 'learn_rate':learnrate, 'epochs':nn_epochs})
    def train(self, X, Y, epochs=1000, learn_rate=0.001, batch_size=0, weight_reg=1.0e-8,
              display_step=1, save_files=True, file_name="djinn", save_model=True,
              model_name="djinn_model", model_path="./", random_state=None):
        """Train djinn with specified hyperparameters.

        Args:
            X (ndarray): Input parameters for training data.
            Y (ndarray): Target parameters for training data.
            epochs (int): Number of training epochs.
            learn_rate (float): Learning rate for optimizaiton of weights/biases.
            batch_size (int): Number of samples per batch.
            weight_reg (float): Multiplier for L2 penalty on weights.
            display_step (int): Cost is printed every display_steps during training.
            save_files (bool): If True, saves train/valid cost per epoch, weights/biases.
            file_name (str): File name used if 'save_files' is True.
            save_model (bool): If True, saves the trained model.
            model_name (str): File name for model if 'save_model' is True.
            model_path (str): Location of where the model/files are saved.
            random_state (int): Set the random seed.

        Raises:
            Exception: if decision tree cannot be built from the data.

        Returns:
            None
        """
        self.modelname = model_name
        self.modelpath = model_path
        if (X.ndim == 1):
            print('Please reshape single-input data to a one-column array')
            return
        # Reshape data to play well with sklearn
        single_output = False
        if (Y.ndim == 1):
            single_output = True
            Y = Y.reshape(-1,1)
        # Create scalers
        # NOTE(review): scalers are only (re)built if get_hyperparameters was
        # not already called -- presumably to reuse the fitted scalers.
        if(self.__xscale == None):
            self.__xscale = MinMaxScaler().fit(X)
            if self.__regression == True:
                self.__yscale = MinMaxScaler().fit(Y)
        # Train the random forest
        rfr = RandomForestRegressor(self.__n_trees, max_depth=self.__tree_max_depth,
                                    bootstrap=True, random_state=random_state)
        if self.__regression == True:
            if (single_output == True):
                rfr.fit(self.__xscale.transform(X), self.__yscale.transform(Y).flatten())
            else:
                rfr.fit(self.__xscale.transform(X), self.__yscale.transform(Y))
        else:
            if (single_output == True):
                rfr.fit(self.__xscale.transform(X), Y.flatten())
            else:
                rfr.fit(self.__xscale.transform(X), Y)
        # Check the forest was successful
        if(rfr.estimators_[0].tree_.max_depth <= 1):
            raise Exception("Error: Cannot build decision tree.")
        # Map trees to neural networks
        tree_to_network = tree_to_nn_weights(self.__regression, X, Y, self.__n_trees, rfr, random_state)
        # Default batch size: 5% of the training set.
        if(batch_size == 0): batch_size = int(np.ceil(0.05*len(Y)))
        tf_dropout_regression(self.__regression, tree_to_network, self.__xscale,
                              self.__yscale, X, Y,ntrees=self.__n_trees,
                              filename=file_name, learnrate=learn_rate,
                              training_epochs=epochs, batch_size=batch_size,
                              dropout_keep_prob=self.__dropout_keep_prob, weight_reg=weight_reg,
                              display_step=display_step, savefiles=save_files,
                              savemodel=save_model, modelname=self.modelname,
                              modelpath=self.modelpath, random_state=random_state)
        if (save_model == True):
            # Pickle the whole regressor so djinn.load() can restore it later.
            with open('%s%s.pkl'%(self.modelpath, self.modelname), 'wb') as f:
                cPickle.dump(self, f)
    def fit(self, X, Y, epochs=None, learn_rate=None, batch_size=None, weight_reg=1.0e-8,
            display_step=1, save_files=True, file_name="djinn", save_model=True,
            model_name="djinn_model", model_path="./", random_state=None):
        """Trains djinn model with optimal settings, if not supplied.

        Args:
            X (ndarray): Input parameters for training data.
            Y (ndarray): Target parameters for training data.
            epochs (int): Number of training epochs.
            learn_rate (float): Learning rate for optimizaiton of weights/biases.
            batch_size (int): Number of samples per batch.
            weight_reg (float): Multiplier for L2 penalty on weights.
            display_step (int): Cost is printed every display_steps during training.
            save_files (bool): If True, saves train/valid cost per epoch, weights/biases.
            file_name (str): File name used if 'save_files' is True.
            save_model (bool): If True, saves the trained model.
            model_name (str): File name for model if 'save_model' is True.
            model_path (str): Location of where the model/files are saved.
            random_state (int): Set the random seed.

        Returns:
            None
        """
        if(learn_rate == None):
            # No hyper-parameters supplied: search for good ones first.
            optimal=self.get_hyperparameters(X, Y, weight_reg, random_state)
            learn_rate=optimal['learn_rate']
            batch_size=optimal['batch_size']
            epochs=optimal['epochs']
        self.train(X, Y, epochs, learn_rate, batch_size, weight_reg,
                   display_step, save_files, file_name, save_model,
                   model_name, model_path, random_state)
    def load_model(self, model_name, model_path):
        """Reload tensorflow session for saved model. Called by djinn.load,

        Args:
            model_path (str, optional): Location of model if different than
                location set during training.
            model_name (str, optional): Name of model if different than
                name set during training.

        Returns:
            Object: djinn regressor model.
        """
        self.__sess = {}
        # One checkpoint / graph / session per tree in the forest.
        for p in range(0, self.__n_trees):
            tf.reset_default_graph()
            new_saver = \
                tf.train.import_meta_graph('%s%s_tree%s.ckpt.meta'%(model_path,model_name,p))
            self.__sess[p] = tf.Session()
            new_saver.restore(self.__sess[p], '%s%s_tree%s.ckpt'%(model_path,model_name,p))
            print("Model %s restored"%p)
    def close_model(self):
        """Closes tensorflow sessions launched with djinn.load.

        Args:
            None

        Returns:
            None
        """
        for p in range(0, self.__n_trees):
            self.__sess[p].close()
    def bayesian_predict(self, x_test, n_iters, random_state=None):
        """Bayesian distribution of predictions for a set of test data.

        Args:
            x_test (ndarray): Input parameters for test data.
            n_iters (int): Number of times to evaluate each neural network
                per test point.
            random_state (int): Set the random seed.

        Returns:
            tuple (ndarray, ndarray, ndarray, dict):
                25th percentile of distribution of predictions for each test point.
                50th percentile of distribution of predictions for each test point.
                75th percentile of distribution of predictions for each test point.
                Dictionary containing inputs and predictions per tree, per
                iteration, for each test point.
        """
        nonBayes = False
        # predict() calls this with n_iters=None: one pass, return the mean.
        if(n_iters == None):
            nonBayes = True
            n_iters = 1
        if(random_state): tf.set_random_seed(random_state)
        # Lazily restore sessions if the model was unpickled without them.
        if(self.__sess == None): self.load_model(self.modelname, self.modelpath)
        if(x_test.ndim == 1): x_test = x_test.reshape(1,-1)
        samples = {}
        samples['inputs'] = x_test
        x_test = self.__xscale.transform(x_test)
        samples['predictions'] = {}
        for p in range(0, self.__n_trees):
            x = self.__sess[p].graph.get_tensor_by_name("input:0")
            keep_prob = self.__sess[p].graph.get_tensor_by_name("keep_prob:0")
            pred = self.__sess[p].graph.get_tensor_by_name("prediction:0")
            # Dropout stays active at prediction time, which is what makes
            # repeated evaluations a (Bayesian) distribution of predictions.
            samples['predictions']['tree%s'%p] = \
                [self.__yscale.inverse_transform(self.__sess[p].run(pred,\
                 feed_dict={x:x_test, keep_prob:self.__dropout_keep_prob}))
                 for i in range(n_iters)]
        nout = samples['predictions']['tree0'][0].shape[1]
        preds = np.array([samples['predictions'][t]
                for t in samples['predictions']]).reshape((n_iters*self.__n_trees, len(x_test), nout))
        middle = np.percentile(preds, 50, axis=0)
        lower = np.percentile(preds, 25, axis=0)
        upper = np.percentile(preds, 75, axis=0)
        if(nonBayes == True):
            return(np.mean(preds, axis=0))
        else:
            return(lower, middle, upper, samples)
    def predict(self, x_test, random_state=None):
        """Predict target values for a set of test data.

        Args:
            x_test (ndarray): Input parameters for test data.
            random_state (int): Set the random seed.

        Returns:
            ndarray: Mean target value prediction for each test point.
        """
        return self.bayesian_predict(x_test, None, random_state)
    def collect_tree_predictions(self, predictions):
        """Gather distributions of predictions for each test point.

        Args:
            predictions (dict): The 'predictions' key from the dictionary
                returned by bayesian_predict.

        Returns:
            ndarray: Re-shaped predictions (niters*ntrees, # test points, output dim)
        """
        nout = predictions['tree0'][0].shape[1]
        n_iters = len(predictions['tree0'])
        xlength = predictions['tree0'][0].shape[0]
        preds = np.array([predictions[t]
                for t in predictions]).reshape((n_iters*self.__n_trees, xlength, nout))
        return(preds)
    def continue_training(self, X, Y, training_epochs, learn_rate, batch_size, display_step=1, random_state=None):
        """Continue training an exisitng model. Must load_model first.
        Model is resaved in current location.

        Args:
            X (ndarray): Input parameters for training data.
            Y (ndarray): Target parameters for training data.
            epochs (int): Number of training epochs.
            learn_rate (float): Learning rate for optimizaiton of weights/biases.
            batch_size (int): Number of samples per batch.
            display_step (int): Cost is printed every display_steps during training.
            random_state (int): Set the random seed.

        Returns:
            None
        """
        ntrees=self.__n_trees
        # Hidden layer count is tied to the tree depth used at build time.
        nhl=self.__tree_max_depth-1
        dropout_keep_prob=self.__dropout_keep_prob
        tf_continue_training(self.__regression, self.__xscale, self.__yscale,
                             X, Y, ntrees, learn_rate, training_epochs, batch_size,
                             self.__dropout_keep_prob, nhl, display_step,
                             self.modelname, self.modelpath, random_state)
class DJINN_Classifier(DJINN_Regressor):
    """DJINN classification model.

    Args:
        n_trees (int): Number of trees in random forest
            (equal to the number of neural networks).
        max_tree_depth (int): Maximum depth of decision tree.
            Neural network will have max_tree_depth-1 hidden layers.
        dropout_keep_prob (float): Probability of keeping a neuron
            in dropout layers.
    """
    def __init__(self, n_trees=1, max_tree_depth=4, dropout_keep_prob=1.0):
        DJINN_Regressor.__init__(self, n_trees, max_tree_depth, dropout_keep_prob)
        # Flip the base class's (name-mangled) regression flag so the shared
        # training code skips target scaling and fits class labels directly.
        self._DJINN_Regressor__regression = False

    def bayesian_predict(self, x_test, n_iters, random_state=None):
        """Bayesian distribution of class predictions for a set of test data.

        Args:
            x_test (ndarray): Input parameters for test data.
            n_iters (int): Number of times to evaluate each neural network
                per test point.
            random_state (int): Set the random seed.

        Returns:
            tuple (ndarray, ndarray, ndarray, dict):
                argmax class of the 25th percentile of predictions per point.
                argmax class of the 50th percentile of predictions per point.
                argmax class of the 75th percentile of predictions per point.
                Dictionary containing inputs and predictions per tree, per
                iteration, for each test point.
        """
        nonBayes = False
        # predict() calls this with n_iters=None: one pass, return labels.
        if n_iters is None:
            nonBayes = True
            n_iters = 1
        if random_state:
            tf.set_random_seed(random_state)
        # Lazily restore sessions if the model was unpickled without them.
        if self._DJINN_Regressor__sess is None:
            self.load_model(self.modelname, self.modelpath)
        if x_test.ndim == 1:
            x_test = x_test.reshape(1, -1)
        # Record the raw inputs before scaling them for the networks.
        samples = {'inputs': x_test, 'predictions': {}}
        x_test = self._DJINN_Regressor__xscale.transform(x_test)
        for p in range(0, self._DJINN_Regressor__n_trees):
            graph = self._DJINN_Regressor__sess[p].graph
            x = graph.get_tensor_by_name("input:0")
            keep_prob = graph.get_tensor_by_name("keep_prob:0")
            pred = graph.get_tensor_by_name("prediction:0")
            # Classification outputs are raw class scores; unlike the
            # regressor there is no inverse target scaling to apply.
            samples['predictions']['tree%s' % p] = \
                [self._DJINN_Regressor__sess[p].run(pred,
                 feed_dict={x: x_test,
                            keep_prob: self._DJINN_Regressor__dropout_keep_prob})
                 for i in range(n_iters)]
        nout = samples['predictions']['tree0'][0].shape[1]
        preds = np.array([samples['predictions'][t]
                for t in samples['predictions']]).reshape(
                    (n_iters * self._DJINN_Regressor__n_trees, len(x_test), nout))
        # (Removed a leftover debug print of preds.shape that polluted stdout
        # on every prediction.)
        # Predicted label = argmax over the class axis of each percentile.
        middle = np.argmax(np.percentile(preds, 50, axis=0), 1)
        lower = np.argmax(np.percentile(preds, 25, axis=0), 1)
        upper = np.argmax(np.percentile(preds, 75, axis=0), 1)
        if nonBayes:
            return middle
        return (lower, middle, upper, samples)
|
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class MaskPointHead(BaseModule):
    """A mask point head use in PointRend.

    ``MaskPointHead`` use shared multi-layer perceptron (equivalent to
    nn.Conv1d) to predict the logit of input points. The fine-grained feature
    and coarse feature will be concatenate together for predication.

    Args:
        num_fcs (int): Number of fc layers in the head. Default: 3.
        in_channels (int): Number of input channels. Default: 256.
        fc_channels (int): Number of fc channels. Default: 256.
        num_classes (int): Number of classes for logits. Default: 80.
        class_agnostic (bool): Whether use class agnostic classification.
            If so, the output channels of logits will be 1. Default: False.
        coarse_pred_each_layer (bool): Whether concatenate coarse feature with
            the output of each fc layer. Default: True.
        conv_cfg (dict | None): Dictionary to construct and config conv layer.
            Default: dict(type='Conv1d'))
        norm_cfg (dict | None): Dictionary to construct and config norm layer.
            Default: None.
        loss_point (dict): Dictionary to construct and config loss layer of
            point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
            loss_weight=1.0).
        init_cfg (dict or list[dict], optional): Initialization config dict.
    """

    def __init__(self,
                 num_classes,
                 num_fcs=3,
                 in_channels=256,
                 fc_channels=256,
                 class_agnostic=False,
                 coarse_pred_each_layer=True,
                 conv_cfg=dict(type='Conv1d'),
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 loss_point=dict(
                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
                 init_cfg=dict(
                     type='Normal', std=0.001,
                     override=dict(name='fc_logits'))):
        super().__init__(init_cfg)
        self.num_fcs = num_fcs
        self.in_channels = in_channels
        self.fc_channels = fc_channels
        self.num_classes = num_classes
        self.class_agnostic = class_agnostic
        self.coarse_pred_each_layer = coarse_pred_each_layer
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.loss_point = build_loss(loss_point)

        # First fc sees fine-grained features concatenated with the coarse
        # per-class predictions.
        fc_in_channels = in_channels + num_classes
        self.fcs = nn.ModuleList()
        for _ in range(num_fcs):
            fc = ConvModule(
                fc_in_channels,
                fc_channels,
                kernel_size=1,
                stride=1,
                padding=0,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
            self.fcs.append(fc)
            fc_in_channels = fc_channels
            # If the coarse prediction is re-concatenated after every fc,
            # each subsequent fc's input grows by num_classes channels.
            fc_in_channels += num_classes if self.coarse_pred_each_layer else 0

        out_channels = 1 if self.class_agnostic else self.num_classes
        self.fc_logits = nn.Conv1d(
            fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, fine_grained_feats, coarse_feats):
        """Classify each point base on fine grained and coarse feats.

        Args:
            fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
                shape (num_rois, in_channels, num_points).
            coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
                shape (num_rois, num_classes, num_points).

        Returns:
            Tensor: Point classification results,
                shape (num_rois, num_class, num_points).
        """
        x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
        for fc in self.fcs:
            x = fc(x)
            if self.coarse_pred_each_layer:
                x = torch.cat((x, coarse_feats), dim=1)
        return self.fc_logits(x)

    def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
                    cfg):
        """Get training targets of MaskPointHead for all images.

        Args:
            rois (Tensor): Region of Interest, shape (num_rois, 5).
            rel_roi_points: Points coordinates relative to RoI, shape
                (num_rois, num_points, 2).
            sampling_results (:obj:`SamplingResult`): Sampling result after
                sampling and assignment.
            gt_masks (Tensor) : Ground truth segmentation masks of
                corresponding boxes, shape (num_rois, height, width).
            cfg (dict): Training cfg.

        Returns:
            Tensor: Point target, shape (num_rois, num_points).
        """

        num_imgs = len(sampling_results)
        rois_list = []
        rel_roi_points_list = []
        # Split the flat roi tensors per image (column 0 is the batch index).
        for batch_ind in range(num_imgs):
            inds = (rois[:, 0] == batch_ind)
            rois_list.append(rois[inds])
            rel_roi_points_list.append(rel_roi_points[inds])
        pos_assigned_gt_inds_list = [
            res.pos_assigned_gt_inds for res in sampling_results
        ]
        cfg_list = [cfg for _ in range(num_imgs)]

        point_targets = map(self._get_target_single, rois_list,
                            rel_roi_points_list, pos_assigned_gt_inds_list,
                            gt_masks, cfg_list)
        point_targets = list(point_targets)

        if len(point_targets) > 0:
            point_targets = torch.cat(point_targets)

        return point_targets

    def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
                           gt_masks, cfg):
        """Get training target of MaskPointHead for each image."""
        num_pos = rois.size(0)
        num_points = cfg.num_points
        if num_pos > 0:
            # Select the gt mask assigned to each positive roi.
            gt_masks_th = (
                gt_masks.to_tensor(rois.dtype, rois.device).index_select(
                    0, pos_assigned_gt_inds))
            gt_masks_th = gt_masks_th.unsqueeze(1)
            # Convert roi-relative point coords to image-relative ones, then
            # sample the gt mask at those points.
            rel_img_points = rel_roi_point_to_rel_img_point(
                rois, rel_roi_points, gt_masks_th.shape[2:])
            point_targets = point_sample(gt_masks_th,
                                         rel_img_points).squeeze(1)
        else:
            point_targets = rois.new_zeros((0, num_points))
        return point_targets

    def loss(self, point_pred, point_targets, labels):
        """Calculate loss for MaskPointHead.

        Args:
            point_pred (Tensor): Point predication result, shape
                (num_rois, num_classes, num_points).
            point_targets (Tensor): Point targets, shape (num_roi, num_points).
            labels (Tensor): Class label of corresponding boxes,
                shape (num_rois, )

        Returns:
            dict[str, Tensor]: a dictionary of point loss components
        """

        loss = dict()
        if self.class_agnostic:
            # Single-channel logits: every roi uses class index 0.
            loss_point = self.loss_point(point_pred, point_targets,
                                         torch.zeros_like(labels))
        else:
            loss_point = self.loss_point(point_pred, point_targets, labels)
        loss['loss_point'] = loss_point
        return loss

    def _get_uncertainty(self, mask_pred, labels):
        """Estimate uncertainty based on pred logits.

        We estimate uncertainty as L1 distance between 0.0 and the logits
        prediction in 'mask_pred' for the foreground class in `classes`.

        Args:
            mask_pred (Tensor): mask predication logits, shape (num_rois,
                num_classes, mask_height, mask_width).
            labels (list[Tensor]): Either predicted or ground truth label for
                each predicted mask, of length num_rois.

        Returns:
            scores (Tensor): Uncertainty scores with the most uncertain
                locations having the highest uncertainty score,
                shape (num_rois, 1, mask_height, mask_width)
        """
        if mask_pred.shape[1] == 1:
            gt_class_logits = mask_pred.clone()
        else:
            inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
            gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
        # Logits near zero are the least confident -> highest uncertainty.
        return -torch.abs(gt_class_logits)

    def get_roi_rel_points_train(self, mask_pred, labels, cfg):
        """Get ``num_points`` most uncertain points with random points during
        train.

        Sample points in [0, 1] x [0, 1] coordinate space based on their
        uncertainty. The uncertainties are calculated for each point using
        '_get_uncertainty()' function that takes point's logit prediction as
        input.

        Args:
            mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
                mask_height, mask_width) for class-specific or class-agnostic
                prediction.
            labels (list): The ground truth class for each instance.
            cfg (dict): Training config of point head.

        Returns:
            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
                that contains the coordinates sampled points.
        """
        num_points = cfg.num_points
        oversample_ratio = cfg.oversample_ratio
        importance_sample_ratio = cfg.importance_sample_ratio
        assert oversample_ratio >= 1
        assert 0 <= importance_sample_ratio <= 1
        batch_size = mask_pred.shape[0]
        # Oversample uniformly, then keep the most uncertain subset.
        num_sampled = int(num_points * oversample_ratio)
        point_coords = torch.rand(
            batch_size, num_sampled, 2, device=mask_pred.device)
        point_logits = point_sample(mask_pred, point_coords)
        # It is crucial to calculate uncertainty based on the sampled
        # prediction value for the points. Calculating uncertainties of the
        # coarse predictions first and sampling them for points leads to
        # incorrect results. To illustrate this: assume uncertainty func(
        # logits)=-abs(logits), a sampled point between two coarse
        # predictions with -1 and 1 logits has 0 logits, and therefore 0
        # uncertainty value. However, if we calculate uncertainties for the
        # coarse predictions first, both will have -1 uncertainty,
        # and sampled point will get -1 uncertainty.
        point_uncertainties = self._get_uncertainty(point_logits, labels)
        num_uncertain_points = int(importance_sample_ratio * num_points)
        num_random_points = num_points - num_uncertain_points
        idx = torch.topk(
            point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
        # Offset per-roi indices so they address the flattened coord tensor.
        shift = num_sampled * torch.arange(
            batch_size, dtype=torch.long, device=mask_pred.device)
        idx += shift[:, None]
        point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
            batch_size, num_uncertain_points, 2)
        if num_random_points > 0:
            # Pad with uniformly random points up to num_points.
            rand_roi_coords = torch.rand(
                batch_size, num_random_points, 2, device=mask_pred.device)
            point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
        return point_coords

    def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
        """Get ``num_points`` most uncertain points during test.

        Args:
            mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
                mask_height, mask_width) for class-specific or class-agnostic
                prediction.
            pred_label (list): The predication class for each instance.
            cfg (dict): Testing config of point head.

        Returns:
            point_indices (Tensor): A tensor of shape (num_rois, num_points)
                that contains indices from [0, mask_height x mask_width) of the
                most uncertain points.
            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
                that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the [mask_height, mask_width] grid .
        """
        num_points = cfg.subdivision_num_points
        uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
        num_rois, _, mask_height, mask_width = uncertainty_map.shape
        h_step = 1.0 / mask_height
        w_step = 1.0 / mask_width

        uncertainty_map = uncertainty_map.view(num_rois,
                                               mask_height * mask_width)
        num_points = min(mask_height * mask_width, num_points)
        point_indices = uncertainty_map.topk(num_points, dim=1)[1]
        # Convert flat grid indices to pixel-center coords in [0, 1] x [0, 1].
        point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2)
        point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
                                                mask_width).float() * w_step
        point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
                                                mask_width).float() * h_step
        return point_indices, point_coords
|
# Note: The first part of this file can be modified in place, but the latter
# part is autogenerated by the boilerplate.py script.
"""
`matplotlib.pyplot` is a state-based interface to matplotlib. It provides
a MATLAB-like way of plotting.
pyplot is mainly intended for interactive plots and simple cases of
programmatic plot generation::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1)
y = np.sin(x)
plt.plot(x, y)
The object-oriented API is recommended for more complex plots.
"""
import functools
import importlib
import inspect
import logging
from numbers import Number
import re
import sys
import time
try:
import threading
except ImportError:
import dummy_threading as threading
from cycler import cycler
import matplotlib
import matplotlib.colorbar
import matplotlib.image
from matplotlib import _api
from matplotlib import rcsetup, style
from matplotlib import _pylab_helpers, interactive
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.backend_bases import FigureCanvasBase, MouseButton
from matplotlib.figure import Figure, figaspect
from matplotlib.gridspec import GridSpec, SubplotSpec
from matplotlib import rcParams, rcParamsDefault, get_backend, rcParamsOrig
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import Artist
from matplotlib.axes import Axes, Subplot
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for detrend_none, window_hanning
from matplotlib.scale import get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap, register_cmap
import numpy as np
# We may not need the following imports here:
from matplotlib.colors import Normalize
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from .ticker import (
TickHelper, Formatter, FixedFormatter, NullFormatter, FuncFormatter,
FormatStrFormatter, ScalarFormatter, LogFormatter, LogFormatterExponent,
LogFormatterMathtext, Locator, IndexLocator, FixedLocator, NullLocator,
LinearLocator, LogLocator, AutoLocator, MultipleLocator, MaxNLocator)
_log = logging.getLogger(__name__)

# Map each _api decorator factory to the code object of a sample wrapper it
# produces; _copy_docstring_and_deprecators compares wrapper code objects
# against these to recognize which decorator produced a given wrapper.
_code_objs = {
    _api.rename_parameter:
        _api.rename_parameter("", "old", "new", lambda new: None).__code__,
    _api.make_keyword_only:
        _api.make_keyword_only("", "p", lambda p: None).__code__,
}
def _copy_docstring_and_deprecators(method, func=None):
    """Copy *method*'s docstring and any _api deprecation decorators onto
    *func*; usable as a decorator factory when *func* is not given."""
    if func is None:
        # Called as @_copy_docstring_and_deprecators(method): return the
        # actual decorator.
        return functools.partial(_copy_docstring_and_deprecators, method)
    decorators = [docstring.copy(method)]
    # Check whether the definition of *method* includes @_api.rename_parameter
    # or @_api.make_keyword_only decorators; if so, propagate them to the
    # pyplot wrapper as well.
    while getattr(method, "__wrapped__", None) is not None:
        for decorator_maker, code in _code_objs.items():
            if method.__code__ is code:
                # Recover the decorator-factory arguments from the wrapper's
                # closure cells so an identical decorator can be rebuilt.
                kwargs = {
                    k: v.cell_contents
                    for k, v in zip(code.co_freevars, method.__closure__)}
                assert kwargs["func"] is method.__wrapped__
                kwargs.pop("func")
                decorators.append(decorator_maker(**kwargs))
        method = method.__wrapped__
    # Apply innermost-first, mirroring ordinary decorator stacking.
    for decorator in decorators[::-1]:
        func = decorator(func)
    return func
## Global ##

# The IPython 'post_execute' callback registered by install_repl_displayhook,
# or None when no hook is registered.
_IP_REGISTERED = None
# Whether new figures need a stale-figure observer (vanilla-REPL fallback
# when IPython's event machinery is unavailable).
_INSTALL_FIG_OBSERVER = False
def install_repl_displayhook():
    """
    Install a repl display hook so that any stale figure are automatically
    redrawn when control is returned to the repl.

    This works both with IPython and with vanilla python shells.
    """
    global _IP_REGISTERED
    global _INSTALL_FIG_OBSERVER

    class _NotIPython(Exception):
        # Internal sentinel: raised when IPython is importable but there is
        # no active shell, to fall through to the vanilla-REPL path.
        pass

    # see if we have IPython hooks around, if use them

    try:
        if 'IPython' in sys.modules:
            from IPython import get_ipython
            ip = get_ipython()
            if ip is None:
                raise _NotIPython()

            if _IP_REGISTERED:
                # Already installed; nothing to do.
                return

            def post_execute():
                if matplotlib.is_interactive():
                    draw_all()

            # IPython >= 2
            try:
                ip.events.register('post_execute', post_execute)
            except AttributeError:
                # IPython 1.x
                ip.register_post_execute(post_execute)

            # Keep a reference to the callback so it can be unregistered.
            _IP_REGISTERED = post_execute
            _INSTALL_FIG_OBSERVER = False

            # trigger IPython's eventloop integration, if available
            from IPython.core.pylabtools import backend2gui

            ipython_gui_name = backend2gui.get(get_backend())
            if ipython_gui_name:
                ip.enable_gui(ipython_gui_name)
        else:
            _INSTALL_FIG_OBSERVER = True

    # import failed or ipython is not running
    except (ImportError, _NotIPython):
        _INSTALL_FIG_OBSERVER = True
def uninstall_repl_displayhook():
    """
    Uninstall the matplotlib display hook.

    .. warning::

       Need IPython >= 2 for this to work.  For IPython < 2 will raise a
       ``NotImplementedError``

    .. warning::

       If you are using vanilla python and have installed another
       display hook this will reset ``sys.displayhook`` to what ever
       function was there when matplotlib installed it's displayhook,
       possibly discarding your changes.
    """
    global _IP_REGISTERED
    global _INSTALL_FIG_OBSERVER
    if _IP_REGISTERED:
        from IPython import get_ipython
        ip = get_ipython()
        try:
            ip.events.unregister('post_execute', _IP_REGISTERED)
        except AttributeError as err:
            # IPython < 2 has no events API to unregister from.
            raise NotImplementedError("Can not unregister events "
                                      "in IPython < 2.0") from err
        _IP_REGISTERED = None

    if _INSTALL_FIG_OBSERVER:
        _INSTALL_FIG_OBSERVER = False
# Alias: redraw all stale figures (used by the REPL display hook above).
draw_all = _pylab_helpers.Gcf.draw_all
@functools.wraps(matplotlib.set_loglevel)
def set_loglevel(*args, **kwargs):  # Ensure this appears in the pyplot docs.
    # Thin pass-through; functools.wraps copies the original's docstring
    # and metadata, so none is written here.
    return matplotlib.set_loglevel(*args, **kwargs)
@_copy_docstring_and_deprecators(Artist.findobj)
def findobj(o=None, match=None, include_self=True):
    # Default to searching the current figure when no artist is given.
    target = gcf() if o is None else o
    return target.findobj(match, include_self=include_self)
def _get_required_interactive_framework(backend_mod):
return getattr(
backend_mod.FigureCanvas, "required_interactive_framework", None)
def switch_backend(newbackend):
    """
    Close all open figures and set the Matplotlib backend.

    The argument is case-insensitive. Switching to an interactive backend is
    possible only if no event loop for another interactive backend has started.
    Switching to and from non-interactive backends is always possible.

    Parameters
    ----------
    newbackend : str
        The name of the backend to use.
    """
    global _backend_mod
    # make sure the init is pulled up so we can assign to it later
    import matplotlib.backends
    close("all")
    if newbackend is rcsetup._auto_backend_sentinel:
        # Auto-selection: prefer a backend matching the interactive framework
        # (if any) that is already running in this process, then fall back
        # through a fixed list of candidates, recursing with concrete names.
        current_framework = cbook._get_running_interactive_framework()
        mapping = {'qt5': 'qt5agg',
                   'qt4': 'qt4agg',
                   'gtk3': 'gtk3agg',
                   'wx': 'wxagg',
                   'tk': 'tkagg',
                   'macosx': 'macosx',
                   'headless': 'agg'}
        best_guess = mapping.get(current_framework, None)
        if best_guess is not None:
            candidates = [best_guess]
        else:
            candidates = []
        candidates += ["macosx", "qt5agg", "gtk3agg", "tkagg", "wxagg"]
        # Don't try to fallback on the cairo-based backends as they each have
        # an additional dependency (pycairo) over the agg-based backend, and
        # are of worse quality.
        for candidate in candidates:
            try:
                switch_backend(candidate)
            except ImportError:
                continue
            else:
                rcParamsOrig['backend'] = candidate
                return
        else:
            # Switching to Agg should always succeed; if it doesn't, let the
            # exception propagate out.
            switch_backend("agg")
            rcParamsOrig["backend"] = "agg"
            return
    # Backends are implemented as modules, but "inherit" default method
    # implementations from backend_bases._Backend. This is achieved by
    # creating a "class" that inherits from backend_bases._Backend and whose
    # body is filled with the module's globals.
    backend_name = cbook._backend_module_name(newbackend)
    class backend_mod(matplotlib.backend_bases._Backend):
        locals().update(vars(importlib.import_module(backend_name)))
    # Refuse to load a backend whose GUI framework conflicts with one that
    # is already running (only one GUI event loop can run per process).
    required_framework = _get_required_interactive_framework(backend_mod)
    if required_framework is not None:
        current_framework = cbook._get_running_interactive_framework()
        if (current_framework and required_framework
                and current_framework != required_framework):
            raise ImportError(
                "Cannot load backend {!r} which requires the {!r} interactive "
                "framework, as {!r} is currently running".format(
                    newbackend, required_framework, current_framework))
    _log.debug("Loaded backend %s version %s.",
               newbackend, backend_mod.backend_version)
    rcParams['backend'] = rcParamsDefault['backend'] = newbackend
    _backend_mod = backend_mod
    # Mirror the backend's real signatures on the pyplot-level wrappers.
    for func_name in ["new_figure_manager", "draw_if_interactive", "show"]:
        globals()[func_name].__signature__ = inspect.signature(
            getattr(backend_mod, func_name))
    # Need to keep a global reference to the backend for compatibility reasons.
    # See https://github.com/matplotlib/matplotlib/issues/6092
    matplotlib.backends.backend = newbackend
def _warn_if_gui_out_of_main_thread():
    # GUI event loops generally must run on the main thread; warn (but do
    # not fail) when a GUI backend is driven from a worker thread.
    needs_gui = _get_required_interactive_framework(_backend_mod)
    if needs_gui and threading.current_thread() is not threading.main_thread():
        _api.warn_external(
            "Starting a Matplotlib GUI outside of the main thread will likely "
            "fail.")
# This function's signature is rewritten upon backend-load by switch_backend.
def new_figure_manager(*args, **kwargs):
    """Create a new figure manager instance."""
    _warn_if_gui_out_of_main_thread()
    # Delegate to whichever backend switch_backend installed.
    return _backend_mod.new_figure_manager(*args, **kwargs)
# This function's signature is rewritten upon backend-load by switch_backend.
def draw_if_interactive(*args, **kwargs):
    """
    Redraw the current figure if in interactive mode.

    .. warning::

        End users will typically not have to call this function because the
        interactive mode takes care of this.
    """
    # Delegate to whichever backend switch_backend installed.
    return _backend_mod.draw_if_interactive(*args, **kwargs)
# This function's signature is rewritten upon backend-load by switch_backend.
def show(*args, **kwargs):
    """
    Display all open figures.

    Parameters
    ----------
    block : bool, optional
        Whether to wait for all figures to be closed before returning.

        If `True` block and run the GUI main loop until all figure windows
        are closed.

        If `False` ensure that all figure windows are displayed and return
        immediately.  In this case, you are responsible for ensuring
        that the event loop is running to have responsive figures.

        Defaults to True in non-interactive mode and to False in interactive
        mode (see `.pyplot.isinteractive`).

    See Also
    --------
    ion : Enable interactive mode, which shows / updates the figure after
          every plotting command, so that calling ``show()`` is not necessary.
    ioff : Disable interactive mode.
    savefig : Save the figure to an image file instead of showing it on screen.

    Notes
    -----
    **Saving figures to file and showing a window at the same time**

    If you want an image file as well as a user interface window, use
    `.pyplot.savefig` before `.pyplot.show`. At the end of (a blocking)
    ``show()`` the figure is closed and thus unregistered from pyplot. Calling
    `.pyplot.savefig` afterwards would save a new and thus empty figure. This
    limitation of command order does not apply if the show is non-blocking or
    if you keep a reference to the figure and use `.Figure.savefig`.

    **Auto-show in jupyter notebooks**

    The jupyter backends (activated via ``%matplotlib inline``,
    ``%matplotlib notebook``, or ``%matplotlib widget``), call ``show()`` at
    the end of every cell by default. Thus, you usually don't have to call it
    explicitly there.
    """
    _warn_if_gui_out_of_main_thread()
    # Delegate to whichever backend switch_backend installed.
    return _backend_mod.show(*args, **kwargs)
def isinteractive():
    """
    Return whether plots are updated after every plotting command.

    The interactive mode is mainly useful if you build plots from the command
    line and want to see the effect of each command while you are building the
    figure.

    In interactive mode:

    - newly created figures will be shown immediately;
    - figures will automatically redraw on change;
    - `.pyplot.show` will not block by default.

    In non-interactive mode:

    - newly created figures and changes to figures will not be reflected until
      explicitly asked to be;
    - `.pyplot.show` will block by default.

    See Also
    --------
    ion : Enable interactive mode.
    ioff : Disable interactive mode.
    show : Show all figures (and maybe block).
    pause : Show all figures, and block for a time.
    """
    # Interactive state lives in matplotlib's global rc machinery.
    return matplotlib.is_interactive()
class _IoffContext:
    """
    Context manager for `.ioff`.

    The state is changed in ``__init__()`` instead of ``__enter__()``. The
    latter is a no-op. This allows using `.ioff` both as a function and
    as a context.
    """

    def __init__(self):
        # Switch interactive mode off immediately so plain function-call
        # usage (``plt.ioff()``) works without entering the context.
        self.wasinteractive = isinteractive()
        matplotlib.interactive(False)
        uninstall_repl_displayhook()

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the interactive state captured in __init__.
        if not self.wasinteractive:
            matplotlib.interactive(False)
            uninstall_repl_displayhook()
        else:
            matplotlib.interactive(True)
            install_repl_displayhook()
class _IonContext:
    """
    Context manager for `.ion`.

    The state is changed in ``__init__()`` instead of ``__enter__()``. The
    latter is a no-op. This allows using `.ion` both as a function and
    as a context.
    """

    def __init__(self):
        # Switch interactive mode on immediately so plain function-call
        # usage (``plt.ion()``) works without entering the context.
        self.wasinteractive = isinteractive()
        matplotlib.interactive(True)
        install_repl_displayhook()

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the interactive state captured in __init__.
        if self.wasinteractive:
            matplotlib.interactive(True)
            install_repl_displayhook()
        else:
            matplotlib.interactive(False)
            uninstall_repl_displayhook()
def ioff():
    """
    Disable interactive mode.

    See `.pyplot.isinteractive` for more details.

    See Also
    --------
    ion : Enable interactive mode.
    isinteractive : Whether interactive mode is enabled.
    show : Show all figures (and maybe block).
    pause : Show all figures, and block for a time.

    Notes
    -----
    For a temporary change, this can be used as a context manager::

        # if interactive mode is on
        # then figures will be shown on creation
        plt.ion()
        # This figure will be shown immediately
        fig = plt.figure()

        with plt.ioff():
            # interactive mode will be off
            # figures will not automatically be shown
            fig2 = plt.figure()
            # ...

    To enable usage as a context manager, this function returns an
    ``_IoffContext`` object. The return value is not intended to be stored
    or accessed by the user.
    """
    # The context object's __init__ performs the actual state change.
    return _IoffContext()
def ion():
    """
    Enable interactive mode.

    See `.pyplot.isinteractive` for more details.

    See Also
    --------
    ioff : Disable interactive mode.
    isinteractive : Whether interactive mode is enabled.
    show : Show all figures (and maybe block).
    pause : Show all figures, and block for a time.

    Notes
    -----
    For a temporary change, this can be used as a context manager::

        # if interactive mode is off
        # then figures will not be shown on creation
        plt.ioff()
        # This figure will not be shown immediately
        fig = plt.figure()

        with plt.ion():
            # interactive mode will be on
            # figures will automatically be shown
            fig2 = plt.figure()
            # ...

    To enable usage as a context manager, this function returns an
    ``_IonContext`` object. The return value is not intended to be stored
    or accessed by the user.
    """
    # The context object's __init__ performs the actual state change.
    return _IonContext()
def pause(interval):
    """
    Run the GUI event loop for *interval* seconds.

    If there is an active figure, it will be updated and displayed before the
    pause, and the GUI event loop (if any) will run during the pause.

    This can be used for crude animation.  For more complex animation use
    :mod:`matplotlib.animation`.

    If there is no active figure, sleep for *interval* seconds instead.

    See Also
    --------
    matplotlib.animation : Proper animations
    show : Show all figures and optional block until all figures are closed.
    """
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        # No figure to animate: just wait.
        time.sleep(interval)
        return
    canvas = manager.canvas
    if canvas.figure.stale:
        canvas.draw_idle()
    show(block=False)
    canvas.start_event_loop(interval)
@_copy_docstring_and_deprecators(matplotlib.rc)
def rc(group, **kwargs):
    # Thin pyplot alias; the decorator copies matplotlib.rc's docstring.
    matplotlib.rc(group, **kwargs)
@_copy_docstring_and_deprecators(matplotlib.rc_context)
def rc_context(rc=None, fname=None):
    # Thin pyplot alias; returns matplotlib.rc_context's context manager.
    return matplotlib.rc_context(rc, fname)
@_copy_docstring_and_deprecators(matplotlib.rcdefaults)
def rcdefaults():
    matplotlib.rcdefaults()
    if matplotlib.is_interactive():
        # Reflect the restored defaults in any figures currently shown.
        draw_all()
# getp/get/setp are explicitly reexported so that they show up in pyplot docs.
@_copy_docstring_and_deprecators(matplotlib.artist.getp)
def getp(obj, *args, **kwargs):
    # Thin pyplot alias for matplotlib.artist.getp.
    return matplotlib.artist.getp(obj, *args, **kwargs)
@_copy_docstring_and_deprecators(matplotlib.artist.get)
def get(obj, *args, **kwargs):
    # Thin pyplot alias for matplotlib.artist.get.
    return matplotlib.artist.get(obj, *args, **kwargs)
@_copy_docstring_and_deprecators(matplotlib.artist.setp)
def setp(obj, *args, **kwargs):
    # Thin pyplot alias for matplotlib.artist.setp.
    return matplotlib.artist.setp(obj, *args, **kwargs)
def xkcd(scale=1, length=100, randomness=2):
    """
    Turn on `xkcd <https://xkcd.com/>`_ sketch-style drawing mode.  This will
    only have effect on things drawn after this function is called.

    For best results, the "Humor Sans" font should be installed: it is
    not included with Matplotlib.

    Parameters
    ----------
    scale : float, optional
        The amplitude of the wiggle perpendicular to the source line.
    length : float, optional
        The length of the wiggle along the line.
    randomness : float, optional
        The scale factor by which the length is shrunken or expanded.

    Notes
    -----
    This function works by a number of rcParams, so it will probably
    override others you have set before.

    If you want the effects of this function to be temporary, it can
    be used as a context manager, for example::

        with plt.xkcd():
            # This figure will be in XKCD-style
            fig1 = plt.figure()
            # ...

        # This figure will be in regular style
        fig2 = plt.figure()
    """
    # The _xkcd object applies the rc changes on construction and undoes
    # them on context-manager exit.
    return _xkcd(scale, length, randomness)
class _xkcd:
    # This cannot be implemented in terms of rc_context() because this needs to
    # work as a non-contextmanager too.
    def __init__(self, scale, length, randomness):
        # Snapshot the full rc state so __exit__ can restore it wholesale.
        self._orig = rcParams.copy()
        if rcParams['text.usetex']:
            raise RuntimeError(
                "xkcd mode is not compatible with text.usetex = True")
        from matplotlib import patheffects
        rcParams.update({
            'font.family': ['xkcd', 'xkcd Script', 'Humor Sans', 'Comic Neue',
                            'Comic Sans MS'],
            'font.size': 14.0,
            'path.sketch': (scale, length, randomness),
            'path.effects': [
                patheffects.withStroke(linewidth=4, foreground="w")],
            'axes.linewidth': 1.5,
            'lines.linewidth': 2.0,
            'figure.facecolor': 'white',
            'grid.linewidth': 0.0,
            'axes.grid': False,
            'axes.unicode_minus': False,
            'axes.edgecolor': 'black',
            'xtick.major.size': 8,
            'xtick.major.width': 3,
            'ytick.major.size': 8,
            'ytick.major.width': 3,
        })
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # dict.update bypasses rcParams' validation machinery; the snapshot
        # already holds validated values.
        dict.update(rcParams, self._orig)
## Figures ##
def figure(num=None,  # autoincrement if None, else integer from 1-N
           figsize=None,  # defaults to rc figure.figsize
           dpi=None,  # defaults to rc figure.dpi
           facecolor=None,  # defaults to rc figure.facecolor
           edgecolor=None,  # defaults to rc figure.edgecolor
           frameon=True,
           FigureClass=Figure,
           clear=False,
           **kwargs
           ):
    """
    Create a new figure, or activate an existing figure.

    Parameters
    ----------
    num : int or str or `.Figure`, optional
        A unique identifier for the figure.

        If a figure with that identifier already exists, this figure is made
        active and returned. An integer refers to the ``Figure.number``
        attribute, a string refers to the figure label.

        If there is no figure with the identifier or *num* is not given, a new
        figure is created, made active and returned.  If *num* is an int, it
        will be used for the ``Figure.number`` attribute, otherwise, an
        auto-generated integer value is used (starting at 1 and incremented
        for each new figure). If *num* is a string, the figure label and the
        window title is set to this value.

    figsize : (float, float), default: :rc:`figure.figsize`
        Width, height in inches.

    dpi : float, default: :rc:`figure.dpi`
        The resolution of the figure in dots-per-inch.

    facecolor : color, default: :rc:`figure.facecolor`
        The background color.

    edgecolor : color, default: :rc:`figure.edgecolor`
        The border color.

    frameon : bool, default: True
        If False, suppress drawing the figure frame.

    FigureClass : subclass of `~matplotlib.figure.Figure`
        Optionally use a custom `.Figure` instance.

    clear : bool, default: False
        If True and the figure already exists, then it is cleared.

    tight_layout : bool or dict, default: :rc:`figure.autolayout`
        If ``False`` use *subplotpars*. If ``True`` adjust subplot
        parameters using `.tight_layout` with default padding.
        When providing a dict containing the keys ``pad``, ``w_pad``,
        ``h_pad``, and ``rect``, the default `.tight_layout` paddings
        will be overridden.

    constrained_layout : bool, default: :rc:`figure.constrained_layout.use`
        If ``True`` use constrained layout to adjust positioning of plot
        elements.  Like ``tight_layout``, but designed to be more
        flexible.  See
        :doc:`/tutorials/intermediate/constrainedlayout_guide`
        for examples.  (Note: does not work with `add_subplot` or
        `~.pyplot.subplot2grid`.)

    **kwargs : optional
        See `~.matplotlib.figure.Figure` for other possible arguments.

    Returns
    -------
    `~matplotlib.figure.Figure`
        The `.Figure` instance returned will also be passed to
        new_figure_manager in the backends, which allows to hook custom
        `.Figure` classes into the pyplot interface. Additional kwargs will be
        passed to the `.Figure` init function.

    Notes
    -----
    If you are creating many figures, make sure you explicitly call
    `.pyplot.close` on the figures you are not using, because this will
    enable pyplot to properly clean up the memory.

    `~matplotlib.rcParams` defines the default values, which can be modified
    in the matplotlibrc file.
    """
    if isinstance(num, Figure):
        # A Figure instance: just (re)activate it, creating nothing.
        if num.canvas.manager is None:
            raise ValueError("The passed figure is not managed by pyplot")
        _pylab_helpers.Gcf.set_active(num.canvas.manager)
        return num
    # Resolve *num* (None / str label / int-like) to an integer figure number.
    allnums = get_fignums()
    next_num = max(allnums) + 1 if allnums else 1
    fig_label = ''
    if num is None:
        num = next_num
    elif isinstance(num, str):
        fig_label = num
        all_labels = get_figlabels()
        if fig_label not in all_labels:
            if fig_label == 'all':
                _api.warn_external("close('all') closes all existing figures.")
            num = next_num
        else:
            inum = all_labels.index(fig_label)
            num = allnums[inum]
    else:
        num = int(num)  # crude validation of num argument
    manager = _pylab_helpers.Gcf.get_fig_manager(num)
    if manager is None:
        # No existing figure under this number: create one.
        max_open_warning = rcParams['figure.max_open_warning']
        if len(allnums) == max_open_warning >= 1:
            _api.warn_external(
                f"More than {max_open_warning} figures have been opened. "
                f"Figures created through the pyplot interface "
                f"(`matplotlib.pyplot.figure`) are retained until explicitly "
                f"closed and may consume too much memory. (To control this "
                f"warning, see the rcParam `figure.max_open_warning`).",
                RuntimeWarning)
        manager = new_figure_manager(
            num, figsize=figsize, dpi=dpi,
            facecolor=facecolor, edgecolor=edgecolor, frameon=frameon,
            FigureClass=FigureClass, **kwargs)
        fig = manager.canvas.figure
        if fig_label:
            fig.set_label(fig_label)
        _pylab_helpers.Gcf._set_new_active_manager(manager)
        # make sure backends (inline) that we don't ship that expect this
        # to be called in plotting commands to make the figure call show
        # still work.  There is probably a better way to do this in the
        # FigureManager base class.
        draw_if_interactive()
        if _INSTALL_FIG_OBSERVER:
            fig.stale_callback = _auto_draw_if_interactive
    if clear:
        manager.canvas.figure.clear()
    return manager.canvas.figure
def _auto_draw_if_interactive(fig, val):
    """
    An internal helper function for making sure that auto-redrawing
    works as intended in the plain python repl.

    Parameters
    ----------
    fig : Figure
        A figure object which is assumed to be associated with a canvas
    """
    if not val or not matplotlib.is_interactive():
        return
    canvas = fig.canvas
    if canvas.is_saving() or canvas._is_idle_drawing:
        return
    # Some artists can mark themselves as stale in the middle of drawing
    # (e.g. axes position & tick labels being computed at draw time), but
    # this shouldn't trigger a redraw because the current redraw will
    # already take them into account.
    with canvas._idle_draw_cntx():
        canvas.draw_idle()
def gcf():
    """
    Get the current figure.

    If no current figure exists, a new one is created using
    `~.pyplot.figure()`.
    """
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        # No active figure yet: create (and activate) a fresh one.
        return figure()
    return manager.canvas.figure
def fignum_exists(num):
    """Return whether the figure with the given id exists."""
    # A figure can be identified either by number or by string label.
    if _pylab_helpers.Gcf.has_fignum(num):
        return True
    return num in get_figlabels()
def get_fignums():
    """Return a list of existing figure numbers."""
    nums = list(_pylab_helpers.Gcf.figs)  # keys are the figure numbers
    nums.sort()
    return nums
def get_figlabels():
    """Return a list of existing figure labels."""
    # Order labels by figure number so they parallel get_fignums().
    ordered = sorted(_pylab_helpers.Gcf.get_all_fig_managers(),
                     key=lambda manager: manager.num)
    return [manager.canvas.figure.get_label() for manager in ordered]
def get_current_fig_manager():
    """
    Return the figure manager of the current figure.

    The figure manager is a container for the actual backend-depended window
    that displays the figure on screen.

    If no current figure exists, a new one is created, and its figure
    manager is returned.

    Returns
    -------
    `.FigureManagerBase` or backend-dependent subclass thereof
    """
    # gcf() creates a figure (and hence a manager) if none exists yet.
    return gcf().canvas.manager
@_copy_docstring_and_deprecators(FigureCanvasBase.mpl_connect)
def connect(s, func):
    # Register *func* for event *s* on the current figure's canvas.
    return gcf().canvas.mpl_connect(s, func)
@_copy_docstring_and_deprecators(FigureCanvasBase.mpl_disconnect)
def disconnect(cid):
    # Remove the callback with connection id *cid* from the current canvas.
    return gcf().canvas.mpl_disconnect(cid)
def close(fig=None):
    """
    Close a figure window.

    Parameters
    ----------
    fig : None or int or str or `.Figure`
        The figure to close. There are a number of ways to specify this:

        - *None*: the current figure
        - `.Figure`: the given `.Figure` instance
        - ``int``: a figure number
        - ``str``: a figure name
        - 'all': all figures
    """
    if fig is None:
        # Close the currently active figure, if there is one.
        active = _pylab_helpers.Gcf.get_active()
        if active is None:
            return
        _pylab_helpers.Gcf.destroy(active)
    elif fig == 'all':
        _pylab_helpers.Gcf.destroy_all()
    elif isinstance(fig, int):
        _pylab_helpers.Gcf.destroy(fig)
    elif hasattr(fig, 'int'):
        # UUID-like objects expose their integer form via an `int` attribute.
        _pylab_helpers.Gcf.destroy(fig.int)
    elif isinstance(fig, str):
        # Look the label up among existing figures; silently ignore misses.
        labels = get_figlabels()
        if fig in labels:
            _pylab_helpers.Gcf.destroy(get_fignums()[labels.index(fig)])
    elif isinstance(fig, Figure):
        _pylab_helpers.Gcf.destroy_fig(fig)
    else:
        raise TypeError("close() argument must be a Figure, an int, a string, "
                        "or None, not %s" % type(fig))
def clf():
    """Clear the current figure."""
    # gcf() creates a figure if none exists, so this never fails.
    gcf().clf()
def draw():
    """
    Redraw the current figure.

    This is used to update a figure that has been altered, but not
    automatically re-drawn.  If interactive mode is on (via `.ion()`), this
    should be only rarely needed, but there may be ways to modify the state of
    a figure without marking it as "stale".  Please report these cases as bugs.

    This is equivalent to calling ``fig.canvas.draw_idle()``, where ``fig`` is
    the current figure.
    """
    # draw_idle schedules (rather than forces) a redraw on the canvas.
    gcf().canvas.draw_idle()
@_copy_docstring_and_deprecators(Figure.savefig)
def savefig(*args, **kwargs):
    # Save the *current* figure; all arguments go to Figure.savefig.
    fig = gcf()
    res = fig.savefig(*args, **kwargs)
    fig.canvas.draw_idle()   # need this if 'transparent=True' to reset colors
    return res
## Putting things in figures ##
def figlegend(*args, **kwargs):
    # Delegate to the current figure's legend machinery.
    return gcf().legend(*args, **kwargs)
# Reuse Figure.legend's docstring, renaming the entry point; guard against
# docstrings being stripped (e.g. under ``python -OO``).
if Figure.legend.__doc__:
    figlegend.__doc__ = Figure.legend.__doc__.replace("legend(", "figlegend(")
## Axes ##
@docstring.dedent_interpd
def axes(arg=None, **kwargs):
    """
    Add an axes to the current figure and make it the current axes.

    Call signatures::

        plt.axes()
        plt.axes(rect, projection=None, polar=False, **kwargs)
        plt.axes(ax)

    Parameters
    ----------
    arg : None or 4-tuple
        The exact behavior of this function depends on the type:

        - *None*: A new full window axes is added using
          ``subplot(**kwargs)``.
        - 4-tuple of floats *rect* = ``[left, bottom, width, height]``.
          A new axes is added with dimensions *rect* in normalized
          (0, 1) units using `~.Figure.add_axes` on the current figure.

    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the `~.axes.Axes`. *str* is the name of
        a custom projection, see `~matplotlib.projections`. The default
        None results in a 'rectilinear' projection.

    polar : bool, default: False
        If True, equivalent to projection='polar'.

    sharex, sharey : `~.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey.
        The axis will have the same limits, ticks, and scale as the axis
        of the shared axes.

    label : str
        A label for the returned axes.

    Returns
    -------
    `~.axes.Axes`, or a subclass of `~.axes.Axes`
        The returned axes class depends on the projection used. It is
        `~.axes.Axes` if rectilinear projection is used and
        `.projections.polar.PolarAxes` if polar projection is used.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for
        the returned axes class. The keyword arguments for the
        rectilinear axes class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used, see the actual axes
        class.

        %(Axes_kwdoc)s

    Notes
    -----
    If the figure already has a axes with key (*args*,
    *kwargs*) then it will simply make that axes current and
    return it.  This behavior is deprecated. Meanwhile, if you do
    not want this behavior (i.e., you want to force the creation of a
    new axes), you must use a unique set of args and kwargs.  The axes
    *label* attribute has been exposed for this purpose: if you want
    two axes that are otherwise identical to be added to the figure,
    make sure you give them unique labels.

    See Also
    --------
    .Figure.add_axes
    .pyplot.subplot
    .Figure.add_subplot
    .Figure.subplots
    .pyplot.subplots

    Examples
    --------
    ::

        # Creating a new full window axes
        plt.axes()

        # Creating a new axes with specified dimensions and some kwargs
        plt.axes((left, bottom, width, height), facecolor='w')
    """
    fig = gcf()
    # No rect given: behave like add_subplot (full-window axes); otherwise
    # treat *arg* as a rect (or axes) and hand it to add_axes.
    if arg is None:
        return fig.add_subplot(**kwargs)
    else:
        return fig.add_axes(arg, **kwargs)
def delaxes(ax=None):
    """
    Remove an `~.axes.Axes` (defaulting to the current axes) from its figure.
    """
    target = gca() if ax is None else ax
    target.remove()
def sca(ax):
    """
    Set the current Axes to *ax* and the current Figure to the parent of *ax*.
    """
    # Activating the parent figure first also registers it with pyplot.
    figure(ax.figure)
    ax.figure.sca(ax)
def cla():
    """Clear the current axes."""
    # Not generated via boilerplate.py to allow a different docstring.
    # gca() creates an axes (and figure) if none exists yet.
    return gca().cla()
## More ways of creating axes ##
@docstring.dedent_interpd
def subplot(*args, **kwargs):
    """
    Add an Axes to the current figure or retrieve an existing Axes.

    This is a wrapper of `.Figure.add_subplot` which provides additional
    behavior when working with the implicit API (see the notes section).

    Call signatures::

       subplot(nrows, ncols, index, **kwargs)
       subplot(pos, **kwargs)
       subplot(**kwargs)
       subplot(ax)

    Parameters
    ----------
    *args : int, (int, int, *index*), or `.SubplotSpec`, default: (1, 1, 1)
        The position of the subplot described by one of

        - Three integers (*nrows*, *ncols*, *index*). The subplot will take the
          *index* position on a grid with *nrows* rows and *ncols* columns.
          *index* starts at 1 in the upper left corner and increases to the
          right. *index* can also be a two-tuple specifying the (*first*,
          *last*) indices (1-based, and including *last*) of the subplot, e.g.,
          ``fig.add_subplot(3, 1, (1, 2))`` makes a subplot that spans the
          upper 2/3 of the figure.
        - A 3-digit integer. The digits are interpreted as if given separately
          as three single-digit integers, i.e. ``fig.add_subplot(235)`` is the
          same as ``fig.add_subplot(2, 3, 5)``. Note that this can only be used
          if there are no more than 9 subplots.
        - A `.SubplotSpec`.

    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the subplot (`~.axes.Axes`). *str* is the name
        of a custom projection, see `~matplotlib.projections`. The default
        None results in a 'rectilinear' projection.

    polar : bool, default: False
        If True, equivalent to projection='polar'.

    sharex, sharey : `~.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey. The
        axis will have the same limits, ticks, and scale as the axis of the
        shared axes.

    label : str
        A label for the returned axes.

    Returns
    -------
    `.axes.SubplotBase`, or another subclass of `~.axes.Axes`

        The axes of the subplot. The returned axes base class depends on
        the projection used. It is `~.axes.Axes` if rectilinear projection
        is used and `.projections.polar.PolarAxes` if polar projection
        is used. The returned axes is then a subplot subclass of the
        base class.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for the returned axes
        base class; except for the *figure* argument. The keyword arguments
        for the rectilinear base class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used.

        %(Axes_kwdoc)s

    Notes
    -----
    Creating a new Axes will delete any pre-existing Axes that
    overlaps with it beyond sharing a boundary::

        import matplotlib.pyplot as plt
        # plot a line, implicitly creating a subplot(111)
        plt.plot([1, 2, 3])
        # now create a subplot which represents the top plot of a grid
        # with 2 rows and 1 column. Since this subplot will overlap the
        # first, the plot (and its axes) previously created, will be removed
        plt.subplot(211)

    If you do not want this behavior, use the `.Figure.add_subplot` method
    or the `.pyplot.axes` function instead.

    If no *kwargs* are passed and there exists an Axes in the location
    specified by *args* then that Axes will be returned rather than a new
    Axes being created.

    If *kwargs* are passed and there exists an Axes in the location
    specified by *args*, the projection type is the same, and the
    *kwargs* match with the existing Axes, then the existing Axes is
    returned.  Otherwise a new Axes is created with the specified
    parameters.  We save a reference to the *kwargs* which we use
    for this comparison.  If any of the values in *kwargs* are
    mutable we will not detect the case where they are mutated.
    In these cases we suggest using `.Figure.add_subplot` and the
    explicit Axes API rather than the implicit pyplot API.

    See Also
    --------
    .Figure.add_subplot
    .pyplot.subplots
    .pyplot.axes
    .Figure.subplots

    Examples
    --------
    ::

        plt.subplot(221)

        # equivalent but more general
        ax1 = plt.subplot(2, 2, 1)

        # add a subplot with no frame
        ax2 = plt.subplot(222, frameon=False)

        # add a polar subplot
        plt.subplot(223, projection='polar')

        # add a red subplot that shares the x-axis with ax1
        plt.subplot(224, sharex=ax1, facecolor='red')

        # delete ax2 from the figure
        plt.delaxes(ax2)

        # add ax2 to the figure again
        plt.subplot(ax2)

        # make the first axes "current" again
        plt.subplot(221)
    """
    # Here we will only normalize `polar=True` vs `projection='polar'` and let
    # downstream code deal with the rest.
    unset = object()
    projection = kwargs.get('projection', unset)
    polar = kwargs.pop('polar', unset)
    if polar is not unset and polar:
        # if we got mixed messages from the user, raise
        if projection is not unset and projection != 'polar':
            raise ValueError(
                f"polar={polar}, yet projection={projection!r}. "
                "Only one of these arguments should be supplied."
            )
        kwargs['projection'] = projection = 'polar'
    # if subplot called without arguments, create subplot(1, 1, 1)
    if len(args) == 0:
        args = (1, 1, 1)
    # This check was added because it is very easy to type subplot(1, 2, False)
    # when subplots(1, 2, False) was intended (sharex=False, that is). In most
    # cases, no error will ever occur, but mysterious behavior can result
    # because what was intended to be the sharex argument is instead treated as
    # a subplot index for subplot()
    if len(args) >= 3 and isinstance(args[2], bool):
        _api.warn_external("The subplot index argument to subplot() appears "
                           "to be a boolean. Did you intend to use "
                           "subplots()?")
    # Check for nrows and ncols, which are not valid subplot args:
    if 'nrows' in kwargs or 'ncols' in kwargs:
        raise TypeError("subplot() got an unexpected keyword argument 'ncols' "
                        "and/or 'nrows'.  Did you intend to call subplots()?")
    fig = gcf()
    # First, search for an existing subplot with a matching spec.
    key = SubplotSpec._from_subplot_args(fig, args)
    for ax in fig.axes:
        # if we found an axes at the position sort out if we can re-use it
        if hasattr(ax, 'get_subplotspec') and ax.get_subplotspec() == key:
            # if the user passed no kwargs, re-use
            if kwargs == {}:
                break
            # if the axes class and kwargs are identical, reuse
            elif ax._projection_init == fig._process_projection_requirements(
                    *args, **kwargs
            ):
                break
    else:
        # we have exhausted the known Axes and none match, make a new one!
        ax = fig.add_subplot(*args, **kwargs)
    fig.sca(ax)
    # Remove any pre-existing axes that the (possibly new) subplot fully
    # overlaps — the documented implicit-API behavior.
    bbox = ax.bbox
    axes_to_delete = []
    for other_ax in fig.axes:
        if other_ax == ax:
            continue
        if bbox.fully_overlaps(other_ax.bbox):
            axes_to_delete.append(other_ax)
    for ax_to_del in axes_to_delete:
        delaxes(ax_to_del)
    return ax
@_api.make_keyword_only("3.3", "sharex")
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
             subplot_kw=None, gridspec_kw=None, **fig_kw):
    """
    Create a figure and a set of subplots.

    This is a thin convenience wrapper: it creates a new figure with
    `.pyplot.figure` and immediately populates it via `.Figure.subplots`,
    returning both the figure and the created Axes.

    Parameters
    ----------
    nrows, ncols : int, default: 1
        Number of rows/columns of the subplot grid.
    sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
        Controls sharing of properties among x (*sharex*) or y (*sharey*)
        axes:

        - True or 'all': x- or y-axis will be shared among all subplots.
        - False or 'none': each subplot x- or y-axis will be independent.
        - 'row': each subplot row will share an x- or y-axis.
        - 'col': each subplot column will share an x- or y-axis.

        When subplots have a shared x-axis along a column, only the x tick
        labels of the bottom subplot are created.  Similarly, for a shared
        y-axis along a row only the first column gets y tick labels.  Use
        `~matplotlib.axes.Axes.tick_params` to turn the others back on.
        When a shared axis has units, `~matplotlib.axis.Axis.set_units`
        updates every shared axis with the new units.
    squeeze : bool, default: True
        - If True, extra dimensions are squeezed out of the returned array
          of `~matplotlib.axes.Axes`: a 1x1 grid yields a scalar Axes, an
          Nx1 or 1xM grid yields a 1D object array, NxM yields a 2D array.
        - If False, the result is always a 2D array of Axes, even for 1x1.
    subplot_kw : dict, optional
        Keywords passed to each `~matplotlib.figure.Figure.add_subplot`
        call used to create the subplots.
    gridspec_kw : dict, optional
        Keywords passed to the `~matplotlib.gridspec.GridSpec` constructor
        used to create the grid the subplots are placed on.
    **fig_kw
        All additional keyword arguments are passed to the
        `.pyplot.figure` call.

    Returns
    -------
    fig : `~.figure.Figure`
    ax : `.axes.Axes` or array of Axes
        A single Axes or an array of Axes, depending on the grid size and
        the *squeeze* keyword (see above).  Typical idioms::

            fig, ax = plt.subplots()                    # single Axes
            fig, axs = plt.subplots(2, 2)               # 2D array of Axes
            fig, (ax1, ax2) = plt.subplots(1, 2)        # tuple unpacking

        The names ``ax`` and pluralized ``axs`` are preferred over ``axes``
        because the latter is ambiguous between one Axes and several.

    See Also
    --------
    .pyplot.figure
    .pyplot.subplot
    .pyplot.axes
    .Figure.subplots
    .Figure.add_subplot

    Examples
    --------
    ::

        # First create some toy data:
        x = np.linspace(0, 2*np.pi, 400)
        y = np.sin(x**2)

        # Create just a figure and only one subplot
        fig, ax = plt.subplots()
        ax.plot(x, y)
        ax.set_title('Simple plot')

        # Create two subplots and unpack the output array immediately
        f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
        ax1.plot(x, y)
        ax1.set_title('Sharing Y axis')
        ax2.scatter(x, y)

        # Create four polar axes and access them through the returned array
        fig, axs = plt.subplots(2, 2, subplot_kw=dict(projection="polar"))
        axs[0, 0].plot(x, y)
        axs[1, 1].scatter(x, y)

        # Share axes along columns, rows, or everywhere
        plt.subplots(2, 2, sharex='col')
        plt.subplots(2, 2, sharey='row')
        plt.subplots(2, 2, sharex='all', sharey='all')   # same as True, True

        # Create figure number 10 with a single subplot
        # and clears it if it already exists.
        fig, ax = plt.subplots(num=10, clear=True)
    """
    # Build the figure first, then delegate all grid construction to it.
    fig = figure(**fig_kw)
    axs = fig.subplots(nrows=nrows, ncols=ncols,
                       sharex=sharex, sharey=sharey,
                       squeeze=squeeze,
                       subplot_kw=subplot_kw,
                       gridspec_kw=gridspec_kw)
    return fig, axs
def subplot_mosaic(mosaic, *, subplot_kw=None, gridspec_kw=None,
                   empty_sentinel='.', **fig_kw):
    """
    Build a layout of Axes based on ASCII art or nested lists.

    This is a helper function to build complex GridSpec layouts visually.

    .. note ::

        This API is provisional and may be revised in the future based on
        early user feedback.

    Parameters
    ----------
    mosaic : list of list of {hashable or nested} or str
        A visual layout of how you want your Axes to be arranged, labeled
        as strings.  For example ::

            x = [['A panel', 'A panel', 'edge'],
                 ['C panel', '.',       'edge']]

        produces 4 axes:

        - 'A panel' which is 1 row high and spans the first two columns
        - 'edge' which is 2 rows high and is on the right edge
        - 'C panel' which is 1 row and 1 column wide in the bottom left
        - a blank space 1 row and 1 column wide in the bottom center

        Any entry may itself be a list of lists of the same form to create
        nested layouts.  A string input must be of the form ::

            '''
            AAE
            C.E
            '''

        where each character is a column and each line is a row.  This
        only allows single-character Axes labels and no nesting, but is
        very terse.
    subplot_kw : dict, optional
        Dictionary with keywords passed to the `.Figure.add_subplot` call
        used to create each subplot.
    gridspec_kw : dict, optional
        Dictionary with keywords passed to the `.GridSpec` constructor used
        to create the grid the subplots are placed on.
    empty_sentinel : object, optional
        Entry in the layout meaning "leave this space empty".  Defaults to
        ``'.'``.  Note, if *layout* is a string, it is processed via
        `inspect.cleandoc` to remove leading white space, which may
        interfere with using white-space as the empty sentinel.
    **fig_kw
        All additional keyword arguments are passed to the
        `.pyplot.figure` call.

    Returns
    -------
    fig : `~.figure.Figure`
        The new figure.
    dict[label, Axes]
        A dictionary mapping the labels to the Axes objects.  The order of
        the axes is left-to-right and top-to-bottom of their position in
        the total layout.
    """
    # All layout parsing lives on the Figure; this wrapper only creates it.
    fig = figure(**fig_kw)
    ax_dict = fig.subplot_mosaic(mosaic,
                                 subplot_kw=subplot_kw,
                                 gridspec_kw=gridspec_kw,
                                 empty_sentinel=empty_sentinel)
    return fig, ax_dict
def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):
    """
    Create a subplot at a specific location inside a regular grid.

    Parameters
    ----------
    shape : (int, int)
        Number of rows and of columns of the grid in which to place axis.
    loc : (int, int)
        Row number and column number of the axis location within the grid.
    rowspan : int, default: 1
        Number of rows for the axis to span downwards.
    colspan : int, default: 1
        Number of columns for the axis to span to the right.
    fig : `.Figure`, optional
        Figure to place the subplot in. Defaults to the current figure.
    **kwargs
        Additional keyword arguments are handed to `~.Figure.add_subplot`.

    Returns
    -------
    `.axes.SubplotBase`, or another subclass of `~.axes.Axes`
        The axes of the subplot.  The returned axes base class depends on
        the projection used: `~.axes.Axes` for rectilinear and
        `.projections.polar.PolarAxes` for polar projections; the returned
        axes is a subplot subclass of that base class.

    Notes
    -----
    The following call ::

        ax = subplot2grid((nrows, ncols), (row, col), rowspan, colspan)

    is identical to ::

        fig = gcf()
        gs = fig.add_gridspec(nrows, ncols)
        ax = fig.add_subplot(gs[row:row+rowspan, col:col+colspan])
    """
    if fig is None:
        fig = gcf()
    nrows, ncols = shape
    # Reuse an existing gridspec of the right shape if the figure has one.
    gs = GridSpec._check_gridspec_exists(fig, nrows, ncols)
    subplotspec = gs.new_subplotspec(loc, rowspan=rowspan, colspan=colspan)
    ax = fig.add_subplot(subplotspec, **kwargs)
    # Any pre-existing axes fully covered by the new one are removed, so the
    # new subplot effectively replaces whatever occupied that grid cell.
    covered = [other for other in fig.axes
               if other is not ax and ax.bbox.fully_overlaps(other.bbox)]
    for other in covered:
        delaxes(other)
    return ax
def twinx(ax=None):
    """
    Make and return a second axes that shares the *x*-axis. The new axes
    will overlay *ax* (or the current axes if *ax* is *None*), and its
    ticks will be on the right.

    Examples
    --------
    :doc:`/gallery/subplots_axes_and_figures/two_scales`
    """
    # Fall back to the current axes when none is given.
    target = gca() if ax is None else ax
    return target.twinx()
def twiny(ax=None):
    """
    Make and return a second axes that shares the *y*-axis. The new axes
    will overlay *ax* (or the current axes if *ax* is *None*), and its
    ticks will be on the top.

    Examples
    --------
    :doc:`/gallery/subplots_axes_and_figures/two_scales`
    """
    # Fall back to the current axes when none is given.
    target = gca() if ax is None else ax
    return target.twiny()
def subplot_tool(targetfig=None):
    """
    Launch a subplot tool window for a figure.

    A `matplotlib.widgets.SubplotTool` instance is returned. You must maintain
    a reference to the instance to keep the associated callbacks alive.

    Parameters
    ----------
    targetfig : `.Figure`, optional
        The figure whose subplot parameters will be adjusted.  Defaults to
        the current figure.
    """
    if targetfig is None:
        targetfig = gcf()
    with rc_context({"toolbar": "none"}):  # No navbar for the toolfig.
        # Use new_figure_manager() instead of figure() so that the figure
        # doesn't get registered with pyplot.
        manager = new_figure_manager(-1, (6, 3))
    manager.set_window_title("Subplot configuration tool")
    tool_fig = manager.canvas.figure
    # Leave room at the top of the tool window for the slider labels.
    tool_fig.subplots_adjust(top=0.9)
    manager.show()
    return SubplotTool(targetfig, tool_fig)
# After deprecation elapses, this can be autogenerated by boilerplate.py.
@_api.make_keyword_only("3.3", "pad")
def tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):
    """
    Adjust the padding between and around subplots.

    Parameters
    ----------
    pad : float, default: 1.08
        Padding between the figure edge and the edges of subplots,
        as a fraction of the font size.
    h_pad, w_pad : float, default: *pad*
        Padding (height/width) between edges of adjacent subplots,
        as a fraction of the font size.
    rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1)
        A rectangle in normalized figure coordinates into which the whole
        subplots area (including labels) will fit.
    """
    # Delegate to the Figure method on the current figure.
    fig = gcf()
    fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
def box(on=None):
    """
    Turn the axes box on or off on the current axes.

    Parameters
    ----------
    on : bool or None
        The new `~matplotlib.axes.Axes` box state.  If ``None``, toggle
        the current state.

    See Also
    --------
    :meth:`matplotlib.axes.Axes.set_frame_on`
    :meth:`matplotlib.axes.Axes.get_frame_on`
    """
    ax = gca()
    # None means "flip whatever the frame state currently is".
    new_state = (not ax.get_frame_on()) if on is None else on
    ax.set_frame_on(new_state)
## Axis ##
def xlim(*args, **kwargs):
    """
    Get or set the x limits of the current axes.

    Call signatures::

        left, right = xlim()  # return the current xlim
        xlim((left, right))   # set the xlim to left, right
        xlim(left, right)     # set the xlim to left, right

    If you do not specify args, you can pass *left* or *right* as kwargs,
    i.e.::

        xlim(right=3)  # adjust the right leaving left unchanged
        xlim(left=1)  # adjust the left leaving right unchanged

    Setting limits turns autoscaling off for the x-axis.

    Returns
    -------
    left, right
        A tuple of the new x-axis limits.

    Notes
    -----
    Calling this function with no arguments (e.g. ``xlim()``) is the pyplot
    equivalent of calling `~.Axes.get_xlim` on the current axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_xlim` on the current axes. All arguments are passed though.
    """
    ax = gca()
    # Any argument at all means "set"; otherwise just report current limits.
    if args or kwargs:
        return ax.set_xlim(*args, **kwargs)
    return ax.get_xlim()
def ylim(*args, **kwargs):
    """
    Get or set the y-limits of the current axes.

    Call signatures::

        bottom, top = ylim()  # return the current ylim
        ylim((bottom, top))   # set the ylim to bottom, top
        ylim(bottom, top)     # set the ylim to bottom, top

    If you do not specify args, you can alternatively pass *bottom* or
    *top* as kwargs, i.e.::

        ylim(top=3)  # adjust the top leaving bottom unchanged
        ylim(bottom=1)  # adjust the bottom leaving top unchanged

    Setting limits turns autoscaling off for the y-axis.

    Returns
    -------
    bottom, top
        A tuple of the new y-axis limits.

    Notes
    -----
    Calling this function with no arguments (e.g. ``ylim()``) is the pyplot
    equivalent of calling `~.Axes.get_ylim` on the current axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_ylim` on the current axes. All arguments are passed though.
    """
    ax = gca()
    # Any argument at all means "set"; otherwise just report current limits.
    if args or kwargs:
        return ax.set_ylim(*args, **kwargs)
    return ax.get_ylim()
def xticks(ticks=None, labels=None, **kwargs):
    """
    Get or set the current tick locations and labels of the x-axis.

    Pass no arguments to return the current values without modifying them.

    Parameters
    ----------
    ticks : array-like, optional
        The list of xtick locations.  Passing an empty list removes all xticks.
    labels : array-like, optional
        The labels to place at the given *ticks* locations.  This argument can
        only be passed if *ticks* is passed as well.
    **kwargs
        `.Text` properties can be used to control the appearance of the labels.

    Returns
    -------
    locs
        The list of xtick locations.
    labels
        The list of xlabel `.Text` objects.

    Notes
    -----
    Calling this function with no arguments (e.g. ``xticks()``) is the pyplot
    equivalent of calling `~.Axes.get_xticks` and `~.Axes.get_xticklabels` on
    the current axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_xticks` and `~.Axes.set_xticklabels` on the current axes.

    Examples
    --------
    >>> locs, labels = xticks()  # Get the current locations and labels.
    >>> xticks(np.arange(0, 1, step=0.2))  # Set label locations.
    >>> xticks(np.arange(3), ['Tom', 'Dick', 'Sue'])  # Set text labels.
    >>> xticks([0, 1, 2], ['January', 'February', 'March'],
    ...        rotation=20)  # Set text labels and properties.
    >>> xticks([])  # Disable xticks.
    """
    ax = gca()
    if ticks is None:
        # Read-only path for locations; *labels* alone is not meaningful.
        locs = ax.get_xticks()
        if labels is not None:
            raise TypeError("xticks(): Parameter 'labels' can't be set "
                            "without setting 'ticks'")
    else:
        locs = ax.set_xticks(ticks)
    if labels is None:
        labels = ax.get_xticklabels()
    else:
        labels = ax.set_xticklabels(labels, **kwargs)
    # Apply any Text properties (e.g. rotation) to every tick label.
    for l in labels:
        l.update(kwargs)
    return locs, labels
def yticks(ticks=None, labels=None, **kwargs):
    """
    Get or set the current tick locations and labels of the y-axis.

    Pass no arguments to return the current values without modifying them.

    Parameters
    ----------
    ticks : array-like, optional
        The list of ytick locations.  Passing an empty list removes all yticks.
    labels : array-like, optional
        The labels to place at the given *ticks* locations.  This argument can
        only be passed if *ticks* is passed as well.
    **kwargs
        `.Text` properties can be used to control the appearance of the labels.

    Returns
    -------
    locs
        The list of ytick locations.
    labels
        The list of ylabel `.Text` objects.

    Notes
    -----
    Calling this function with no arguments (e.g. ``yticks()``) is the pyplot
    equivalent of calling `~.Axes.get_yticks` and `~.Axes.get_yticklabels` on
    the current axes.
    Calling this function with arguments is the pyplot equivalent of calling
    `~.Axes.set_yticks` and `~.Axes.set_yticklabels` on the current axes.

    Examples
    --------
    >>> locs, labels = yticks()  # Get the current locations and labels.
    >>> yticks(np.arange(0, 1, step=0.2))  # Set label locations.
    >>> yticks(np.arange(3), ['Tom', 'Dick', 'Sue'])  # Set text labels.
    >>> yticks([0, 1, 2], ['January', 'February', 'March'],
    ...        rotation=45)  # Set text labels and properties.
    >>> yticks([])  # Disable yticks.
    """
    ax = gca()
    if ticks is None:
        # Read-only path for locations; *labels* alone is not meaningful.
        locs = ax.get_yticks()
        if labels is not None:
            raise TypeError("yticks(): Parameter 'labels' can't be set "
                            "without setting 'ticks'")
    else:
        locs = ax.set_yticks(ticks)
    if labels is None:
        labels = ax.get_yticklabels()
    else:
        labels = ax.set_yticklabels(labels, **kwargs)
    # Apply any Text properties (e.g. rotation) to every tick label.
    for l in labels:
        l.update(kwargs)
    return locs, labels
def rgrids(radii=None, labels=None, angle=None, fmt=None, **kwargs):
    """
    Get or set the radial gridlines on the current polar plot.

    Call signatures::

     lines, labels = rgrids()
     lines, labels = rgrids(radii, labels=None, angle=22.5, fmt=None, **kwargs)

    When called with no arguments, `.rgrids` simply returns the tuple
    (*lines*, *labels*).  When called with arguments, the labels will
    appear at the specified radial distances and angle.

    Parameters
    ----------
    radii : tuple with floats
        The radii for the radial gridlines.
    labels : tuple with strings or None
        The labels to use at each radial gridline.  The
        `matplotlib.ticker.ScalarFormatter` will be used if None.
    angle : float
        The angular position of the radius labels in degrees.
    fmt : str or None
        Format string used in `matplotlib.ticker.FormatStrFormatter`.
        For example '%f'.

    Returns
    -------
    lines : list of `.lines.Line2D`
        The radial gridlines.
    labels : list of `.text.Text`
        The tick labels.

    Other Parameters
    ----------------
    **kwargs
        *kwargs* are optional `~.Text` properties for the labels.

    See Also
    --------
    .pyplot.thetagrids
    .projections.polar.PolarAxes.set_rgrids
    .Axis.get_gridlines
    .Axis.get_ticklabels

    Examples
    --------
    ::

      # set the locations of the radial gridlines
      lines, labels = rgrids( (0.25, 0.5, 1.0) )

      # set the locations and labels of the radial gridlines
      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry' ))
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('rgrids only defined for polar axes')
    # "Getter" mode only when absolutely nothing was passed.
    no_args = (radii is None and labels is None and angle is None
               and fmt is None and not kwargs)
    if no_args:
        lines = ax.yaxis.get_gridlines()
        labels = ax.yaxis.get_ticklabels()
    else:
        lines, labels = ax.set_rgrids(radii, labels=labels, angle=angle,
                                      fmt=fmt, **kwargs)
    return lines, labels
def thetagrids(angles=None, labels=None, fmt=None, **kwargs):
    """
    Get or set the theta gridlines on the current polar plot.

    Call signatures::

     lines, labels = thetagrids()
     lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs)

    When called with no arguments, `.thetagrids` simply returns the tuple
    (*lines*, *labels*).  When called with arguments, the labels will
    appear at the specified angles.

    Parameters
    ----------
    angles : tuple with floats, degrees
        The angles of the theta gridlines.
    labels : tuple with strings or None
        The labels to use at each radial gridline.  The
        `.projections.polar.ThetaFormatter` will be used if None.
    fmt : str or None
        Format string used in `matplotlib.ticker.FormatStrFormatter`.
        For example '%f'.  Note that the angle in radians will be used.

    Returns
    -------
    lines : list of `.lines.Line2D`
        The theta gridlines.
    labels : list of `.text.Text`
        The tick labels.

    Other Parameters
    ----------------
    **kwargs
        *kwargs* are optional `~.Text` properties for the labels.

    See Also
    --------
    .pyplot.rgrids
    .projections.polar.PolarAxes.set_thetagrids
    .Axis.get_gridlines
    .Axis.get_ticklabels

    Examples
    --------
    ::

      # set the locations of the angular gridlines
      lines, labels = thetagrids(range(45, 360, 90))

      # set the locations and labels of the angular gridlines
      lines, labels = thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
    # "Getter" mode only when absolutely nothing was passed.
    no_args = (angles is None and labels is None and fmt is None
               and not kwargs)
    if no_args:
        lines = ax.xaxis.get_ticklines()
        labels = ax.xaxis.get_ticklabels()
    else:
        lines, labels = ax.set_thetagrids(angles, labels=labels, fmt=fmt,
                                          **kwargs)
    return lines, labels
## Plotting Info ##
def plotting():
    # Intentionally empty: the docstring (a table of all plotting commands)
    # is generated at import time by _setup_pyplot_info_docstrings().
    pass
def get_plot_commands():
    """
    Get a sorted list of all of the plotting commands.
    """
    # A plotting command is any public function defined in this module,
    # minus a few hard-coded non-plotting helpers and all of the
    # colormap-setting functions.
    NON_PLOT_COMMANDS = {
        'colormaps', 'colors', 'connect', 'disconnect',
        'get_plot_commands', 'get_current_fig_manager', 'ginput',
        'plotting', 'waitforbuttonpress'}
    skip = NON_PLOT_COMMANDS | set(colormaps())
    this_module = inspect.getmodule(get_plot_commands)

    def _is_plot_command(name, obj):
        # Public, not excluded, a plain function, and defined here (not
        # merely imported into this namespace).
        return (not name.startswith('_')
                and name not in skip
                and inspect.isfunction(obj)
                and inspect.getmodule(obj) is this_module)

    return sorted(name for name, obj in globals().items()
                  if _is_plot_command(name, obj))
def colormaps():
    """
    Return a sorted list of the names of all registered colormaps.

    Matplotlib provides a number of colormaps, and others can be added using
    :func:`~matplotlib.cm.register_cmap`.  Set the colormap for an image,
    pcolor, scatter, etc. with a keyword argument::

      imshow(X, cmap=cm.hot)

    or post-hoc with the :func:`set_cmap` function::

      imshow(X)
      pyplot.set_cmap('hot')
      pyplot.set_cmap('jet')

    In interactive mode, :func:`set_cmap` will update the colormap post-hoc,
    allowing you to see which one works best for your data.

    All built-in colormaps can be reversed by appending ``_r``: for
    instance, ``gray_r`` is the reverse of ``gray``.

    The registry contains several families of colormaps:

    Perceptually uniform sequential maps
        *viridis*, *plasma*, *inferno* and *magma* -- the recommended
        choice for sequential (unipolar) data.
    ColorBrewer schemes
        Diverging (e.g. *BrBG*, *RdBu*, *Spectral*), sequential (e.g.
        *Blues*, *YlOrRd*) and qualitative (*Accent*, *Set1*--*Set3*,
        *Paired*, *Pastel1*/*2*, *Dark2*) palettes based on the color
        specifications and designs developed by Cynthia Brewer
        (https://colorbrewer2.org).
    Matlab-derived maps
        *autumn*, *bone*, *cool*, *copper*, *flag*, *gray*, *hot*, *jet*,
        *pink*, *prism*, *spring*, *summer*, *winter*.
    Yorick/GIST palettes
        *gist_earth*, *gist_heat*, *gist_ncar*, *gist_rainbow*,
        *gist_stern*, from the Yorick scientific visualisation package by
        David H. Munro (https://dhmunro.github.io/yorick-doc/).
    Cyclic maps
        *hsv*, *twilight*, *twilight_shifted* -- for values that wrap
        around at the endpoints such as phase angle or time of day.
    Miscellaneous schemes
        *afmhot*, *brg*, *bwr*, *coolwarm*, *CMRmap*, *cubehelix*,
        *gnuplot*, *gnuplot2*, *ocean*, *rainbow*, *seismic*,
        *nipy_spectral*, *terrain*, *turbo*, and the redundant aliases
        *gist_gray* (= *gray*), *gist_yarg* and *binary* (= *gray_r*).

    Note that rainbow colormaps, ``jet`` in particular, are considered a
    poor choice for scientific visualization by many researchers ("Rainbow
    Color Map (Still) Considered Harmful",
    https://ieeexplore.ieee.org/document/4118486/).
    """
    return sorted(cm._cmap_registry)
def _setup_pyplot_info_docstrings():
    """
    Setup the docstring of `plotting` and of the colormap-setting functions.

    These must be done after the entire module is imported, so it is called
    from the end of this module, which is generated by boilerplate.py.
    """
    commands = get_plot_commands()

    # Non-greedy match up to the first period followed by whitespace (or
    # end-of-string); DOTALL lets the sentence span multiple lines.
    first_sentence = re.compile(r"(?:\s*).+?\.(?:\s+|$)", flags=re.DOTALL)

    # Collect the first sentence of the docstring for all of the
    # plotting commands.
    rows = []
    max_name = len("Function")
    max_summary = len("Description")
    for name in commands:
        doc = globals()[name].__doc__
        summary = ''
        if doc is not None:
            match = first_sentence.match(doc)
            if match is not None:
                # Collapse the sentence onto one line for the table.
                summary = inspect.cleandoc(match.group(0)).replace('\n', ' ')
        name = '`%s`' % name
        rows.append([name, summary])
        max_name = max(max_name, len(name))
        max_summary = max(max_summary, len(summary))

    # Render a reST simple table: separator, header, separator, rows,
    # separator; columns padded to the widest entry.
    separator = '=' * max_name + ' ' + '=' * max_summary
    lines = [
        separator,
        '{:{}} {:{}}'.format('Function', max_name, 'Description', max_summary),
        separator,
    ] + [
        '{:{}} {:{}}'.format(name, max_name, summary, max_summary)
        for name, summary in rows
    ] + [
        separator,
    ]
    plotting.__doc__ = '\n'.join(lines)

    # Give each colormap-setting function (e.g. `viridis()`) a docstring.
    for cm_name in colormaps():
        if cm_name in globals():
            globals()[cm_name].__doc__ = f"""
    Set the colormap to {cm_name!r}.

    This changes the default colormap as well as the colormap of the current
    image if there is one. See ``help(colormaps)`` for more information.
    """
## Plotting part 1: manually generated functions and wrappers ##
@_copy_docstring_and_deprecators(Figure.colorbar)
def colorbar(mappable=None, cax=None, ax=None, **kw):
    # Default to the current image; without one there is nothing to map
    # colors from, so bail out with an explanatory error.
    if mappable is None:
        mappable = gci()
        if mappable is None:
            raise RuntimeError('No mappable was found to use for colorbar '
                               'creation. First define a mappable such as '
                               'an image (with imshow) or a contour set ('
                               'with contourf).')
    return gcf().colorbar(mappable, cax=cax, ax=ax, **kw)
def clim(vmin=None, vmax=None):
    """
    Set the color limits of the current image.

    If either *vmin* or *vmax* is None, the image min/max respectively
    will be used for color scaling.

    If you want to set the clim of multiple images, use
    `~.ScalarMappable.set_clim` on every image, for example::

      for im in gca().get_images():
          im.set_clim(0, 0.5)

    """
    image = gci()
    if image is None:
        raise RuntimeError('You must first define an image, e.g., with imshow')
    image.set_clim(vmin, vmax)
def set_cmap(cmap):
    """
    Set the default colormap, and applies it to the current image if any.

    Parameters
    ----------
    cmap : `~matplotlib.colors.Colormap` or str
        A colormap instance or the name of a registered colormap.

    See Also
    --------
    colormaps
    matplotlib.cm.register_cmap
    matplotlib.cm.get_cmap
    """
    # Resolve a name to a Colormap instance (passes instances through).
    cmap = cm.get_cmap(cmap)
    # Make it the rcParams default for future images...
    rc('image', cmap=cmap.name)
    # ...and retroactively apply it to the current image, if there is one.
    current_image = gci()
    if current_image is not None:
        current_image.set_cmap(cmap)
@_copy_docstring_and_deprecators(matplotlib.image.imread)
def imread(fname, format=None):
    # Pure delegation; the docstring is copied from matplotlib.image.imread
    # by the decorator.
    return matplotlib.image.imread(fname, format)
@_copy_docstring_and_deprecators(matplotlib.image.imsave)
def imsave(fname, arr, **kwargs):
    # Thin pass-through; the real work (and docstring) lives in
    # matplotlib.image.imsave.
    return matplotlib.image.imsave(fname, arr, **kwargs)
def matshow(A, fignum=None, **kwargs):
    """
    Display an array as a matrix in a new figure window.

    The origin is set at the upper left hand corner and rows (first
    dimension of the array) are displayed horizontally.  The aspect
    ratio of the figure window is that of the array, unless this would
    make an excessively short or narrow figure.

    Tick labels for the xaxis are placed on top.

    Parameters
    ----------
    A : 2D array-like
        The matrix to be displayed.

    fignum : None or int or False
        If *None*, create a new figure window with automatic numbering.

        If a nonzero integer, draw into the figure with the given number
        (create it if it does not exist).

        If 0, use the current axes (or create one if it does not exist).

        .. note::

           Because of how `.Axes.matshow` tries to set the figure aspect
           ratio to be the one of the array, strange things may happen if you
           reuse an existing figure.

    Returns
    -------
    `~matplotlib.image.AxesImage`

    Other Parameters
    ----------------
    **kwargs : `~matplotlib.axes.Axes.imshow` arguments
    """
    arr = np.asanyarray(A)
    if fignum != 0:
        # Size a fresh figure to the array's own aspect ratio.
        target_axes = figure(fignum, figsize=figaspect(arr)).add_axes(
            [0.15, 0.09, 0.775, 0.775])
    else:
        # fignum == 0: reuse (or lazily create) the current axes.
        target_axes = gca()
    image = target_axes.matshow(arr, **kwargs)
    sci(image)
    return image
def polar(*args, **kwargs):
    """
    Make a polar plot.

    call signature::

        polar(theta, r, **kwargs)

    Multiple *theta*, *r* arguments are supported, with format strings, as in
    `plot`.

    Returns
    -------
    list of `~matplotlib.lines.Line2D`
        The lines produced by the underlying `plot` call.
    """
    # Bug fix: the previous version returned the existing polar axes
    # *without plotting* when the current axes already had a polar
    # projection, and it discarded a warned-about non-polar axes and
    # created a new polar one on top of it.  Correct behavior: reuse the
    # current axes when any exist (warning if it is not polar), and only
    # create a polar axes when the figure has none; then always plot.
    if gcf().get_axes():
        ax = gca()
        if not isinstance(ax, PolarAxes):
            _api.warn_external('Trying to create polar plot on an Axes '
                               'that does not have a polar projection.')
    else:
        ax = axes(projection="polar")
    ret = ax.plot(*args, **kwargs)
    return ret
# If rcParams['backend_fallback'] is true, and an interactive backend is
# requested, ignore rcParams['backend'] and force selection of a backend that
# is compatible with the current running interactive framework.
# NOTE(review): dict.__getitem__/__setitem__ deliberately bypass the rcParams
# validation/resolution machinery so that the raw stored value is inspected
# and the auto-resolution sentinel can be stored back unvalidated.
if (rcParams["backend_fallback"]
        and dict.__getitem__(rcParams, "backend") in (
            set(_interactive_bk) - {'WebAgg', 'nbAgg'})
        and cbook._get_running_interactive_framework()):
    dict.__setitem__(rcParams, "backend", rcsetup._auto_backend_sentinel)
# Set up the backend.
switch_backend(rcParams["backend"])
# Just to be safe. Interactive mode can be turned on without
# calling `plt.ion()` so register it again here.
# This is safe because multiple calls to `install_repl_displayhook`
# are no-ops and the registered function respect `mpl.is_interactive()`
# to determine if they should trigger a draw.
install_repl_displayhook()
################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.figimage)
def figimage(
X, xo=0, yo=0, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, origin=None, resize=False, **kwargs):
return gcf().figimage(
X, xo=xo, yo=yo, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,
vmax=vmax, origin=origin, resize=resize, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.text)
def figtext(x, y, s, fontdict=None, **kwargs):
return gcf().text(x, y, s, fontdict=fontdict, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.gca)
def gca(**kwargs):
return gcf().gca(**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure._gci)
def gci():
return gcf()._gci()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.ginput)
def ginput(
n=1, timeout=30, show_clicks=True,
mouse_add=MouseButton.LEFT, mouse_pop=MouseButton.RIGHT,
mouse_stop=MouseButton.MIDDLE):
return gcf().ginput(
n=n, timeout=timeout, show_clicks=show_clicks,
mouse_add=mouse_add, mouse_pop=mouse_pop,
mouse_stop=mouse_stop)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.subplots_adjust)
def subplots_adjust(
left=None, bottom=None, right=None, top=None, wspace=None,
hspace=None):
return gcf().subplots_adjust(
left=left, bottom=bottom, right=right, top=top, wspace=wspace,
hspace=hspace)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.suptitle)
def suptitle(t, **kwargs):
return gcf().suptitle(t, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Figure.waitforbuttonpress)
def waitforbuttonpress(timeout=-1):
return gcf().waitforbuttonpress(timeout=timeout)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.acorr)
def acorr(x, *, data=None, **kwargs):
return gca().acorr(
x, **({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.angle_spectrum)
def angle_spectrum(
x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, *,
data=None, **kwargs):
return gca().angle_spectrum(
x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.annotate)
def annotate(text, xy, *args, **kwargs):
return gca().annotate(text, xy, *args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.arrow)
def arrow(x, y, dx, dy, **kwargs):
return gca().arrow(x, y, dx, dy, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.autoscale)
def autoscale(enable=True, axis='both', tight=None):
return gca().autoscale(enable=enable, axis=axis, tight=tight)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.axhline)
def axhline(y=0, xmin=0, xmax=1, **kwargs):
return gca().axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.axhspan)
def axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs):
return gca().axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.axis)
def axis(*args, emit=True, **kwargs):
return gca().axis(*args, emit=emit, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.axline)
def axline(xy1, xy2=None, *, slope=None, **kwargs):
return gca().axline(xy1, xy2=xy2, slope=slope, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.axvline)
def axvline(x=0, ymin=0, ymax=1, **kwargs):
return gca().axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.axvspan)
def axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs):
return gca().axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.bar)
def bar(
x, height, width=0.8, bottom=None, *, align='center',
data=None, **kwargs):
return gca().bar(
x, height, width=width, bottom=bottom, align=align,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.barbs)
def barbs(*args, data=None, **kw):
return gca().barbs(
*args, **({"data": data} if data is not None else {}), **kw)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.barh)
def barh(y, width, height=0.8, left=None, *, align='center', **kwargs):
return gca().barh(
y, width, height=height, left=left, align=align, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.bar_label)
def bar_label(
container, labels=None, *, fmt='%g', label_type='edge',
padding=0, **kwargs):
return gca().bar_label(
container, labels=labels, fmt=fmt, label_type=label_type,
padding=padding, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.boxplot)
def boxplot(
x, notch=None, sym=None, vert=None, whis=None,
positions=None, widths=None, patch_artist=None,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=None, showmeans=None, showcaps=None, showbox=None,
showfliers=None, boxprops=None, labels=None, flierprops=None,
medianprops=None, meanprops=None, capprops=None,
whiskerprops=None, manage_ticks=True, autorange=False,
zorder=None, *, data=None):
return gca().boxplot(
x, notch=notch, sym=sym, vert=vert, whis=whis,
positions=positions, widths=widths, patch_artist=patch_artist,
bootstrap=bootstrap, usermedians=usermedians,
conf_intervals=conf_intervals, meanline=meanline,
showmeans=showmeans, showcaps=showcaps, showbox=showbox,
showfliers=showfliers, boxprops=boxprops, labels=labels,
flierprops=flierprops, medianprops=medianprops,
meanprops=meanprops, capprops=capprops,
whiskerprops=whiskerprops, manage_ticks=manage_ticks,
autorange=autorange, zorder=zorder,
**({"data": data} if data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.broken_barh)
def broken_barh(xranges, yrange, *, data=None, **kwargs):
return gca().broken_barh(
xranges, yrange,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.clabel)
def clabel(CS, levels=None, **kwargs):
return gca().clabel(CS, levels=levels, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.cohere)
def cohere(
x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, *, data=None, **kwargs):
return gca().cohere(
x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.contour)
def contour(*args, data=None, **kwargs):
__ret = gca().contour(
*args, **({"data": data} if data is not None else {}),
**kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.contourf)
def contourf(*args, data=None, **kwargs):
__ret = gca().contourf(
*args, **({"data": data} if data is not None else {}),
**kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.csd)
def csd(
x, y, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, *, data=None, **kwargs):
return gca().csd(
x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, return_line=return_line,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.errorbar)
def errorbar(
x, y, yerr=None, xerr=None, fmt='', ecolor=None,
elinewidth=None, capsize=None, barsabove=False, lolims=False,
uplims=False, xlolims=False, xuplims=False, errorevery=1,
capthick=None, *, data=None, **kwargs):
return gca().errorbar(
x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,
elinewidth=elinewidth, capsize=capsize, barsabove=barsabove,
lolims=lolims, uplims=uplims, xlolims=xlolims,
xuplims=xuplims, errorevery=errorevery, capthick=capthick,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.eventplot)
def eventplot(
positions, orientation='horizontal', lineoffsets=1,
linelengths=1, linewidths=None, colors=None,
linestyles='solid', *, data=None, **kwargs):
return gca().eventplot(
positions, orientation=orientation, lineoffsets=lineoffsets,
linelengths=linelengths, linewidths=linewidths, colors=colors,
linestyles=linestyles,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.fill)
def fill(*args, data=None, **kwargs):
return gca().fill(
*args, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.fill_between)
def fill_between(
x, y1, y2=0, where=None, interpolate=False, step=None, *,
data=None, **kwargs):
return gca().fill_between(
x, y1, y2=y2, where=where, interpolate=interpolate, step=step,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.fill_betweenx)
def fill_betweenx(
y, x1, x2=0, where=None, step=None, interpolate=False, *,
data=None, **kwargs):
return gca().fill_betweenx(
y, x1, x2=x2, where=where, step=step, interpolate=interpolate,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.grid)
def grid(b=None, which='major', axis='both', **kwargs):
return gca().grid(b=b, which=which, axis=axis, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.hexbin)
def hexbin(
x, y, C=None, gridsize=100, bins=None, xscale='linear',
yscale='linear', extent=None, cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, edgecolors='face',
reduce_C_function=np.mean, mincnt=None, marginals=False, *,
data=None, **kwargs):
__ret = gca().hexbin(
x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,
yscale=yscale, extent=extent, cmap=cmap, norm=norm, vmin=vmin,
vmax=vmax, alpha=alpha, linewidths=linewidths,
edgecolors=edgecolors, reduce_C_function=reduce_C_function,
mincnt=mincnt, marginals=marginals,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.hist)
def hist(
x, bins=None, range=None, density=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, color=None,
label=None, stacked=False, *, data=None, **kwargs):
return gca().hist(
x, bins=bins, range=range, density=density, weights=weights,
cumulative=cumulative, bottom=bottom, histtype=histtype,
align=align, orientation=orientation, rwidth=rwidth, log=log,
color=color, label=label, stacked=stacked,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.stairs)
def stairs(
values, edges=None, *, orientation='vertical', baseline=0,
fill=False, data=None, **kwargs):
return gca().stairs(
values, edges=edges, orientation=orientation,
baseline=baseline, fill=fill,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.hist2d)
def hist2d(
x, y, bins=10, range=None, density=False, weights=None,
cmin=None, cmax=None, *, data=None, **kwargs):
__ret = gca().hist2d(
x, y, bins=bins, range=range, density=density,
weights=weights, cmin=cmin, cmax=cmax,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret[-1])
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.hlines)
def hlines(
y, xmin, xmax, colors=None, linestyles='solid', label='', *,
data=None, **kwargs):
return gca().hlines(
y, xmin, xmax, colors=colors, linestyles=linestyles,
label=label, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.imshow)
def imshow(
X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=None, vmin=None, vmax=None, origin=None, extent=None, *,
filternorm=True, filterrad=4.0, resample=None, url=None,
data=None, **kwargs):
__ret = gca().imshow(
X, cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha, vmin=vmin,
vmax=vmax, origin=origin, extent=extent,
filternorm=filternorm, filterrad=filterrad, resample=resample,
url=url, **({"data": data} if data is not None else {}),
**kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.legend)
def legend(*args, **kwargs):
return gca().legend(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.locator_params)
def locator_params(axis='both', tight=None, **kwargs):
return gca().locator_params(axis=axis, tight=tight, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.loglog)
def loglog(*args, **kwargs):
return gca().loglog(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.magnitude_spectrum)
def magnitude_spectrum(
x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
scale=None, *, data=None, **kwargs):
return gca().magnitude_spectrum(
x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,
scale=scale, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.margins)
def margins(*margins, x=None, y=None, tight=True):
return gca().margins(*margins, x=x, y=y, tight=tight)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.minorticks_off)
def minorticks_off():
return gca().minorticks_off()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.minorticks_on)
def minorticks_on():
return gca().minorticks_on()
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.pcolor)
def pcolor(
*args, shading=None, alpha=None, norm=None, cmap=None,
vmin=None, vmax=None, data=None, **kwargs):
__ret = gca().pcolor(
*args, shading=shading, alpha=alpha, norm=norm, cmap=cmap,
vmin=vmin, vmax=vmax,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.pcolormesh)
def pcolormesh(
*args, alpha=None, norm=None, cmap=None, vmin=None,
vmax=None, shading=None, antialiased=False, data=None,
**kwargs):
__ret = gca().pcolormesh(
*args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,
vmax=vmax, shading=shading, antialiased=antialiased,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.phase_spectrum)
def phase_spectrum(
x, Fs=None, Fc=None, window=None, pad_to=None, sides=None, *,
data=None, **kwargs):
return gca().phase_spectrum(
x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to, sides=sides,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.pie)
def pie(
x, explode=None, labels=None, colors=None, autopct=None,
pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=0, radius=1, counterclock=True, wedgeprops=None,
textprops=None, center=(0, 0), frame=False,
rotatelabels=False, *, normalize=None, data=None):
return gca().pie(
x, explode=explode, labels=labels, colors=colors,
autopct=autopct, pctdistance=pctdistance, shadow=shadow,
labeldistance=labeldistance, startangle=startangle,
radius=radius, counterclock=counterclock,
wedgeprops=wedgeprops, textprops=textprops, center=center,
frame=frame, rotatelabels=rotatelabels, normalize=normalize,
**({"data": data} if data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.plot)
def plot(*args, scalex=True, scaley=True, data=None, **kwargs):
return gca().plot(
*args, scalex=scalex, scaley=scaley,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.plot_date)
def plot_date(
x, y, fmt='o', tz=None, xdate=True, ydate=False, *,
data=None, **kwargs):
return gca().plot_date(
x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.psd)
def psd(
x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, *, data=None, **kwargs):
return gca().psd(
x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, return_line=return_line,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.quiver)
def quiver(*args, data=None, **kw):
__ret = gca().quiver(
*args, **({"data": data} if data is not None else {}), **kw)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.quiverkey)
def quiverkey(Q, X, Y, U, label, **kw):
return gca().quiverkey(Q, X, Y, U, label, **kw)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.scatter)
def scatter(
x, y, s=None, c=None, marker=None, cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None, *,
edgecolors=None, plotnonfinite=False, data=None, **kwargs):
__ret = gca().scatter(
x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha, linewidths=linewidths,
edgecolors=edgecolors, plotnonfinite=plotnonfinite,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.semilogx)
def semilogx(*args, **kwargs):
return gca().semilogx(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.semilogy)
def semilogy(*args, **kwargs):
return gca().semilogy(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.specgram)
def specgram(
x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, cmap=None, xextent=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, *, data=None, **kwargs):
__ret = gca().specgram(
x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend, window=window,
noverlap=noverlap, cmap=cmap, xextent=xextent, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, mode=mode,
scale=scale, vmin=vmin, vmax=vmax,
**({"data": data} if data is not None else {}), **kwargs)
sci(__ret[-1])
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.spy)
def spy(
Z, precision=0, marker=None, markersize=None, aspect='equal',
origin='upper', **kwargs):
__ret = gca().spy(
Z, precision=precision, marker=marker, markersize=markersize,
aspect=aspect, origin=origin, **kwargs)
if isinstance(__ret, cm.ScalarMappable): sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.stackplot)
def stackplot(
x, *args, labels=(), colors=None, baseline='zero', data=None,
**kwargs):
return gca().stackplot(
x, *args, labels=labels, colors=colors, baseline=baseline,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.stem)
def stem(
*args, linefmt=None, markerfmt=None, basefmt=None, bottom=0,
label=None, use_line_collection=True, orientation='vertical',
data=None):
return gca().stem(
*args, linefmt=linefmt, markerfmt=markerfmt, basefmt=basefmt,
bottom=bottom, label=label,
use_line_collection=use_line_collection,
orientation=orientation,
**({"data": data} if data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.step)
def step(x, y, *args, where='pre', data=None, **kwargs):
return gca().step(
x, y, *args, where=where,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.streamplot)
def streamplot(
x, y, u, v, density=1, linewidth=None, color=None, cmap=None,
norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,
transform=None, zorder=None, start_points=None, maxlength=4.0,
integration_direction='both', *, data=None):
__ret = gca().streamplot(
x, y, u, v, density=density, linewidth=linewidth, color=color,
cmap=cmap, norm=norm, arrowsize=arrowsize,
arrowstyle=arrowstyle, minlength=minlength,
transform=transform, zorder=zorder, start_points=start_points,
maxlength=maxlength,
integration_direction=integration_direction,
**({"data": data} if data is not None else {}))
sci(__ret.lines)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.table)
def table(
cellText=None, cellColours=None, cellLoc='right',
colWidths=None, rowLabels=None, rowColours=None,
rowLoc='left', colLabels=None, colColours=None,
colLoc='center', loc='bottom', bbox=None, edges='closed',
**kwargs):
return gca().table(
cellText=cellText, cellColours=cellColours, cellLoc=cellLoc,
colWidths=colWidths, rowLabels=rowLabels,
rowColours=rowColours, rowLoc=rowLoc, colLabels=colLabels,
colColours=colColours, colLoc=colLoc, loc=loc, bbox=bbox,
edges=edges, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.text)
def text(x, y, s, fontdict=None, **kwargs):
return gca().text(x, y, s, fontdict=fontdict, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tick_params)
def tick_params(axis='both', **kwargs):
return gca().tick_params(axis=axis, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.ticklabel_format)
def ticklabel_format(
*, axis='both', style='', scilimits=None, useOffset=None,
useLocale=None, useMathText=None):
return gca().ticklabel_format(
axis=axis, style=style, scilimits=scilimits,
useOffset=useOffset, useLocale=useLocale,
useMathText=useMathText)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tricontour)
def tricontour(*args, **kwargs):
__ret = gca().tricontour(*args, **kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tricontourf)
def tricontourf(*args, **kwargs):
__ret = gca().tricontourf(*args, **kwargs)
if __ret._A is not None: sci(__ret) # noqa
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.tripcolor)
def tripcolor(
*args, alpha=1.0, norm=None, cmap=None, vmin=None, vmax=None,
shading='flat', facecolors=None, **kwargs):
__ret = gca().tripcolor(
*args, alpha=alpha, norm=norm, cmap=cmap, vmin=vmin,
vmax=vmax, shading=shading, facecolors=facecolors, **kwargs)
sci(__ret)
return __ret
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.triplot)
def triplot(*args, **kwargs):
return gca().triplot(*args, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.violinplot)
def violinplot(
dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
quantiles=None, points=100, bw_method=None, *, data=None):
return gca().violinplot(
dataset, positions=positions, vert=vert, widths=widths,
showmeans=showmeans, showextrema=showextrema,
showmedians=showmedians, quantiles=quantiles, points=points,
bw_method=bw_method,
**({"data": data} if data is not None else {}))
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.vlines)
def vlines(
x, ymin, ymax, colors=None, linestyles='solid', label='', *,
data=None, **kwargs):
return gca().vlines(
x, ymin, ymax, colors=colors, linestyles=linestyles,
label=label, **({"data": data} if data is not None else {}),
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.xcorr)
def xcorr(
x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, *, data=None, **kwargs):
return gca().xcorr(
x, y, normed=normed, detrend=detrend, usevlines=usevlines,
maxlags=maxlags,
**({"data": data} if data is not None else {}), **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes._sci)
def sci(im):
return gca()._sci(im)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_title)
def title(label, fontdict=None, loc=None, pad=None, *, y=None, **kwargs):
return gca().set_title(
label, fontdict=fontdict, loc=loc, pad=pad, y=y, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_xlabel)
def xlabel(xlabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):
return gca().set_xlabel(
xlabel, fontdict=fontdict, labelpad=labelpad, loc=loc,
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_ylabel)
def ylabel(ylabel, fontdict=None, labelpad=None, *, loc=None, **kwargs):
return gca().set_ylabel(
ylabel, fontdict=fontdict, labelpad=labelpad, loc=loc,
**kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_xscale)
def xscale(value, **kwargs):
return gca().set_xscale(value, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
@_copy_docstring_and_deprecators(Axes.set_yscale)
def yscale(value, **kwargs):
return gca().set_yscale(value, **kwargs)
# Autogenerated by boilerplate.py. Do not edit as changes will be lost.
# Colormap-switching shortcuts: each one delegates to set_cmap, changing the
# default colormap and, if present, the current image's colormap.  Their
# __doc__ strings are filled in after definition by the colormaps() loop run
# inside _setup_pyplot_info_docstrings() below.
def autumn(): set_cmap('autumn')
def bone(): set_cmap('bone')
def cool(): set_cmap('cool')
def copper(): set_cmap('copper')
def flag(): set_cmap('flag')
def gray(): set_cmap('gray')
def hot(): set_cmap('hot')
def hsv(): set_cmap('hsv')
def jet(): set_cmap('jet')
def pink(): set_cmap('pink')
def prism(): set_cmap('prism')
def spring(): set_cmap('spring')
def summer(): set_cmap('summer')
def winter(): set_cmap('winter')
def magma(): set_cmap('magma')
def inferno(): set_cmap('inferno')
def plasma(): set_cmap('plasma')
def viridis(): set_cmap('viridis')
def nipy_spectral(): set_cmap('nipy_spectral')
# Populate module/function docstrings (including the shortcuts above).
_setup_pyplot_info_docstrings()
|
import json
from unittest.mock import mock_open, patch
import pytest
from satosa.exception import SATOSAConfigurationError
from satosa.exception import SATOSAConfigurationError
from satosa.satosa_config import SATOSAConfig
class TestSATOSAConfig:
    """Tests for SATOSAConfig: sensitive values supplied via environment
    variables, and loading of backend/frontend/micro-service module configs
    from dicts or files."""

    @pytest.fixture
    def non_sensitive_config_dict(self):
        """Returns config without sensitive data (secret keys)."""
        config = {
            "BASE": "https://example.com",
            "COOKIE_STATE_NAME": "TEST_STATE",
            "BACKEND_MODULES": [],
            "FRONTEND_MODULES": [],
            "INTERNAL_ATTRIBUTES": {"attributes": {}}
        }
        return config

    def test_read_senstive_config_data_from_env_var(self, monkeypatch, non_sensitive_config_dict):
        # A SATOSA_-prefixed environment variable supplies the missing key.
        monkeypatch.setenv("SATOSA_STATE_ENCRYPTION_KEY", "state_encryption_key")
        config = SATOSAConfig(non_sensitive_config_dict)
        assert config["STATE_ENCRYPTION_KEY"] == "state_encryption_key"

    def test_senstive_config_data_from_env_var_overrides_config(self, monkeypatch, non_sensitive_config_dict):
        # The environment variable takes precedence over a value in the dict.
        non_sensitive_config_dict["STATE_ENCRYPTION_KEY"] = "bar"
        monkeypatch.setenv("SATOSA_STATE_ENCRYPTION_KEY", "state_encryption_key")
        config = SATOSAConfig(non_sensitive_config_dict)
        assert config["STATE_ENCRYPTION_KEY"] == "state_encryption_key"

    def test_constructor_should_raise_exception_if_sensitive_keys_are_missing(self, non_sensitive_config_dict):
        with pytest.raises(SATOSAConfigurationError):
            SATOSAConfig(non_sensitive_config_dict)

    @pytest.mark.parametrize("modules_key", [
        "BACKEND_MODULES",
        "FRONTEND_MODULES",
        "MICRO_SERVICES"
    ])
    def test_can_read_endpoint_configs_from_dict(self, satosa_config_dict, modules_key):
        expected_config = [{"foo": "bar"}, {"abc": "xyz"}]
        satosa_config_dict[modules_key] = expected_config
        config = SATOSAConfig(satosa_config_dict)
        assert config[modules_key] == expected_config

    @pytest.mark.parametrize("modules_key", [
        "BACKEND_MODULES",
        "FRONTEND_MODULES",
        "MICRO_SERVICES"
    ])
    def test_can_read_endpoint_configs_from_file(self, satosa_config_dict, modules_key):
        satosa_config_dict[modules_key] = ["/fake_file_path"]
        expected_config = {"foo": "bar"}
        with patch("builtins.open", mock_open(read_data=json.dumps(expected_config))):
            config = SATOSAConfig(satosa_config_dict)
            assert config[modules_key] == [expected_config]

    @pytest.mark.parametrize("modules_key", [
        "BACKEND_MODULES",
        "FRONTEND_MODULES",
        "MICRO_SERVICES"
    ])
    def test_unreadable_endpoint_config_file_raises(self, satosa_config_dict, modules_key):
        # BUG FIX: this test was previously named
        # `test_can_read_endpoint_configs_from_file`, identical to the test
        # above, so the earlier definition was shadowed and never collected
        # by pytest.  Renamed so both tests actually run.
        satosa_config_dict[modules_key] = ["/fake_file_path"]
        with pytest.raises(SATOSAConfigurationError):
            SATOSAConfig(satosa_config_dict)
|
from setuptools import setup, find_packages
from codecs import open
from os import path
__version__ = '0.0.1'

# Absolute path of the directory containing this setup script.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')

# Plain requirements go to install_requires; VCS ("git+...") entries are
# exposed via dependency_links with the "git+" prefix stripped.
# NOTE(review): the startswith('git+') check runs on the *unstripped* line,
# while the membership check uses 'git+' anywhere in the line — a git+ entry
# with leading whitespace would be dropped from both lists.  Confirm intended.
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]

setup(
    name='json_explorer',
    version=__version__,
    description='Package for exploring JSON objects.',
    long_description=long_description,
    url='https://github.com/wcbeard/json_explorer',
    download_url='https://github.com/wcbeard/json_explorer/tarball/' + __version__,
    license='BSD',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    author='Chris Beard',
    install_requires=install_requires,
    dependency_links=dependency_links,
    author_email='wcbeard10+gh@gmail.com'
)
|
# -*- coding: utf-8 -*-
#############################################################################
#
# syntax.py
#
# description: examples of syntax
#
#
# Authors:
# Cody Roux
#
#
#
##############################################################################
import boole.core.expr as expr
from boole import *
def test1():
    """Exercise the Boole DSL end to end: type classes with implicit
    arguments, overloaded arithmetic on products, dependent vector types,
    and a small tactic-driven proof.

    NOTE: this is Python 2 code (bare ``print`` statements).
    """
    p, q = Bool('p q')

    X = deftype('X')
    el = defvar('el', X)
    # Type class `One` over a type X with a distinguished element `el`.
    One = defclass('One', [X, el], true)
    definstance('One_int', One(Int, 1), triv())
    one_inst = defconst('one_inst', One(X, el))
    # `one` projects the distinguished element; X, el and the instance are
    # implicit (impl=True), so `one()` resolves them from context.
    one = defexpr('one', abst([X, el, one_inst], el), \
                  pi([X, el, one_inst], X, impl=True))
    test = defexpr('test', 3 + one())
    test_def = get_def('test')
    plus_def = get_def('+')
    one_def = get_def('one')
    # Unfold '+' and 'one' inside `test`, then beta-normalize.
    print conv.beta_norm(expr.sub_in([plus_def, one_def], ['+', 'one'], test_def))
    print

    x = Real('x')
    y = Real('y')
    z = Real('z')
    i = Int('i')
    j = Int('j')
    k = Int('k')

    x_y_z = defexpr('x_y_z', x * y * z)
    int_ops = defexpr('int_ops', 3 * i < j * 2)

    # Polymorphic binary operation applied at Real.
    poly = defvar('poly', pi(X, X >> (X >> X), impl=True))
    poly_z = defexpr('poly_z', poly(z))

    # Subtyping declaration: nat <= Real.
    nat = deftype('nat')
    nat_sub_real = defsub('nat_sub_real', nat <= Real)

    # Componentwise multiplication on a product type A*B, registered as a
    # Mul instance derived from Mul instances on A and B.
    A = deftype('A')
    B = deftype('B')
    op_a = defconst('op_a', A >> (A >> A))
    op_b = defconst('op_b', B >> (B >> B))
    p1 = defvar('p1', A*B)
    p2 = defvar('p2', A*B)
    op_pair = \
        abst([p1, p2], pair(op_a(p1[0], p2[0]), op_b(p1[1], p2[1])))
    definstance('Mul_prod', \
                forall([A, op_a, B, op_b], \
                       implies([Mul(A, op_a), Mul(B, op_b)],\
                               Mul(A*B, op_pair))), \
                triv())

    # Multiply pairs (and nested pairs) to exercise instance resolution.
    test3 = defexpr('test3', pair(3, 3.0) * pair(2, 2.0))
    test3_def = get_def('test3')
    mul_def = get_def('*')
    print
    print conv.beta_norm(expr.sub_in([mul_def], ['*'], test3_def))
    print
    test4 = defexpr('test4', pair(3.0, pair(3.0, 3)) * pair(2.0, pair(2.0, 2)))
    test4_def = get_def('test4')
    print
    print conv.beta_norm(expr.sub_in([mul_def], ['*'], test4_def))
    print
    test5 = defexpr('test5', pair(pair(3.0, 3), pair(3, 3)) * pair(pair(2.0, 2), pair(2, 2)))
    test5_def = get_def('test5')
    print
    print conv.beta_norm(expr.sub_in([mul_def], ['*'], test5_def))
    print

    # Length-indexed vectors: Vec n with nil/cons and a summation constant,
    # plus hypotheses describing sum_vec on nil and cons.
    n = Int('n')
    Vec = defconst('Vec', pi(n, Type))
    succ = defconst('succ', Int >> Int)
    nil = defconst('nil', Vec(0))
    cons = defconst('cons', pi(n, Real >> (Vec(n) >> Vec(succ(n))), impl=True))
    sum_vec = defconst('sum_vec', pi(n, Vec(n) >> (Vec(n) >> Vec(n)), impl=True))
    add_nil = defhyp('add_nil', sum_vec(nil, nil) == nil)
    v1 = defvar('v1', Vec(n))
    v2 = defvar('v2', Vec(n))
    a = defvar('a', Real)
    b = defvar('b', Real)
    add_cons_eq = sum_vec(cons(a, v1), cons(b, v2)) == cons(a+b, sum_vec(v1, v2))
    add_cons = defhyp('add_cons', forall([n, a, b, v1, v2], add_cons_eq))
    rev = defconst('rev', pi(n, Vec(n) >> Vec(n), impl=True))
    v3 = defvar('v3', Vec(3))
    rev_3 = defexpr('rev_3', rev(v3))

    # Abstraction over t with a free variable w, then substitution of i.
    w = Real('w')
    t = Real('t')
    abs_plus = defexpr('abs_plus', abst(t, t + w))
    sub_test = defexpr('sub_test', abs_plus(i))

    # Pair projection theorem and its proof display.
    p = pair(x, y)
    proj_x_y_0 = defthm('proj_x_y_0', p[0] == x)
    get_def(proj_x_y_0.name).show_proof()

    prop = defexpr('prop', And(true, Or(true, false)))

    # Commutativity goal over a pair, attacked interactively with tactics.
    p = (Real * Real)('p')
    fa = forall(p, (p[0] + p[1]) == (p[1] + p[0]))
    plus_commut_stmt = defexpr('plus_commut_stmt', fa, type=Bool)
    plus_commut = defexpr('plus_commut', triv(), fa)
    goal = current_ctxt().next_goal()
    print goal
    goal.interact(tactics.unpack('p', names=['x', 'y']))
    goal.interact(tactics.simpl(conv.par_beta))
def test2():
    """Type-check a variety of Boole expressions (arithmetic, boolean,
    quantified) plus an enumerated type.

    NOTE: Python 2 module; `check` is the Boole elaborate-and-check entry
    point imported via `from boole import *`.
    """
    x = Real('x')
    y = Real('y')
    z = Real('z')
    i = Int('i')
    j = Int('j')
    k = Int('k')
    p = Bool('p')
    q = Bool('q')
    r = Bool('r')

    # Enumerated type with four named elements.
    Beatles, (John, Paul, George, Ringo) = \
        defenum('Beatles', ['John', 'Paul', 'George', 'Ringo'])

    check(x * y - y * x)
    check(i * j - (j % i) + j / k)
    check((x ** y) / (x ** 2.0) + z ** 3.0)
    check(And(p, Not(q), implies([p, q], Not(r))))
    check(And(x * y == y * x, x + y != y + x, Not(x > 2.0)))
    check(implies(Or(x > 0.0, y > 0.0), x ** 2.0 + y ** 2.0 > 0.0))
    check(forall([x, y], x * y == y * x))
    check(forall([x,y], exists(z, And(x < z, z < y))))
    check(Beatles)
    check(John)
# Script entry point: run both example suites with verbose output, then
# define one extra equation in the ambient context.
if __name__ == '__main__':
    set_verbose()
    test1()  # cody's tests
    test2()  # jeremy's tests
    x, y = Real('x y')
    new = defexpr('new', x*x + 2*y == 3)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['ThingPrincipalAttachment']
class ThingPrincipalAttachment(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 principal: Optional[pulumi.Input[str]] = None,
                 thing: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Attaches Principal to AWS IoT Thing.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example = aws.iot.Thing("example")
        cert = aws.iot.Certificate("cert",
            csr=(lambda path: open(path).read())("csr.pem"),
            active=True)
        att = aws.iot.ThingPrincipalAttachment("att",
            principal=cert.arn,
            thing=example.name)
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] principal: The AWS IoT Certificate ARN or Amazon Cognito Identity ID.
        :param pulumi.Input[str] thing: The name of the thing.
        """
        # Legacy keyword aliases: __name__/__opts__ predate
        # resource_name/opts and are kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and collect
            # them into the property bag sent to the provider.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            if principal is None:
                raise TypeError("Missing required property 'principal'")
            __props__['principal'] = principal
            if thing is None:
                raise TypeError("Missing required property 'thing'")
            __props__['thing'] = thing
        super(ThingPrincipalAttachment, __self__).__init__(
            'aws:iot/thingPrincipalAttachment:ThingPrincipalAttachment',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            principal: Optional[pulumi.Input[str]] = None,
            thing: Optional[pulumi.Input[str]] = None) -> 'ThingPrincipalAttachment':
        """
        Get an existing ThingPrincipalAttachment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] principal: The AWS IoT Certificate ARN or Amazon Cognito Identity ID.
        :param pulumi.Input[str] thing: The name of the thing.
        """
        # Merging in the id makes the constructor perform a lookup instead of
        # a create (see the opts.id branch in __init__).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["principal"] = principal
        __props__["thing"] = thing
        return ThingPrincipalAttachment(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def principal(self) -> pulumi.Output[str]:
        """
        The AWS IoT Certificate ARN or Amazon Cognito Identity ID.
        """
        return pulumi.get(self, "principal")

    @property
    @pulumi.getter
    def thing(self) -> pulumi.Output[str]:
        """
        The name of the thing.
        """
        return pulumi.get(self, "thing")

    # Map between the provider's camelCase property names and the Python
    # snake_case attribute names.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
import random, sys

print('ROCK, PAPER, SCISSORS')

# Session-wide score tallies.
wins = 0
losses = 0
ties = 0

# Display names for each move code, and which move each key defeats.
MOVE_NAMES = {'r': 'ROCK', 'p': 'PAPER', 's': 'SCISSORS'}
BEATS = {'r': 's', 'p': 'r', 's': 'p'}

while True:  # The main game loop.
    print('%s Wins, %s Losses, %s Ties' % (wins, losses, ties))

    while True:  # The player input loop.
        print('Enter your move: (r)ock (p)aper (s)cissors or (q)uit')
        playerMove = input()
        if playerMove == 'q':
            sys.exit()  # Quit the program.
        if playerMove in ('r', 'p', 's'):
            break  # Valid move entered; leave the input loop.
        print('Type one of r, p, s, or q.')

    # Display what the player chose:
    print(MOVE_NAMES[playerMove] + ' versus...')

    # Display what the computer chose (same randint call as before, so the
    # move distribution is unchanged).
    randomNumber = random.randint(1, 3)
    computerMove = {1: 'r', 2: 'p', 3: 's'}[randomNumber]
    print(MOVE_NAMES[computerMove])

    # Display and record the win/loss/tie:
    if playerMove == computerMove:
        print('It is a tie!')
        ties = ties + 1
    elif BEATS[playerMove] == computerMove:
        print('You win!')
        wins = wins + 1
    else:
        print('You lose!')
        losses = losses + 1
|
# coding=utf-8
from data_packer import err
class BaseConverter(object):
    """Abstract base for field converters used by data_packer."""

    def convert(self, src_name, dst_name, value):
        """
        Convert this field's value as needed.

        :param src_name: the field's name in the source container
        :type src_name: object
        :param dst_name: the field's name in the destination container
        :type dst_name: object
        :param value: the field's value taken from the source container
        :type value:
        :return: the processed field value
        :rtype:
        """
        raise NotImplementedError('Implemented by yourself')
class TypeConverter(BaseConverter):
    """Converter that coerces every value with a fixed type (or any
    one-argument callable)."""

    def __init__(self, tp):
        # Keep the coercion target; applied to each value in convert().
        self.tp = tp
        super(TypeConverter, self).__init__()

    def convert(self, src_name, dst_name, value):
        """Return ``value`` coerced by the configured type; the field names
        are accepted for interface compatibility but unused."""
        coerce = self.tp
        return coerce(value)
class NullConverter(BaseConverter):
    """Converter that discards the value: every conversion yields ``None``
    (the original body was a bare ``pass``, i.e. an implicit None)."""

    def convert(self, src_name, dst_name, value):
        return None
class ConverterWrapper(BaseConverter):
    """Adapter that wraps a plain conversion function as a converter object.

    The wrapped function is called as ``func(src_name, dst_name, value)``.
    """

    def __init__(self, func):
        super(ConverterWrapper, self).__init__()
        # Fail fast on misuse: the wrapped object must be callable.
        if not callable(func):
            raise err.DataPackerProgramError('func({}) must be callable'.format(func))
        self.func = func

    def convert(self, src_name, dst_name, value):
        """Delegate the conversion to the wrapped function."""
        delegate = self.func
        return delegate(src_name, dst_name, value)
|
from matplotlib import pyplot
from shapely.geometry import LineString
from figures import BLUE, GRAY, YELLOW, GREEN, SIZE, set_limits, plot_coords
fig = pyplot.figure(1, figsize=SIZE, dpi=90) #1, figsize=(10, 4), dpi=180)

# Two polylines that share the segment (0,0)-(1,1) and the point (2,2).
a = LineString([(0, 0), (1, 1), (1,2), (2,2)])
b = LineString([(0, 0), (1, 1), (2,1), (2,2)])

# 1: disconnected multilinestring
# Left panel: draw both input lines.
ax = fig.add_subplot(121)

plot_coords(ax, a)
plot_coords(ax, b)

x, y = a.xy
ax.plot(x, y, color=YELLOW, alpha=0.5, linewidth=3, solid_capstyle='round', zorder=2)
x, y = b.xy
ax.plot(x, y, color=GREEN, alpha=0.5, linewidth=3, solid_capstyle='round', zorder=2)

ax.set_title('a) lines')

set_limits(ax, -1, 3, -1, 3)

#2: invalid self-touching ring
# Right panel: inputs in gray, intersection highlighted in blue.
ax = fig.add_subplot(122)

x, y = a.xy
ax.plot(x, y, color=GRAY, alpha=0.7, linewidth=1, solid_capstyle='round', zorder=1)
x, y = b.xy
ax.plot(x, y, color=GRAY, alpha=0.7, linewidth=1, solid_capstyle='round', zorder=1)

# Single-coordinate parts are drawn as dots, multi-coordinate parts as
# thick line segments.
# NOTE(review): iterating a multi-part geometry directly was removed in
# Shapely 2.0 (use `.geoms`); this code assumes a pre-2.0 Shapely — confirm
# the pinned version.
for ob in a.intersection(b):
    x, y = ob.xy
    if len(x) == 1:
        ax.plot(x, y, 'o', color=BLUE, zorder=2)
    else:
        ax.plot(x, y, color=BLUE, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)

ax.set_title('b) collection')

set_limits(ax, -1, 3, -1, 3)

pyplot.show()
|
from __future__ import annotations
import abc
import datetime
from io import BytesIO
import os
from textwrap import fill
from typing import (
IO,
Any,
Callable,
Hashable,
Iterable,
List,
Literal,
Mapping,
Sequence,
Union,
cast,
overload,
)
import warnings
import zipfile
from pandas._config import config
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import (
DtypeArg,
FilePath,
IntStrT,
ReadBuffer,
StorageOptions,
WriteExcelBuffer,
)
from pandas.compat._optional import (
get_version,
import_optional_dependency,
)
from pandas.errors import EmptyDataError
from pandas.util._decorators import (
Appender,
deprecate_nonkeyword_arguments,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
is_bool,
is_float,
is_integer,
is_list_like,
)
from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.util.version import Version
from pandas.io.common import (
IOHandles,
get_handle,
stringify_path,
validate_header_arg,
)
from pandas.io.excel._util import (
fill_mi_header,
get_default_engine,
get_writer,
maybe_convert_usecols,
pop_header_name,
)
from pandas.io.parsers import TextParser
_read_excel_doc = (
"""
Read an Excel file into a pandas DataFrame.
Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
read from a local filesystem or URL. Supports an option to read
a single sheet or a list of sheets.
Parameters
----------
io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions (chart sheets do not count as a sheet position).
Lists of strings/integers are used to request multiple sheets.
Specify None to get all worksheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All worksheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
Missing values will be forward filled to allow roundtripping with
``to_excel`` for ``merged_cells=True``. To avoid forward filling the
missing values use ``set_index`` after reading the data instead of
``index_col``.
usecols : str, list-like, or callable, default None
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed
(0-indexed).
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
Returns a subset of the columns according to behavior above.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
.. deprecated:: 1.4.0
Append ``.squeeze("columns")`` to the call to ``read_excel`` to squeeze
the data.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb".
Engine compatibility :
- "xlrd" supports old-style Excel files (.xls).
- "openpyxl" supports newer Excel file formats.
- "odf" supports OpenDocument file formats (.odf, .ods, .odt).
- "pyxlsb" supports Binary Excel files.
.. versionchanged:: 1.2.0
The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
now only supports old-style ``.xls`` files.
When ``engine=None``, the following logic will be
used to determine the engine:
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
- Otherwise if ``path_or_buffer`` is an xls format,
``xlrd`` will be used.
- Otherwise if ``path_or_buffer`` is in xlsb format,
``pyxlsb`` will be used.
.. versionadded:: 1.3.0
- Otherwise ``openpyxl`` will be used.
.. versionchanged:: 1.3.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like, int, or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int) at the
start of the file. If callable, the callable function will be evaluated
against the row indices, returning True if the row should be skipped and
False otherwise. An example of a valid callable argument would be ``lambda
x: x in [0, 2]``.
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparsable date, the entire column or
index will be returned unaltered as an object data type. If you don't want to
parse some cells as date just change their type in Excel to "Text".
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
decimal : str, default '.'
Character to recognize as decimal point for parsing string columns to numeric.
Note that this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.(e.g. use ',' for European data).
.. versionadded:: 1.4.0
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
.. deprecated:: 1.3.0
convert_float will be removed in a future version
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
{storage_options}
.. versionadded:: 1.2.0
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', index_col=0,
... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 NaN 1
1 NaN 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
)
# Typing-only overload: a str/int sheet_name yields a single DataFrame.
@overload
def read_excel(
    io,
    # sheet name is str or int -> DataFrame
    sheet_name: str | int,
    header: int | Sequence[int] | None = ...,
    names=...,
    index_col: int | Sequence[int] | None = ...,
    usecols=...,
    squeeze: bool | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ...,
    converters=...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates=...,
    date_parser=...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    convert_float: bool | None = ...,
    mangle_dupe_cols: bool = ...,
    storage_options: StorageOptions = ...,
) -> DataFrame:
    ...
# Typing-only overload: a list/None sheet_name yields a dict of DataFrames.
@overload
def read_excel(
    io,
    # sheet name is list or None -> dict[IntStrT, DataFrame]
    sheet_name: list[IntStrT] | None,
    header: int | Sequence[int] | None = ...,
    names=...,
    index_col: int | Sequence[int] | None = ...,
    usecols=...,
    squeeze: bool | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ...,
    converters=...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates=...,
    date_parser=...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    convert_float: bool | None = ...,
    mangle_dupe_cols: bool = ...,
    storage_options: StorageOptions = ...,
) -> dict[IntStrT, DataFrame]:
    ...
@doc(storage_options=_shared_docs["storage_options"])
@deprecate_nonkeyword_arguments(allowed_args=["io", "sheet_name"], version="2.0")
@Appender(_read_excel_doc)
def read_excel(
    io,
    sheet_name: str | int | list[IntStrT] | None = 0,
    header: int | Sequence[int] | None = 0,
    names=None,
    index_col: int | Sequence[int] | None = None,
    usecols=None,
    squeeze: bool | None = None,
    dtype: DtypeArg | None = None,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = None,
    converters=None,
    true_values: Iterable[Hashable] | None = None,
    false_values: Iterable[Hashable] | None = None,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
    nrows: int | None = None,
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    parse_dates=False,
    date_parser=None,
    thousands: str | None = None,
    decimal: str = ".",
    comment: str | None = None,
    skipfooter: int = 0,
    convert_float: bool | None = None,
    mangle_dupe_cols: bool = True,
    storage_options: StorageOptions = None,
) -> DataFrame | dict[IntStrT, DataFrame]:
    # The public docstring is injected by @Appender(_read_excel_doc) above.
    # Track whether *we* opened the ExcelFile, so we only close handles we
    # created (a caller-supplied ExcelFile stays open for reuse).
    should_close = False
    if not isinstance(io, ExcelFile):
        should_close = True
        io = ExcelFile(io, storage_options=storage_options, engine=engine)
    elif engine and engine != io.engine:
        # An already-open ExcelFile has its engine fixed at construction.
        raise ValueError(
            "Engine should not be specified when passing "
            "an ExcelFile - ExcelFile already has the engine set"
        )

    try:
        data = io.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            squeeze=squeeze,
            dtype=dtype,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            na_filter=na_filter,
            verbose=verbose,
            parse_dates=parse_dates,
            date_parser=date_parser,
            thousands=thousands,
            decimal=decimal,
            comment=comment,
            skipfooter=skipfooter,
            convert_float=convert_float,
            mangle_dupe_cols=mangle_dupe_cols,
        )
    finally:
        # make sure to close opened file handles
        if should_close:
            io.close()
    return data
class BaseExcelReader(metaclass=abc.ABCMeta):
    """
    Base class for the engine-specific Excel reader implementations.

    Manages opening/closing of the underlying IO handle and implements the
    engine-independent ``parse`` logic; subclasses supply the workbook class,
    workbook loading, and raw per-sheet data extraction.
    """

    def __init__(
        self, filepath_or_buffer, storage_options: StorageOptions = None
    ) -> None:
        # First argument can also be bytes, so create a buffer
        if isinstance(filepath_or_buffer, bytes):
            filepath_or_buffer = BytesIO(filepath_or_buffer)

        self.handles = IOHandles(
            handle=filepath_or_buffer, compression={"method": None}
        )
        if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
            self.handles = get_handle(
                filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
            )

        if isinstance(self.handles.handle, self._workbook_class):
            # Caller passed an already-loaded workbook of the engine's type.
            self.book = self.handles.handle
        elif hasattr(self.handles.handle, "read"):
            # N.B. xlrd.Book has a read attribute too
            self.handles.handle.seek(0)
            try:
                self.book = self.load_workbook(self.handles.handle)
            except Exception:
                # Don't leak the handle if the engine fails to load the file.
                self.close()
                raise
        else:
            raise ValueError(
                "Must explicitly set engine if not passing in buffer or path for io."
            )

    @property
    @abc.abstractmethod
    def _workbook_class(self):
        # Engine-specific workbook type, used for the isinstance checks above.
        pass

    @abc.abstractmethod
    def load_workbook(self, filepath_or_buffer):
        """Load and return an engine workbook from a path or buffer."""
        pass

    def close(self) -> None:
        """Close the workbook (engine-specific) and the underlying IO handles."""
        if hasattr(self, "book"):
            if hasattr(self.book, "close"):
                # pyxlsb: opens a TemporaryFile
                # openpyxl: https://stackoverflow.com/questions/31416842/
                # openpyxl-does-not-close-excel-workbook-in-read-only-mode
                self.book.close()
            elif hasattr(self.book, "release_resources"):
                # xlrd
                # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
                self.book.release_resources()
        self.handles.close()

    @property
    @abc.abstractmethod
    def sheet_names(self) -> list[str]:
        """List of worksheet names, in workbook order."""
        pass

    @abc.abstractmethod
    def get_sheet_by_name(self, name: str):
        """Return the engine sheet object with the given name."""
        pass

    @abc.abstractmethod
    def get_sheet_by_index(self, index: int):
        """Return the engine sheet object at the given 0-based index."""
        pass

    @abc.abstractmethod
    def get_sheet_data(self, sheet, convert_float: bool):
        """Return the sheet contents as a list of rows of cell values."""
        pass

    def raise_if_bad_sheet_by_index(self, index: int) -> None:
        # Validate a 0-based sheet index against the number of worksheets.
        n_sheets = len(self.sheet_names)
        if index >= n_sheets:
            raise ValueError(
                f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
            )

    def raise_if_bad_sheet_by_name(self, name: str) -> None:
        # Validate that a worksheet with this name exists.
        if name not in self.sheet_names:
            raise ValueError(f"Worksheet named '{name}' not found")

    def parse(
        self,
        sheet_name: str | int | list[int] | list[str] | None = 0,
        header: int | Sequence[int] | None = 0,
        names=None,
        index_col: int | Sequence[int] | None = None,
        usecols=None,
        squeeze: bool | None = None,
        dtype: DtypeArg | None = None,
        true_values: Iterable[Hashable] | None = None,
        false_values: Iterable[Hashable] | None = None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
        nrows: int | None = None,
        na_values=None,
        verbose: bool = False,
        parse_dates=False,
        date_parser=None,
        thousands: str | None = None,
        decimal: str = ".",
        comment: str | None = None,
        skipfooter: int = 0,
        convert_float: bool | None = None,
        mangle_dupe_cols: bool = True,
        **kwds,
    ):
        """
        Parse the requested sheet(s) into DataFrame(s) via ``TextParser``.

        Returns a single DataFrame, or a dict of DataFrames keyed by sheet
        name/index when ``sheet_name`` is a list or ``None`` (all sheets).
        Remaining keyword arguments are forwarded to ``TextParser``.
        """
        if convert_float is None:
            convert_float = True
        else:
            # Any explicit value (True or False) triggers the deprecation.
            warnings.warn(
                "convert_float is deprecated and will be removed in a future version.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        validate_header_arg(header)

        ret_dict = False

        # Keep sheetname to maintain backwards compatibility.
        sheets: list[int] | list[str]
        if isinstance(sheet_name, list):
            sheets = sheet_name
            ret_dict = True
        elif sheet_name is None:
            sheets = self.sheet_names
            ret_dict = True
        elif isinstance(sheet_name, str):
            sheets = [sheet_name]
        else:
            sheets = [sheet_name]

        # handle same-type duplicates.
        sheets = cast(Union[List[int], List[str]], list(dict.fromkeys(sheets).keys()))

        output = {}

        for asheetname in sheets:
            if verbose:
                print(f"Reading sheet {asheetname}")

            if isinstance(asheetname, str):
                sheet = self.get_sheet_by_name(asheetname)
            else:  # assume an integer if not a string
                sheet = self.get_sheet_by_index(asheetname)

            data = self.get_sheet_data(sheet, convert_float)
            if hasattr(sheet, "close"):
                # pyxlsb opens two TemporaryFiles
                sheet.close()
            usecols = maybe_convert_usecols(usecols)

            if not data:
                # Empty sheet: emit an empty frame and move on.
                output[asheetname] = DataFrame()
                continue

            is_list_header = False
            is_len_one_list_header = False
            if is_list_like(header):
                assert isinstance(header, Sequence)
                is_list_header = True
                if len(header) == 1:
                    is_len_one_list_header = True

            if is_len_one_list_header:
                # A length-one list header behaves like a plain int header.
                header = cast(Sequence[int], header)[0]

            # forward fill and pull out names for MultiIndex column
            header_names = None
            if header is not None and is_list_like(header):
                assert isinstance(header, Sequence)

                header_names = []
                control_row = [True] * len(data[0])

                for row in header:
                    if is_integer(skiprows):
                        assert isinstance(skiprows, int)
                        row += skiprows

                    data[row], control_row = fill_mi_header(data[row], control_row)

                    if index_col is not None:
                        header_name, _ = pop_header_name(data[row], index_col)
                        header_names.append(header_name)

            # If there is a MultiIndex header and an index then there is also
            # a row containing just the index name(s)
            has_index_names = (
                is_list_header and not is_len_one_list_header and index_col is not None
            )

            if is_list_like(index_col):
                # Forward fill values for MultiIndex index.
                if header is None:
                    offset = 0
                elif isinstance(header, int):
                    offset = 1 + header
                else:
                    offset = 1 + max(header)

                # GH34673: if MultiIndex names present and not defined in the header,
                # offset needs to be incremented so that forward filling starts
                # from the first MI value instead of the name
                if has_index_names:
                    offset += 1

                # Check if we have an empty dataset
                # before trying to collect data.
                if offset < len(data):
                    assert isinstance(index_col, Sequence)

                    for col in index_col:
                        last = data[offset][col]
                        for row in range(offset + 1, len(data)):
                            if data[row][col] == "" or data[row][col] is None:
                                data[row][col] = last
                            else:
                                last = data[row][col]

            # GH 12292 : error when read one empty column from excel file
            try:
                parser = TextParser(
                    data,
                    names=names,
                    header=header,
                    index_col=index_col,
                    has_index_names=has_index_names,
                    squeeze=squeeze,
                    dtype=dtype,
                    true_values=true_values,
                    false_values=false_values,
                    skiprows=skiprows,
                    nrows=nrows,
                    na_values=na_values,
                    skip_blank_lines=False,  # GH 39808
                    parse_dates=parse_dates,
                    date_parser=date_parser,
                    thousands=thousands,
                    decimal=decimal,
                    comment=comment,
                    skipfooter=skipfooter,
                    usecols=usecols,
                    mangle_dupe_cols=mangle_dupe_cols,
                    **kwds,
                )

                output[asheetname] = parser.read(nrows=nrows)

                if not squeeze or isinstance(output[asheetname], DataFrame):
                    if header_names:
                        output[asheetname].columns = output[
                            asheetname
                        ].columns.set_names(header_names)

            except EmptyDataError:
                # No Data, return an empty DataFrame
                output[asheetname] = DataFrame()

        if ret_dict:
            return output
        else:
            # Single-sheet request: return the last (only) parsed frame.
            return output[asheetname]
@doc(storage_options=_shared_docs["storage_options"])
class ExcelWriter(metaclass=abc.ABCMeta):
    """
    Class for writing DataFrame objects into excel sheets.

    Default is to use :

    * xlwt for xls
    * xlsxwriter for xlsx if xlsxwriter is installed otherwise openpyxl
    * odf for ods.

    See DataFrame.to_excel for typical usage.

    The writer should be used as a context manager. Otherwise, call `close()` to save
    and close any opened file handles.

    Parameters
    ----------
    path : str or typing.BinaryIO
        Path to xls or xlsx or ods file.
    engine : str (optional)
        Engine to use for writing. If None, defaults to
        ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
        argument.

        .. deprecated:: 1.2.0

            As the `xlwt <https://pypi.org/project/xlwt/>`__ package is no longer
            maintained, the ``xlwt`` engine will be removed in a future
            version of pandas.

    date_format : str, default None
        Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
    datetime_format : str, default None
        Format string for datetime objects written into Excel files.
        (e.g. 'YYYY-MM-DD HH:MM:SS').
    mode : {{'w', 'a'}}, default 'w'
        File mode to use (write or append). Append does not work with fsspec URLs.
    {storage_options}

        .. versionadded:: 1.2.0

    if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
        How to behave when trying to write to a sheet that already
        exists (append mode only).

        * error: raise a ValueError.
        * new: Create a new sheet, with a name determined by the engine.
        * replace: Delete the contents of the sheet before writing to it.
        * overlay: Write contents to the existing sheet without removing the old
          contents.

        .. versionadded:: 1.3.0

        .. versionchanged:: 1.4.0

           Added ``overlay`` option

    engine_kwargs : dict, optional
        Keyword arguments to be passed into the engine. These will be passed to
        the following functions of the respective engines:

        * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
        * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
        * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
        * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``

        .. versionadded:: 1.3.0

    **kwargs : dict, optional
        Keyword arguments to be passed into the engine.

        .. deprecated:: 1.3.0

            Use engine_kwargs instead.

    Notes
    -----
    For compatibility with CSV writers, ExcelWriter serializes lists
    and dicts to strings before writing.

    Examples
    --------
    Default usage:

    >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])  # doctest: +SKIP
    >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
    ...     df.to_excel(writer)  # doctest: +SKIP

    To write to separate sheets in a single file:

    >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"])  # doctest: +SKIP
    >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])  # doctest: +SKIP
    >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
    ...     df1.to_excel(writer, sheet_name="Sheet1")  # doctest: +SKIP
    ...     df2.to_excel(writer, sheet_name="Sheet2")  # doctest: +SKIP

    You can set the date format or datetime format:

    >>> from datetime import date, datetime  # doctest: +SKIP
    >>> df = pd.DataFrame(
    ...     [
    ...         [date(2014, 1, 31), date(1999, 9, 24)],
    ...         [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
    ...     ],
    ...     index=["Date", "Datetime"],
    ...     columns=["X", "Y"],
    ... )  # doctest: +SKIP
    >>> with pd.ExcelWriter(
    ...     "path_to_file.xlsx",
    ...     date_format="YYYY-MM-DD",
    ...     datetime_format="YYYY-MM-DD HH:MM:SS"
    ... ) as writer:
    ...     df.to_excel(writer)  # doctest: +SKIP

    You can also append to an existing Excel file:

    >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
    ...     df.to_excel(writer, sheet_name="Sheet3")  # doctest: +SKIP

    Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
    already exists:

    >>> with ExcelWriter(
    ...     "path_to_file.xlsx",
    ...     mode="a",
    ...     engine="openpyxl",
    ...     if_sheet_exists="replace",
    ... ) as writer:
    ...     df.to_excel(writer, sheet_name="Sheet1")  # doctest: +SKIP

    You can also write multiple DataFrames to a single sheet. Note that the
    ``if_sheet_exists`` parameter needs to be set to ``overlay``:

    >>> with ExcelWriter("path_to_file.xlsx",
    ...     mode="a",
    ...     engine="openpyxl",
    ...     if_sheet_exists="overlay",
    ... ) as writer:
    ...     df1.to_excel(writer, sheet_name="Sheet1")
    ...     df2.to_excel(writer, sheet_name="Sheet1", startcol=3)  # doctest: +SKIP

    You can store Excel file in RAM:

    >>> import io
    >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
    >>> buffer = io.BytesIO()
    >>> with pd.ExcelWriter(buffer) as writer:
    ...     df.to_excel(writer)

    You can pack Excel file into zip archive:

    >>> import zipfile  # doctest: +SKIP
    >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])  # doctest: +SKIP
    >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
    ...     with zf.open("filename.xlsx", "w") as buffer:
    ...         with pd.ExcelWriter(buffer) as writer:
    ...             df.to_excel(writer)  # doctest: +SKIP

    You can specify additional arguments to the underlying engine:

    >>> with pd.ExcelWriter(
    ...     "path_to_file.xlsx",
    ...     engine="xlsxwriter",
    ...     engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
    ... ) as writer:
    ...     df.to_excel(writer)  # doctest: +SKIP

    In append mode, ``engine_kwargs`` are passed through to
    openpyxl's ``load_workbook``:

    >>> with pd.ExcelWriter(
    ...     "path_to_file.xlsx",
    ...     engine="openpyxl",
    ...     mode="a",
    ...     engine_kwargs={{"keep_vba": True}}
    ... ) as writer:
    ...     df.to_excel(writer, sheet_name="Sheet2")  # doctest: +SKIP
    """

    # Defining an ExcelWriter implementation (see abstract methods for more...)
    # - Mandatory
    #   - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
    #     --> called to write additional DataFrames to disk
    #   - ``_supported_extensions`` (tuple of supported extensions), used to
    #     check that engine supports the given extension.
    #   - ``_engine`` - string that gives the engine name. Necessary to
    #     instantiate class directly and bypass ``ExcelWriterMeta`` engine
    #     lookup.
    #   - ``save(self)`` --> called to save file to disk
    # - Mostly mandatory (i.e. should at least exist)
    #   - book, cur_sheet, path
    # - Optional:
    #   - ``__init__(self, path, engine=None, **kwargs)`` --> always called
    #     with path as first argument.
    # You also need to register the class with ``register_writer()``.
    # Technically, ExcelWriter implementations don't need to subclass
    # ExcelWriter.

    _engine: str
    _supported_extensions: tuple[str, ...]

    def __new__(
        cls,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: Literal["error", "new", "replace", "overlay"] | None = None,
        engine_kwargs: dict | None = None,
        **kwargs,
    ):
        # Resolve the concrete writer subclass from the requested (or
        # configured) engine; ``__init__`` then runs on that subclass.
        if kwargs:
            if engine_kwargs is not None:
                raise ValueError("Cannot use both engine_kwargs and **kwargs")
            warnings.warn(
                "Use of **kwargs is deprecated, use engine_kwargs instead.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        # only switch class if generic(ExcelWriter)
        if cls is ExcelWriter:
            if engine is None or (isinstance(engine, str) and engine == "auto"):
                if isinstance(path, str):
                    ext = os.path.splitext(path)[-1][1:]
                else:
                    ext = "xlsx"

                try:
                    engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
                    if engine == "auto":
                        engine = get_default_engine(ext, mode="writer")
                except KeyError as err:
                    raise ValueError(f"No engine for filetype: '{ext}'") from err

            if engine == "xlwt":
                xls_config_engine = config.get_option(
                    "io.excel.xls.writer", silent=True
                )
                # Don't warn a 2nd time if user has changed the default engine for xls
                if xls_config_engine != "xlwt":
                    warnings.warn(
                        "As the xlwt package is no longer maintained, the xlwt "
                        "engine will be removed in a future version of pandas. "
                        "This is the only engine in pandas that supports writing "
                        "in the xls format. Install openpyxl and write to an xlsx "
                        "file instead. You can set the option io.excel.xls.writer "
                        "to 'xlwt' to silence this warning. While this option is "
                        "deprecated and will also raise a warning, it can "
                        "be globally set and the warning suppressed.",
                        FutureWarning,
                        stacklevel=find_stack_level(),
                    )

            # for mypy
            assert engine is not None
            cls = get_writer(engine)

        return object.__new__(cls)

    # declare external properties you can count on
    _path = None

    @property
    def supported_extensions(self) -> tuple[str, ...]:
        """Extensions that writer engine supports."""
        return self._supported_extensions

    @property
    def engine(self) -> str:
        """Name of engine."""
        return self._engine

    @property
    @abc.abstractmethod
    def sheets(self) -> dict[str, Any]:
        """Mapping of sheet names to sheet objects."""
        pass

    @property
    @abc.abstractmethod
    def book(self):
        """
        Book instance. Class type will depend on the engine used.

        This attribute can be used to access engine-specific features.
        """
        pass

    def write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        """
        Write given formatted cells into an Excel sheet.

        .. deprecated:: 1.5.0

        Parameters
        ----------
        cells : generator
            cell of formatted data to save to Excel sheet
        sheet_name : str, default None
            Name of Excel sheet, if None, then use self.cur_sheet
        startrow : upper left cell row to dump data frame
        startcol : upper left cell column to dump data frame
        freeze_panes: int tuple of length 2
            contains the bottom-most row and right-most column to freeze
        """
        self._deprecate("write_cells")
        return self._write_cells(cells, sheet_name, startrow, startcol, freeze_panes)

    @abc.abstractmethod
    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        """
        Write given formatted cells into an Excel sheet.

        Parameters
        ----------
        cells : generator
            cell of formatted data to save to Excel sheet
        sheet_name : str, default None
            Name of Excel sheet, if None, then use self.cur_sheet
        startrow : upper left cell row to dump data frame
        startcol : upper left cell column to dump data frame
        freeze_panes: int tuple of length 2
            contains the bottom-most row and right-most column to freeze
        """
        pass

    def save(self) -> None:
        """
        Save workbook to disk.

        .. deprecated:: 1.5.0
        """
        self._deprecate("save")
        return self._save()

    @abc.abstractmethod
    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        pass

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions = None,
        if_sheet_exists: str | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # validate that this engine can handle the extension
        if isinstance(path, str):
            ext = os.path.splitext(path)[-1]
            self.check_extension(ext)

        # use mode to open the file
        if "b" not in mode:
            mode += "b"
        # use "a" for the user to append data to excel but internally use "r+" to let
        # the excel backend first read the existing file and then write any data to it
        mode = mode.replace("a", "r+")

        # cast ExcelWriter to avoid adding 'if self.handles is not None'
        # NOTE: "method" is the key used by IOHandles/get_handle for the
        # compression mode (matches the reader side); {"method": None} means
        # no compression.
        self._handles = IOHandles(
            cast(IO[bytes], path), compression={"method": None}
        )
        if not isinstance(path, ExcelWriter):
            self._handles = get_handle(
                path, mode, storage_options=storage_options, is_text=False
            )
        self._cur_sheet = None

        if date_format is None:
            self._date_format = "YYYY-MM-DD"
        else:
            self._date_format = date_format
        if datetime_format is None:
            self._datetime_format = "YYYY-MM-DD HH:MM:SS"
        else:
            self._datetime_format = datetime_format

        self._mode = mode

        if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
            raise ValueError(
                f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
                "Valid options are 'error', 'new', 'replace' and 'overlay'."
            )
        if if_sheet_exists and "r+" not in mode:
            raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
        if if_sheet_exists is None:
            if_sheet_exists = "error"
        self._if_sheet_exists = if_sheet_exists

    def _deprecate(self, attr: str):
        """
        Deprecate attribute or method for ExcelWriter.
        """
        warnings.warn(
            f"{attr} is not part of the public API, usage can give in unexpected "
            "results and will be removed in a future version",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    @property
    def date_format(self) -> str:
        """
        Format string for dates written into Excel files (e.g. ‘YYYY-MM-DD’).
        """
        return self._date_format

    @property
    def datetime_format(self) -> str:
        """
        Format string for dates written into Excel files (e.g. ‘YYYY-MM-DD’).
        """
        return self._datetime_format

    @property
    def if_sheet_exists(self) -> str:
        """
        How to behave when writing to a sheet that already exists in append mode.
        """
        return self._if_sheet_exists

    @property
    def cur_sheet(self):
        """
        Current sheet for writing.

        .. deprecated:: 1.5.0
        """
        self._deprecate("cur_sheet")
        return self._cur_sheet

    @property
    def handles(self):
        """
        Handles to Excel sheets.

        .. deprecated:: 1.5.0
        """
        self._deprecate("handles")
        return self._handles

    @property
    def path(self):
        """
        Path to Excel file.

        .. deprecated:: 1.5.0
        """
        self._deprecate("path")
        return self._path

    def __fspath__(self):
        # os.PathLike support: expose the underlying handle's name, if any.
        return getattr(self._handles.handle, "name", "")

    def _get_sheet_name(self, sheet_name: str | None) -> str:
        # Fall back to the current sheet when no explicit name was given.
        if sheet_name is None:
            sheet_name = self._cur_sheet
        if sheet_name is None:  # pragma: no cover
            raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
        return sheet_name

    def _value_with_fmt(self, val) -> tuple[object, str | None]:
        """
        Convert numpy types to Python types for the Excel writers.

        Parameters
        ----------
        val : object
            Value to be written into cells

        Returns
        -------
        Tuple with the first element being the converted value and the second
            being an optional format
        """
        fmt = None

        if is_integer(val):
            val = int(val)
        elif is_float(val):
            val = float(val)
        elif is_bool(val):
            val = bool(val)
        elif isinstance(val, datetime.datetime):
            fmt = self._datetime_format
        elif isinstance(val, datetime.date):
            fmt = self._date_format
        elif isinstance(val, datetime.timedelta):
            # Excel stores durations as fractional days.
            val = val.total_seconds() / 86400
            fmt = "0"
        else:
            val = str(val)

        return val, fmt

    @classmethod
    def check_extension(cls, ext: str) -> Literal[True]:
        """
        checks that path's extension against the Writer's supported
        extensions.  If it isn't supported, raises UnsupportedFiletypeError.
        """
        if ext.startswith("."):
            ext = ext[1:]
        if not any(ext in extension for extension in cls._supported_extensions):
            # Use cls._engine here: accessing the ``engine`` property on the
            # class (not an instance) would yield the property object itself,
            # producing a garbled error message.
            raise ValueError(f"Invalid extension for engine '{cls._engine}': '{ext}'")
        else:
            return True

    # Allow use as a contextmanager
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self) -> None:
        """synonym for save, to make it more file-like"""
        self._save()
        self._handles.close()
# Magic byte prefixes used to sniff the on-disk format of an Excel file.
XLS_SIGNATURES = (
    b"\x09\x00\x04\x00\x07\x00\x10\x00",  # BIFF2
    b"\x09\x02\x06\x00\x00\x00\x10\x00",  # BIFF3
    b"\x09\x04\x06\x00\x00\x00\x10\x00",  # BIFF4
    b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1",  # Compound File Binary
)
# Zip-based formats (xlsx/xlsb/ods) start with a zip local-file-header record.
ZIP_SIGNATURE = b"PK\x03\x04"
# Number of leading bytes needed to test every signature above.
PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
@doc(storage_options=_shared_docs["storage_options"])
def inspect_excel_format(
    content_or_path: FilePath | ReadBuffer[bytes],
    storage_options: StorageOptions = None,
) -> str | None:
    """
    Inspect the path or content of an excel file and get its format.

    Adopted from xlrd: https://github.com/python-excel/xlrd.

    Parameters
    ----------
    content_or_path : str or file-like object
        Path to file or content of file to inspect. May be a URL.
    {storage_options}

    Returns
    -------
    str or None
        Format of file if it can be determined.

    Raises
    ------
    ValueError
        If resulting stream is empty.
    BadZipFile
        If resulting stream does not have an XLS signature and is not a valid zipfile.
    """
    # Raw bytes are wrapped so get_handle sees a file-like object.
    if isinstance(content_or_path, bytes):
        content_or_path = BytesIO(content_or_path)

    with get_handle(
        content_or_path, "rb", storage_options=storage_options, is_text=False
    ) as handle:
        stream = handle.handle
        stream.seek(0)
        buf = stream.read(PEEK_SIZE)
        if buf is None:
            raise ValueError("stream is empty")
        assert isinstance(buf, bytes)
        peek = buf
        stream.seek(0)

        # Legacy binary .xls formats are recognized by magic bytes alone.
        if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
            return "xls"
        if not peek.startswith(ZIP_SIGNATURE):
            return None

        with zipfile.ZipFile(stream) as zf:
            # Workaround for some third party files that use forward slashes and
            # lower case names.
            component_names = {
                entry.replace("\\", "/").lower() for entry in zf.namelist()
            }

            # Distinguish the zip-based formats by their marker members.
            if "xl/workbook.xml" in component_names:
                return "xlsx"
            if "xl/workbook.bin" in component_names:
                return "xlsb"
            if "content.xml" in component_names:
                return "ods"
            return "zip"
class ExcelFile:
    """
    Class for parsing tabular excel sheets into DataFrame objects.

    See read_excel for more documentation.

    Parameters
    ----------
    path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
        a file-like object, xlrd workbook or openpyxl workbook.
        If a string or path object, expected to be a path to a
        .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
    engine : str, default None
        If io is not a buffer or path, this must be set to identify io.
        Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``
        Engine compatibility :

        - ``xlrd`` supports old-style Excel files (.xls).
        - ``openpyxl`` supports newer Excel file formats.
        - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
        - ``pyxlsb`` supports Binary Excel files.

        .. versionchanged:: 1.2.0

           The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
           now only supports old-style ``.xls`` files.
           When ``engine=None``, the following logic will be
           used to determine the engine:

           - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
             then `odf <https://pypi.org/project/odfpy/>`_ will be used.
           - Otherwise if ``path_or_buffer`` is an xls format,
             ``xlrd`` will be used.
           - Otherwise if ``path_or_buffer`` is in xlsb format,
             `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.

           .. versionadded:: 1.3.0

           - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
             then ``openpyxl`` will be used.
           - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
           - Otherwise ``xlrd`` will be used and a ``FutureWarning`` will be raised.
             This case will raise a ``ValueError`` in a future version of pandas.

           .. warning::

            Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
            This is not supported, switch to using ``openpyxl`` instead.
    """

    from pandas.io.excel._odfreader import ODFReader
    from pandas.io.excel._openpyxl import OpenpyxlReader
    from pandas.io.excel._pyxlsb import PyxlsbReader
    from pandas.io.excel._xlrd import XlrdReader

    # Maps engine name -> reader class (each a BaseExcelReader subclass).
    _engines: Mapping[str, Any] = {
        "xlrd": XlrdReader,
        "openpyxl": OpenpyxlReader,
        "odf": ODFReader,
        "pyxlsb": PyxlsbReader,
    }

    def __init__(
        self,
        path_or_buffer,
        engine: str | None = None,
        storage_options: StorageOptions = None,
    ) -> None:
        if engine is not None and engine not in self._engines:
            raise ValueError(f"Unknown engine: {engine}")

        # First argument can also be bytes, so create a buffer
        if isinstance(path_or_buffer, bytes):
            path_or_buffer = BytesIO(path_or_buffer)

        # Could be a str, ExcelFile, Book, etc.
        self.io = path_or_buffer
        # Always a string
        self._io = stringify_path(path_or_buffer)

        # Determine xlrd version if installed
        if import_optional_dependency("xlrd", errors="ignore") is None:
            xlrd_version = None
        else:
            import xlrd

            xlrd_version = Version(get_version(xlrd))

        ext = None
        if engine is None:
            # Only determine ext if it is needed
            if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
                ext = "xls"
            else:
                ext = inspect_excel_format(
                    content_or_path=path_or_buffer, storage_options=storage_options
                )
                if ext is None:
                    raise ValueError(
                        "Excel file format cannot be determined, you must specify "
                        "an engine manually."
                    )

            engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
            if engine == "auto":
                engine = get_default_engine(ext, mode="reader")

        if engine == "xlrd" and xlrd_version is not None:
            if ext is None:
                # Need ext to determine ext in order to raise/warn
                if isinstance(path_or_buffer, xlrd.Book):
                    ext = "xls"
                else:
                    ext = inspect_excel_format(
                        path_or_buffer, storage_options=storage_options
                    )

            # Pass through if ext is None, otherwise check if ext valid for xlrd
            if ext and ext != "xls" and xlrd_version >= Version("2"):
                raise ValueError(
                    f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, "
                    f"only the xls format is supported. Install openpyxl instead."
                )
            elif ext and ext != "xls":
                stacklevel = find_stack_level()
                warnings.warn(
                    f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, "
                    f"only the xls format is supported. Install "
                    f"openpyxl instead.",
                    FutureWarning,
                    stacklevel=stacklevel,
                )

        # engine is guaranteed resolved by this point (for mypy)
        assert engine is not None
        self.engine = engine
        self.storage_options = storage_options

        self._reader = self._engines[engine](self._io, storage_options=storage_options)

    def __fspath__(self):
        # os.PathLike support: the stringified path given at construction.
        return self._io

    def parse(
        self,
        sheet_name: str | int | list[int] | list[str] | None = 0,
        header: int | Sequence[int] | None = 0,
        names=None,
        index_col: int | Sequence[int] | None = None,
        usecols=None,
        squeeze: bool | None = None,
        converters=None,
        true_values: Iterable[Hashable] | None = None,
        false_values: Iterable[Hashable] | None = None,
        skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
        nrows: int | None = None,
        na_values=None,
        parse_dates=False,
        date_parser=None,
        thousands: str | None = None,
        comment: str | None = None,
        skipfooter: int = 0,
        convert_float: bool | None = None,
        mangle_dupe_cols: bool = True,
        **kwds,
    ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
        """
        Parse specified sheet(s) into a DataFrame.

        Equivalent to read_excel(ExcelFile, ...)  See the read_excel
        docstring for more info on accepted parameters.

        Returns
        -------
        DataFrame or dict of DataFrames
            DataFrame from the passed in Excel file.
        """
        # Delegates entirely to the engine-specific reader.
        return self._reader.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            squeeze=squeeze,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            parse_dates=parse_dates,
            date_parser=date_parser,
            thousands=thousands,
            comment=comment,
            skipfooter=skipfooter,
            convert_float=convert_float,
            mangle_dupe_cols=mangle_dupe_cols,
            **kwds,
        )

    @property
    def book(self):
        # The engine's underlying workbook object.
        return self._reader.book

    @property
    def sheet_names(self):
        # Worksheet names as reported by the engine reader.
        return self._reader.sheet_names

    def close(self) -> None:
        """close io if necessary"""
        self._reader.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def __del__(self):
        # Ensure we don't leak file descriptors, but put in try/except in case
        # attributes are already deleted
        try:
            self.close()
        except AttributeError:
            pass
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
# Public API of this generated module.
__all__ = ['AadDiagnosticSettingArgs', 'AadDiagnosticSetting']
@pulumi.input_type
class AadDiagnosticSettingArgs:
    def __init__(__self__, *,
                 logs: pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]],
                 eventhub_authorization_rule_id: Optional[pulumi.Input[str]] = None,
                 eventhub_name: Optional[pulumi.Input[str]] = None,
                 log_analytics_workspace_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a AadDiagnosticSetting resource.
        :param pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]] logs: One or more `log` blocks as defined below.
        :param pulumi.Input[str] eventhub_authorization_rule_id: Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] eventhub_name: Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        :param pulumi.Input[str] log_analytics_workspace_id: Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        :param pulumi.Input[str] name: The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        # Required argument is always recorded in the input map.
        pulumi.set(__self__, "logs", logs)
        # Optional arguments are recorded only when explicitly provided, so
        # unset inputs stay absent from the resource's input map.
        if eventhub_authorization_rule_id is not None:
            pulumi.set(__self__, "eventhub_authorization_rule_id", eventhub_authorization_rule_id)
        if eventhub_name is not None:
            pulumi.set(__self__, "eventhub_name", eventhub_name)
        if log_analytics_workspace_id is not None:
            pulumi.set(__self__, "log_analytics_workspace_id", log_analytics_workspace_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if storage_account_id is not None:
            pulumi.set(__self__, "storage_account_id", storage_account_id)
    @property
    @pulumi.getter
    def logs(self) -> pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]]:
        """
        One or more `log` blocks as defined below.
        """
        # Read the value stored in the Pulumi input map.
        return pulumi.get(self, "logs")
    @logs.setter
    def logs(self, value: pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]]):
        # Store the new value in the Pulumi input map.
        pulumi.set(self, "logs", value)
    @property
    @pulumi.getter(name="eventhubAuthorizationRuleId")
    def eventhub_authorization_rule_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        """
        # Read the value stored in the Pulumi input map.
        return pulumi.get(self, "eventhub_authorization_rule_id")
@eventhub_authorization_rule_id.setter
def eventhub_authorization_rule_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eventhub_authorization_rule_id", value)
    # Optional input; defaults to the namespace's default Event Hub when unset.
    @property
    @pulumi.getter(name="eventhubName")
    def eventhub_name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "eventhub_name")

    @eventhub_name.setter
    def eventhub_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eventhub_name", value)
    # Optional input: alternative log destination to Event Hub / Storage Account.
    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        """
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "log_analytics_workspace_id", value)
    # Optional input; when unset the provider auto-names the diagnostic setting.
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    # Optional input: Storage Account destination for the diagnostic logs.
    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")

    @storage_account_id.setter
    def storage_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_id", value)
@pulumi.input_type
class _AadDiagnosticSettingState:
    # State bag used by AadDiagnosticSetting.get(); unlike the Args class every
    # field (including `logs`) is optional because a lookup may be partial.
    def __init__(__self__, *,
                 eventhub_authorization_rule_id: Optional[pulumi.Input[str]] = None,
                 eventhub_name: Optional[pulumi.Input[str]] = None,
                 log_analytics_workspace_id: Optional[pulumi.Input[str]] = None,
                 logs: Optional[pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering AadDiagnosticSetting resources.
        :param pulumi.Input[str] eventhub_authorization_rule_id: Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] eventhub_name: Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        :param pulumi.Input[str] log_analytics_workspace_id: Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        :param pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]] logs: One or more `log` blocks as defined below.
        :param pulumi.Input[str] name: The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        # Only set properties that were actually supplied, so unset fields stay
        # absent from pulumi's internal property table.
        if eventhub_authorization_rule_id is not None:
            pulumi.set(__self__, "eventhub_authorization_rule_id", eventhub_authorization_rule_id)
        if eventhub_name is not None:
            pulumi.set(__self__, "eventhub_name", eventhub_name)
        if log_analytics_workspace_id is not None:
            pulumi.set(__self__, "log_analytics_workspace_id", log_analytics_workspace_id)
        if logs is not None:
            pulumi.set(__self__, "logs", logs)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if storage_account_id is not None:
            pulumi.set(__self__, "storage_account_id", storage_account_id)

    @property
    @pulumi.getter(name="eventhubAuthorizationRuleId")
    def eventhub_authorization_rule_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "eventhub_authorization_rule_id")

    @eventhub_authorization_rule_id.setter
    def eventhub_authorization_rule_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eventhub_authorization_rule_id", value)

    @property
    @pulumi.getter(name="eventhubName")
    def eventhub_name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "eventhub_name")

    @eventhub_name.setter
    def eventhub_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eventhub_name", value)

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        """
        return pulumi.get(self, "log_analytics_workspace_id")

    @log_analytics_workspace_id.setter
    def log_analytics_workspace_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "log_analytics_workspace_id", value)

    @property
    @pulumi.getter
    def logs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]]]:
        """
        One or more `log` blocks as defined below.
        """
        return pulumi.get(self, "logs")

    @logs.setter
    def logs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AadDiagnosticSettingLogArgs']]]]):
        pulumi.set(self, "logs", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")

    @storage_account_id.setter
    def storage_account_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_id", value)
class AadDiagnosticSetting(pulumi.CustomResource):
    # NOTE: standard pulumi generated-resource layout — two typed @overload
    # __init__ signatures for IDE support, a real __init__ that dispatches on
    # whether an Args object was supplied, and a static get() for lookups.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 eventhub_authorization_rule_id: Optional[pulumi.Input[str]] = None,
                 eventhub_name: Optional[pulumi.Input[str]] = None,
                 log_analytics_workspace_id: Optional[pulumi.Input[str]] = None,
                 logs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AadDiagnosticSettingLogArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages an Azure Active Directory Diagnostic Setting for Azure Monitor.

        !> **Authentication** The API for this resource does not support service principal authentication. This resource can only be used with Azure CLI authentication.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="west europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_kind="StorageV2",
            account_replication_type="LRS")
        example_aad_diagnostic_setting = azure.monitoring.AadDiagnosticSetting("exampleAadDiagnosticSetting",
            storage_account_id=example_account.id,
            logs=[
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="SignInLogs",
                    enabled=True,
                    retention_policy={
                        "enabled": True,
                        "days": 1,
                    },
                ),
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="AuditLogs",
                    enabled=True,
                    retention_policy={
                        "enabled": True,
                        "days": 1,
                    },
                ),
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="NonInteractiveUserSignInLogs",
                    enabled=True,
                    retention_policy={
                        "enabled": True,
                        "days": 1,
                    },
                ),
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="ServicePrincipalSignInLogs",
                    enabled=True,
                    retention_policy={
                        "enabled": True,
                        "days": 1,
                    },
                ),
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="ManagedIdentitySignInLogs",
                    enabled=False,
                    retention_policy={},
                ),
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="ProvisioningLogs",
                    enabled=False,
                    retention_policy={},
                ),
                azure.monitoring.AadDiagnosticSettingLogArgs(
                    category="ADFSSignInLogs",
                    enabled=False,
                    retention_policy={},
                ),
            ])
        ```

        ## Import

        Monitor Azure Active Directory Diagnostic Settings can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:monitoring/aadDiagnosticSetting:AadDiagnosticSetting example /providers/Microsoft.AADIAM/diagnosticSettings/setting1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] eventhub_authorization_rule_id: Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] eventhub_name: Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        :param pulumi.Input[str] log_analytics_workspace_id: Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AadDiagnosticSettingLogArgs']]]] logs: One or more `log` blocks as defined below.
        :param pulumi.Input[str] name: The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AadDiagnosticSettingArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an Azure Active Directory Diagnostic Setting for Azure Monitor.

        !> **Authentication** The API for this resource does not support service principal authentication. This resource can only be used with Azure CLI authentication.

        This overload accepts a single ``AadDiagnosticSettingArgs`` property bag
        instead of individual keyword arguments; see the other ``__init__``
        overload above for a full usage example and the Import instructions.

        :param str resource_name: The name of the resource.
        :param AadDiagnosticSettingArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: an Args bag is expanded into the
        # keyword form accepted by _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(AadDiagnosticSettingArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 eventhub_authorization_rule_id: Optional[pulumi.Input[str]] = None,
                 eventhub_name: Optional[pulumi.Input[str]] = None,
                 log_analytics_workspace_id: Optional[pulumi.Input[str]] = None,
                 logs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AadDiagnosticSettingLogArgs']]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 storage_account_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are adopting an existing resource via __props__;
        # otherwise build the property bag from the keyword arguments.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AadDiagnosticSettingArgs.__new__(AadDiagnosticSettingArgs)

            __props__.__dict__["eventhub_authorization_rule_id"] = eventhub_authorization_rule_id
            __props__.__dict__["eventhub_name"] = eventhub_name
            __props__.__dict__["log_analytics_workspace_id"] = log_analytics_workspace_id
            # `logs` is the only required input (unless rehydrating from a URN).
            if logs is None and not opts.urn:
                raise TypeError("Missing required property 'logs'")
            __props__.__dict__["logs"] = logs
            __props__.__dict__["name"] = name
            __props__.__dict__["storage_account_id"] = storage_account_id
        super(AadDiagnosticSetting, __self__).__init__(
            'azure:monitoring/aadDiagnosticSetting:AadDiagnosticSetting',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            eventhub_authorization_rule_id: Optional[pulumi.Input[str]] = None,
            eventhub_name: Optional[pulumi.Input[str]] = None,
            log_analytics_workspace_id: Optional[pulumi.Input[str]] = None,
            logs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AadDiagnosticSettingLogArgs']]]]] = None,
            name: Optional[pulumi.Input[str]] = None,
            storage_account_id: Optional[pulumi.Input[str]] = None) -> 'AadDiagnosticSetting':
        """
        Get an existing AadDiagnosticSetting resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] eventhub_authorization_rule_id: Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] eventhub_name: Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        :param pulumi.Input[str] log_analytics_workspace_id: Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AadDiagnosticSettingLogArgs']]]] logs: One or more `log` blocks as defined below.
        :param pulumi.Input[str] name: The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        :param pulumi.Input[str] storage_account_id: The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Lookups use the all-optional state bag rather than the Args class.
        __props__ = _AadDiagnosticSettingState.__new__(_AadDiagnosticSettingState)

        __props__.__dict__["eventhub_authorization_rule_id"] = eventhub_authorization_rule_id
        __props__.__dict__["eventhub_name"] = eventhub_name
        __props__.__dict__["log_analytics_workspace_id"] = log_analytics_workspace_id
        __props__.__dict__["logs"] = logs
        __props__.__dict__["name"] = name
        __props__.__dict__["storage_account_id"] = storage_account_id
        return AadDiagnosticSetting(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="eventhubAuthorizationRuleId")
    def eventhub_authorization_rule_id(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the ID of an Event Hub Namespace Authorization Rule used to send Diagnostics Data. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "eventhub_authorization_rule_id")

    @property
    @pulumi.getter(name="eventhubName")
    def eventhub_name(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the name of the Event Hub where Diagnostics Data should be sent. If not specified, the default Event Hub will be used. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "eventhub_name")

    @property
    @pulumi.getter(name="logAnalyticsWorkspaceId")
    def log_analytics_workspace_id(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the ID of a Log Analytics Workspace where Diagnostics Data should be sent.
        """
        return pulumi.get(self, "log_analytics_workspace_id")

    @property
    @pulumi.getter
    def logs(self) -> pulumi.Output[Sequence['outputs.AadDiagnosticSettingLog']]:
        """
        One or more `log` blocks as defined below.
        """
        return pulumi.get(self, "logs")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name which should be used for this Monitor Azure Active Directory Diagnostic Setting. Changing this forces a new Monitor Azure Active Directory Diagnostic Setting to be created.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> pulumi.Output[Optional[str]]:
        """
        The ID of the Storage Account where logs should be sent. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "storage_account_id")
|
import os
import subprocess
import sys
import pytest
def test_adding_deps(tmpdir):
    """After adding a dep to .cob-project.yml, `cob bootstrap` makes it importable.

    Creates a minimal project, bootstraps it, verifies `pact` is absent from the
    project env, then declares it as a dep, re-bootstraps, and verifies it imports.
    """
    # Sanity: we must not be running inside a cob-managed re-entry ourselves.
    assert 'COB_NO_REENTRY' not in os.environ
    # `pact` must not be importable in the outer test environment either.
    with pytest.raises(ImportError):
        import pact

    project_dir = tmpdir.join('proj')
    project_yaml = project_dir.join('.cob-project.yml')
    env_python = str(project_dir.join('.cob/env/bin/python'))

    with project_yaml.open('a', ensure=True) as fp:
        print('name: testproj', file=fp)
    _run_cob(project_dir, 'bootstrap')
    assert os.path.exists(env_python)
    # Without the dep declared, importing pact inside the env fails (exit code 1).
    assert subprocess.call([env_python, '-c', 'import pact']) == 1

    with project_yaml.open('a') as fp:
        print('deps:', file=fp)
        print(' - pact', file=fp)
    _run_cob(project_dir, 'bootstrap')
    # Now the env has pact installed, so the import succeeds.
    assert subprocess.call([env_python, '-c', 'import pact']) == 0
def _run_cob(cwd, cmd):
    """Run a single cob CLI command in *cwd*, raising on a non-zero exit.

    COB_DEVELOP=1 is injected so the in-tree cob sources are used; maximum
    verbosity (-vvvvv) is requested to ease debugging of failed runs.
    """
    argv = [sys.executable, '-m', 'cob.cli.main', '-vvvvv', str(cmd)]
    child_env = dict(os.environ)
    child_env['COB_DEVELOP'] = '1'
    subprocess.check_call(argv, cwd=str(cwd), env=child_env)
|
# Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import aiopg
import psycopg2
import pytest
from opentelemetry import trace as trace_api
from opentelemetry.instrumentation.aiopg import AiopgInstrumentor
from opentelemetry.test.test_base import TestBase
# Connection parameters for the functional-test PostgreSQL server; each is
# overridable via the environment so CI can point the suite at its own
# database container.
POSTGRES_HOST = os.getenv("POSTGRESQL_HOST", "localhost")
POSTGRES_PORT = int(os.getenv("POSTGRESQL_PORT", "5432"))
POSTGRES_DB_NAME = os.getenv("POSTGRESQL_DB_NAME", "opentelemetry-tests")
POSTGRES_PASSWORD = os.getenv("POSTGRESQL_PASSWORD", "testpassword")
POSTGRES_USER = os.getenv("POSTGRESQL_USER", "testuser")
def async_call(coro):
    """Drive *coro* to completion on the current thread's event loop and return its result."""
    # Reuse the thread's loop (not a fresh one) so connections created in one
    # call remain usable by later calls.
    return asyncio.get_event_loop().run_until_complete(coro)
class TestFunctionalAiopgConnect(TestBase):
    """Functional tests for AiopgInstrumentor over a direct ``aiopg.connect``
    connection.

    Requires a reachable PostgreSQL server configured via the POSTGRES_*
    module constants; the connection and cursor are shared by all tests.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._connection = None
        cls._cursor = None
        cls._tracer = cls.tracer_provider.get_tracer(__name__)
        # Instrument before connecting so the connection itself is wrapped.
        AiopgInstrumentor().instrument(tracer_provider=cls.tracer_provider)
        cls._connection = async_call(
            aiopg.connect(
                dbname=POSTGRES_DB_NAME,
                user=POSTGRES_USER,
                password=POSTGRES_PASSWORD,
                host=POSTGRES_HOST,
                port=POSTGRES_PORT,
            )
        )
        cls._cursor = async_call(cls._connection.cursor())

    @classmethod
    def tearDownClass(cls):
        if cls._cursor:
            cls._cursor.close()
        if cls._connection:
            cls._connection.close()
        AiopgInstrumentor().uninstrument()

    def validate_spans(self, span_name):
        """Assert exactly one rootSpan/child pair was exported and verify the
        child span's DB client attributes.

        :param span_name: expected name of the instrumented child span.
        """
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 2)
        # Fix: initialize both so an unexpected span set fails with a clean
        # assertion error instead of an UnboundLocalError.
        root_span = None
        child_span = None
        for span in spans:
            if span.name == "rootSpan":
                root_span = span
            else:
                child_span = span
            self.assertIsInstance(span.start_time, int)
            self.assertIsInstance(span.end_time, int)
        self.assertIsNotNone(root_span)
        self.assertIsNotNone(child_span)
        self.assertEqual(root_span.name, "rootSpan")
        self.assertEqual(child_span.name, span_name)
        self.assertIsNotNone(child_span.parent)
        self.assertIs(child_span.parent, root_span.get_span_context())
        self.assertIs(child_span.kind, trace_api.SpanKind.CLIENT)
        self.assertEqual(child_span.attributes["db.system"], "postgresql")
        self.assertEqual(child_span.attributes["db.name"], POSTGRES_DB_NAME)
        self.assertEqual(child_span.attributes["db.user"], POSTGRES_USER)
        self.assertEqual(child_span.attributes["net.peer.name"], POSTGRES_HOST)
        self.assertEqual(child_span.attributes["net.peer.port"], POSTGRES_PORT)

    def test_execute(self):
        """Should create a child span for execute method"""
        stmt = "CREATE TABLE IF NOT EXISTS test (id integer)"
        with self._tracer.start_as_current_span("rootSpan"):
            async_call(self._cursor.execute(stmt))
        self.validate_spans(stmt)

    def test_executemany(self):
        """Should create a child span for executemany"""
        stmt = "INSERT INTO test (id) VALUES (%s)"
        # aiopg does not support executemany; the span must still be recorded.
        with pytest.raises(psycopg2.ProgrammingError):
            with self._tracer.start_as_current_span("rootSpan"):
                data = (("1",), ("2",), ("3",))
                async_call(self._cursor.executemany(stmt, data))
        self.validate_spans(stmt)

    def test_callproc(self):
        """Should create a child span for callproc"""
        with self._tracer.start_as_current_span("rootSpan"), self.assertRaises(
            Exception
        ):
            async_call(self._cursor.callproc("test", ()))
        self.validate_spans("test")
class TestFunctionalAiopgCreatePool(TestBase):
    """Functional tests for AiopgInstrumentor over an ``aiopg.create_pool``
    connection pool.

    Requires a reachable PostgreSQL server configured via the POSTGRES_*
    module constants; the pool, connection, and cursor are shared by all tests.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._connection = None
        cls._cursor = None
        cls._tracer = cls.tracer_provider.get_tracer(__name__)
        # Instrument before creating the pool so acquired connections are wrapped.
        AiopgInstrumentor().instrument(tracer_provider=cls.tracer_provider)
        cls._pool = async_call(
            aiopg.create_pool(
                dbname=POSTGRES_DB_NAME,
                user=POSTGRES_USER,
                password=POSTGRES_PASSWORD,
                host=POSTGRES_HOST,
                port=POSTGRES_PORT,
            )
        )
        cls._connection = async_call(cls._pool.acquire())
        cls._cursor = async_call(cls._connection.cursor())

    @classmethod
    def tearDownClass(cls):
        if cls._cursor:
            cls._cursor.close()
        if cls._connection:
            cls._connection.close()
        if cls._pool:
            cls._pool.close()
        AiopgInstrumentor().uninstrument()

    def validate_spans(self, span_name):
        """Assert exactly one rootSpan/child pair was exported and verify the
        child span's DB client attributes.

        :param span_name: expected name of the instrumented child span.
        """
        spans = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans), 2)
        # Fix: initialize both so an unexpected span set fails with a clean
        # assertion error instead of an UnboundLocalError.
        root_span = None
        child_span = None
        for span in spans:
            if span.name == "rootSpan":
                root_span = span
            else:
                child_span = span
            self.assertIsInstance(span.start_time, int)
            self.assertIsInstance(span.end_time, int)
        self.assertIsNotNone(root_span)
        self.assertIsNotNone(child_span)
        self.assertEqual(root_span.name, "rootSpan")
        self.assertEqual(child_span.name, span_name)
        self.assertIsNotNone(child_span.parent)
        self.assertIs(child_span.parent, root_span.get_span_context())
        self.assertIs(child_span.kind, trace_api.SpanKind.CLIENT)
        self.assertEqual(child_span.attributes["db.system"], "postgresql")
        self.assertEqual(child_span.attributes["db.name"], POSTGRES_DB_NAME)
        self.assertEqual(child_span.attributes["db.user"], POSTGRES_USER)
        self.assertEqual(child_span.attributes["net.peer.name"], POSTGRES_HOST)
        self.assertEqual(child_span.attributes["net.peer.port"], POSTGRES_PORT)

    def test_execute(self):
        """Should create a child span for execute method"""
        stmt = "CREATE TABLE IF NOT EXISTS test (id integer)"
        with self._tracer.start_as_current_span("rootSpan"):
            async_call(self._cursor.execute(stmt))
        self.validate_spans(stmt)

    def test_executemany(self):
        """Should create a child span for executemany"""
        stmt = "INSERT INTO test (id) VALUES (%s)"
        # aiopg does not support executemany; the span must still be recorded.
        with pytest.raises(psycopg2.ProgrammingError):
            with self._tracer.start_as_current_span("rootSpan"):
                data = (("1",), ("2",), ("3",))
                async_call(self._cursor.executemany(stmt, data))
        self.validate_spans(stmt)

    def test_callproc(self):
        """Should create a child span for callproc"""
        with self._tracer.start_as_current_span("rootSpan"), self.assertRaises(
            Exception
        ):
            async_call(self._cursor.callproc("test", ()))
        self.validate_spans("test")
|
import pygame

# Minimal pygame window skeleton.
pygame.init()
# Bug fix: set_mode() takes the resolution as a single (width, height) tuple;
# pygame.display.set_mode(800, 600) raises a TypeError because the second
# positional argument is the flags bitmask, not the height.
screen = pygame.display.set_mode((800, 600))

# Bug fix: the original `while True: pass` never pumped the event queue, so
# the window appeared frozen and could not be closed. Poll events and exit
# cleanly when the user closes the window.
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
pygame.quit()
|
import scrython

# Ask which card to look up, then list every printing Scryfall knows about.
# The "++" prefix asks Scryfall for one result per printing of the card.
query = input("What editions of a card are you looking for? ")
search_results = scrython.cards.Search(q="++{}".format(query))
for printing in search_results.data():
    print(printing['set'].upper(), ":", printing['set_name'])
|
from dataclasses import dataclass
from typing import List, Optional, Tuple
from scam.consensus.coinbase import pool_parent_id, farmer_parent_id
from scam.types.blockchain_format.coin import Coin
from scam.types.blockchain_format.sized_bytes import bytes32
from scam.types.mempool_inclusion_status import MempoolInclusionStatus
from scam.types.spend_bundle import SpendBundle
from scam.util.ints import uint8, uint32, uint64
from scam.util.streamable import Streamable, streamable
from scam.wallet.util.transaction_type import TransactionType
@dataclass(frozen=True)
@streamable
class TransactionRecord(Streamable):
    """
    Used for storing transaction data and status in wallets.
    """

    confirmed_at_height: uint32
    created_at_time: uint64
    to_puzzle_hash: bytes32
    amount: uint64
    fee_amount: uint64
    confirmed: bool
    sent: uint32
    spend_bundle: Optional[SpendBundle]
    additions: List[Coin]
    removals: List[Coin]
    wallet_id: uint32

    # Represents the list of peers that we sent the transaction to, whether each one
    # included it in the mempool, and what the error message (if any) was
    sent_to: List[Tuple[str, uint8, Optional[str]]]
    trade_id: Optional[bytes32]
    type: uint32  # TransactionType
    name: bytes32

    def is_in_mempool(self) -> bool:
        """Return True if at least one peer accepted the transaction into its mempool.

        Peers whose status is still pending do not count as inclusion.
        """
        return any(
            MempoolInclusionStatus(status) == MempoolInclusionStatus.SUCCESS
            for _, status, _ in self.sent_to
        )

    def height_farmed(self, genesis_challenge: bytes32) -> Optional[uint32]:
        """For a confirmed farming reward, return the height the reward was farmed at.

        Returns None for unconfirmed or non-reward transactions, or when no
        matching reward parent is found within 100 blocks of confirmation.
        """
        if not self.confirmed:
            return None
        if self.type not in (TransactionType.FEE_REWARD, TransactionType.COINBASE_REWARD):
            return None
        reward_parent = self.additions[0].parent_coin_info
        # Scan backwards from the confirmation height; reward coins always sit
        # within the last 100 blocks.
        for candidate in range(self.confirmed_at_height, self.confirmed_at_height - 100, -1):
            if candidate < 0:
                return None
            height = uint32(candidate)
            if pool_parent_id(height, genesis_challenge) == reward_parent:
                return height
            if farmer_parent_id(height, genesis_challenge) == reward_parent:
                return height
        return None
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import session_manager
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
@tf_export(v1=["train.SyncReplicasOptimizer"])
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
This class is deprecated. For synchronous training, please use [Distribution
Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following accumulators/queue are created:
* N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
For the Parameter Server job:
1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch. Please note that
some workers can consume multiple minibatches, while some may not consume
even one. This is because each worker fetches minibatches as long as
a token exists. If one worker is stuck for some reason and does not
consume a token, another worker can use it.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.compat.v1.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
training_op = opt.minimize(total_loss, global_step=self.global_step)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
```
In the training program, every worker will run the train_op as if not
synchronized.
```python
with training.MonitoredTrainingSession(
master=workers[worker_id].target, is_chief=is_chief,
hooks=[sync_replicas_hook]) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(training_op)
```
To use SyncReplicasOptimizer with an `Estimator`, you need to send
sync_replicas_hook while calling the fit.
```python
my_estimator = DNNClassifier(..., optimizer=opt)
my_estimator.fit(..., hooks=[sync_replicas_hook])
```
"""
@deprecation.deprecated(
None, "The `SyncReplicaOptimizer` class is deprecated. For synchronous "
"training, please use [Distribution Strategies](https://github.com/"
"tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).",
warn_once=True)
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.

    This contains most of the synchronization implementation and also wraps
    the apply_gradients() from the real optimizer. Per variable, gradients are
    pushed into a conditional accumulator; the averaged gradient is applied
    once `replicas_to_aggregate` fresh gradients have arrived, after which
    tokens are enqueued to release the waiting replicas.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated. Required here: the accumulators compare
        it against each replica's local step to drop stale gradients.
      name: Optional name for the returned operation. Default to the
        name passed to the Optimizer constructor.

    Returns:
      train_op: The op to dequeue a token so the replicas can exit this batch
        and start the next one. This is executed by each replica.

    Raises:
      ValueError: If the grads_and_vars is empty.
      ValueError: If global step is not provided, the staleness cannot be
        checked.
    """
    if not grads_and_vars:
      raise ValueError("Must supply at least one variable")

    if global_step is None:
      raise ValueError("Global step is required to check staleness")

    self._global_step = global_step
    train_ops = []
    aggregated_grad = []
    var_list = []

    # local_anchor op will be placed on this worker task by default.
    local_anchor = control_flow_ops.no_op()
    # Colocating local_step variable prevents it being placed on the PS.
    distribution_strategy = distribution_strategy_context.get_strategy()
    with distribution_strategy.extended.colocate_vars_with(local_anchor):
      self._local_step = variable_scope.variable(
          initial_value=0,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          dtype=global_step.dtype.base_dtype,
          name="sync_rep_local_step")

    # Workers sync their local step to the global step before training.
    self.local_step_init_op = state_ops.assign(self._local_step, global_step)
    chief_init_ops = [self.local_step_init_op]
    self.ready_for_local_init_op = variables.report_uninitialized_variables(
        variables.global_variables())

    with ops.name_scope(None, self._name):
      for grad, var in grads_and_vars:
        var_list.append(var)
        with ops.device(var.device):
          # Dense gradients.
          if grad is None:
            aggregated_grad.append(None)  # pass-through.
            continue
          elif isinstance(grad, ops.Tensor):
            # One shared accumulator per variable; apply_grad tags each push
            # with the replica's local step so stale pushes can be dropped.
            grad_accum = data_flow_ops.ConditionalAccumulator(
                grad.dtype,
                shape=var.get_shape(),
                shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_grad(
                self._replicas_to_aggregate))
          else:
            if not isinstance(grad, ops.IndexedSlices):
              raise ValueError("Unknown grad type!")
            # Sparse gradients use the sparse accumulator variant.
            grad_accum = data_flow_ops.SparseConditionalAccumulator(
                grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_indexed_slices_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_indexed_slices_grad(
                self._replicas_to_aggregate))

          self._accumulator_list.append((grad_accum, var.device))

      aggregated_grads_and_vars = zip(aggregated_grad, var_list)

      # sync_op will be assigned to the same device as the global step.
      with ops.device(global_step.device), ops.name_scope(""):
        update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
                                              global_step)

      # Create token queue.
      with ops.device(global_step.device), ops.name_scope(""):
        sync_token_queue = (
            data_flow_ops.FIFOQueue(-1,
                                    global_step.dtype.base_dtype,
                                    shapes=(),
                                    name="sync_token_q",
                                    shared_name="sync_token_q"))
        self._sync_token_queue = sync_token_queue

      with ops.device(global_step.device), ops.name_scope(""):
        # Replicas have to wait until they can get a token from the token queue.
        with ops.control_dependencies(train_ops):
          token = sync_token_queue.dequeue()
        # Dequeuing a token doubles as recording the step it was issued for.
        train_op = state_ops.assign(self._local_step, token)

        with ops.control_dependencies([update_op]):
          # Sync_op needs to insert tokens to the token queue at the end of the
          # step so the replicas can fetch them to start the next step.
          tokens = array_ops.fill([self._tokens_per_step], global_step)
          sync_op = sync_token_queue.enqueue_many((tokens,))

        if self._variable_averages is not None:
          # Chain the moving-average update after the token refill.
          with ops.control_dependencies([sync_op]), ops.name_scope(""):
            sync_op = self._variable_averages.apply(
                self._variables_to_average)

        self._chief_queue_runner = queue_runner.QueueRunner(
            sync_token_queue, [sync_op])
      # Point every accumulator at the current global step on its own device.
      for accum, dev in self._accumulator_list:
        with ops.device(dev):
          chief_init_ops.append(
              accum.set_global_step(
                  global_step, name="SetGlobalStep"))
      self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
      self._gradients_applied = True
      return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def variables(self):
"""Fetches a list of optimizer variables in the default graph.
This wraps `variables()` from the actual optimizer. It does not include
the `SyncReplicasOptimizer`'s local step.
Returns:
A list of variables.
"""
return self._opt.variables()
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
  def get_init_tokens_op(self, num_tokens=-1):
    """Returns the op to fill the sync_token_queue with the tokens.

    This is supposed to be executed in the beginning of the chief/sync thread
    so that even if the total_num_replicas is less than replicas_to_aggregate,
    the model can still proceed as the replicas can compute multiple steps per
    variable update. Make sure:
    `num_tokens >= replicas_to_aggregate - total_num_replicas`.

    Args:
      num_tokens: Number of tokens to add to the queue. The default of -1
        means "use replicas_to_aggregate", which always satisfies the bound
        above.

    Returns:
      An op for the chief/sync replica to fill the token queue.

    Raises:
      ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens are smaller than replicas_to_aggregate -
        total_num_replicas.
    """
    if self._gradients_applied is False:
      raise ValueError(
          "get_init_tokens_op() should be called after apply_gradients().")

    # Minimum tokens required so the first step cannot deadlock when there
    # are fewer physical replicas than gradients to aggregate.
    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
    if num_tokens == -1:
      # Sentinel: default to one token per aggregated gradient.
      num_tokens = self._replicas_to_aggregate
    elif num_tokens < tokens_needed:
      raise ValueError(
          "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
          (num_tokens, tokens_needed))

    if num_tokens > 0:
      with ops.device(self._global_step.device), ops.name_scope(""):
        tokens = array_ops.fill([num_tokens], self._global_step)
        init_tokens = self._sync_token_queue.enqueue_many((tokens,))
    else:
      # Nothing to enqueue; return a no-op so callers can run it uniformly.
      init_tokens = control_flow_ops.no_op(name="no_init_tokens")

    return init_tokens
def make_session_run_hook(self, is_chief, num_tokens=-1):
"""Creates a hook to handle SyncReplicasHook ops such as initialization."""
return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook):
  """SessionRunHook that wires up a SyncReplicasOptimizer's init/queue ops."""

  def __init__(self, sync_optimizer, is_chief, num_tokens):
    """Creates hook to handle SyncReplicasOptimizer initialization ops.

    Args:
      sync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether is this a chief replica or not.
      num_tokens: Number of tokens to add to the queue.
    """
    self._sync_optimizer = sync_optimizer
    self._is_chief = is_chief
    self._num_tokens = num_tokens

  def begin(self):
    """Collects the ops to run; the chief additionally owns the queue runner."""
    if self._sync_optimizer._gradients_applied is False:  # pylint: disable=protected-access
      raise ValueError(
          "SyncReplicasOptimizer.apply_gradient should be called before using "
          "the hook.")
    # Chief and workers wait on the same readiness op before local init.
    self._ready_for_local_init_op = (
        self._sync_optimizer.ready_for_local_init_op)
    if self._is_chief:
      self._local_init_op = self._sync_optimizer.chief_init_op
      self._q_runner = self._sync_optimizer.get_chief_queue_runner()
      self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
          self._num_tokens)
    else:
      # Workers only resync their local step; token/queue management is the
      # chief's job.
      self._local_init_op = self._sync_optimizer.local_step_init_op
      self._q_runner = None
      self._init_tokens_op = None

  def after_create_session(self, session, coord):
    """Runs SyncReplicasOptimizer initialization ops."""
    local_init_success, msg = session_manager._ready(  # pylint: disable=protected-access
        self._ready_for_local_init_op, session,
        "Model is not ready for SyncReplicasOptimizer local init.")
    if not local_init_success:
      raise RuntimeError(
          "Init operations did not make model ready for SyncReplicasOptimizer "
          "local_init. Init op: %s, error: %s" %
          (self._local_init_op.name, msg))
    session.run(self._local_init_op)
    # Seed the token queue, then (chief only) start the sync queue runner.
    if self._init_tokens_op is not None:
      session.run(self._init_tokens_op)
    if self._q_runner is not None:
      self._q_runner.create_threads(
          session, coord=coord, daemon=True, start=True)
|
import lasagne
import theano
import theano.tensor as T
import numpy as np
class ThinSplineTransformerLayer(lasagne.layers.MergeLayer):
    """
    Thin plate spline spatial transformer layer

    The layer applies a thin plate spline transformation [2] on the input.
    The transform is determined based on the movement of some number of control
    points. The starting positions for these control points are fixed, and the
    destinations of these points are fed in as the output of the localization
    network.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or a tuple
        The layer feeding into this layer, or the expected input shape. The
        output of this layer should be a 4D tensor, with shape
        ``(batch_size, num_input_channels, input_rows, input_columns)``.
    localization_network : a :class:`Layer` instance
        The network that calculates the parameters of the thin plate spline
        transformation as the x and y coordinates of the destinations of each
        control point. The output of the localization network must be twice the
        number of control points.
    downsample_factor : float or iterable of float
        A float or a 2-element tuple specifying the downsample factor for the
        output image (in both spatial dimensions). A value of 1 will keep the
        original size of the input. Values larger than 1 will downsample the
        input. Values below 1 will upsample the input.
    num_control_points : integer
        The number of control points to be used. These points will be arranged
        as a grid along the image, so the value must be a perfect square.

    References
    ----------
    .. [1] Spatial Transformer Networks
           Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
           Submitted on 5 Jun 2015
    .. [2] Principal warps: thin-plate splines and the decomposition of deformations.
           Fred L. Bookstein, 1989, IEEE Transactions on Pattern Analysis and Machine Intelligence.
           http://doi.org/10.1109/34.24792

    Examples
    --------
    Here, we'll simply implement an identity transform. First we'll create the source
    and destination control points. To make everything invariant to the shape of the
    image, the x and y range of the image is normalized to [-1, 1] as in ref [1]. To
    replicate an identity transform, we'll simply set the offsets to 0. More complicated
    transformations can easily be implemented using different x and y offsets (importantly,
    each control point can have its own pair of offsets).
    >>> import numpy as np
    >>> import lasagne
    >>> num_control_points = 16
    >>> grid_size = np.sqrt(num_control_points)
    >>> x_control_source, y_control_source = np.meshgrid(np.linspace(-1, 1, grid_size),
    >>>                                                  np.linspace(-1, 1, grid_size))
    >>> # offset for the control points
    >>> x_offset = 0
    >>> y_offset = 0
    >>> x_control_dest = x_control_source.flatten() + x_offset
    >>> y_control_dest = y_control_source.flatten() + y_offset
    >>> dest_points = np.vstack((x_control_dest, y_control_dest)).flatten()
    >>>
    >>> # We'll set the bias to be the source points
    >>> b = np.vstack((x_control_source.flatten(), y_control_source.flatten())).flatten()
    >>>
    >>> # Create the network
    >>> W = lasagne.init.Constant(0.0)
    >>> l_in = lasagne.layers.InputLayer((None, 3, 28, 28)) # (batch_size, channels, height, width)
    >>> l_loc = lasagne.layers.DenseLayer(l_in, num_units=2*num_control_points, W=W, b=b,
    ...                                   nonlinearity=None)
    >>> l_trans = lasagne.layers.ThinSplineTransformerLayer(l_in, l_loc,
    ...                                                     num_control_points=num_control_points)
    """
    def __init__(self, incoming, localization_network, downsample_factor=1,
                 num_control_points=16, **kwargs):
        super(ThinSplineTransformerLayer, self).__init__(
            [incoming, localization_network], **kwargs)
        # Normalize a scalar factor into a (row_factor, col_factor) pair.
        self.downsample_factor = lasagne.utils.as_tuple(downsample_factor, 2)
        self.num_control_points = num_control_points

        # Validate the shapes of both inputs before building any state.
        input_shp, loc_shp = self.input_shapes

        if loc_shp[-1] != 2*num_control_points or len(loc_shp) != 2:
            raise ValueError("The localization network must have "
                             "output shape: (batch_size, 2*num_control_points)")
        if round(np.sqrt(num_control_points)) != np.sqrt(num_control_points):
            raise ValueError("The number of control points must be"
                             " a perfect square.")
        if len(input_shp) != 4:
            raise ValueError("The input network must have a 4-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns)")

        # Create source points and L matrix
        # (fixed grid of control points plus the precomputed inverse of the
        # TPS system matrix from ref [2]).
        self.source_points, self.L_inv = _initialize_tps(num_control_points)

    def get_output_shape_for(self, input_shapes):
        # Spatial dims are scaled by the downsample factors; unknown (None)
        # dims stay unknown.
        shape = input_shapes[0]
        factors = self.downsample_factor
        return (shape[:2] + tuple(None if s is None else int(s / f)
                                  for s, f in zip(shape[2:], factors)))

    def get_output_for(self, inputs, **kwargs):
        # see eq. (1) and sec 3.1 in [1]
        input, control_points = inputs
        return _transform_thin_thin_plate_spline(control_points, input,
                                                 self.source_points, self.L_inv,
                                                 self.downsample_factor)
def _transform_thin_thin_plate_spline(dest_points, input, source_points, L_inv, downsample_factor):
    """Warps `input` with the TPS mapping `source_points` onto `dest_points`.

    :param dest_points: (num_batch, 2 * num_control_points) destination
        coordinates predicted by the localization network.
    :param input: (num_batch, num_channels, height, width) input images.
    :param source_points: (2, num_control_points) fixed source control grid.
    :param L_inv: inverse TPS system matrix built by _initialize_tps.
    :param downsample_factor: 2-tuple of (row, column) downsampling factors.
    :return: warped images, (num_batch, num_channels, out_height, out_width).
    """
    num_batch, num_channels, height, width = input.shape
    num_control_points = source_points.shape[1]

    # reshape destination points to be (num_batch, 2, num_control_points)
    dest_points = T.reshape(dest_points, (num_batch, 2, num_control_points))

    # Solve as in ref [2]. Only columns 3: of L_inv are used — the first
    # three rows of the system are the affine constraint rows (see
    # _initialize_tps), whose right-hand side is zero.
    coefficients = T.dot(dest_points, L_inv[:, 3:].T)

    # Transformed grid at the (possibly downsampled) output resolution.
    out_height = T.cast(height / downsample_factor[0], 'int64')
    out_width = T.cast(width / downsample_factor[1], 'int64')
    orig_grid = _meshgrid(out_height, out_width)
    orig_grid = orig_grid[0:2, :]  # drop the homogeneous "ones" row
    orig_grid = T.tile(orig_grid, (num_batch, 1, 1))

    # Transform each point on the source grid (image_size x image_size)
    transformed_points = _get_transformed_points_tps(orig_grid, source_points,
                                                     coefficients, num_control_points,
                                                     num_batch)

    # Get out new points
    x_transformed = transformed_points[:, 0].flatten()
    y_transformed = transformed_points[:, 1].flatten()

    # dimshuffle input to (bs, height, width, channels)
    input_dim = input.dimshuffle(0, 2, 3, 1)
    input_transformed = _interpolate(
        input_dim, x_transformed, y_transformed,
        out_height, out_width)

    output = T.reshape(
        input_transformed, (num_batch, out_height, out_width, num_channels))
    output = output.dimshuffle(0, 3, 1, 2)  # dimshuffle to conv format
    return output
def _get_transformed_points_tps(new_points, source_points, coefficients, num_points, batch_size):
    """
    Calculates the transformed points' value using the provided coefficients

    :param new_points: num_batch x 2 x num_to_transform tensor
    :param source_points: 2 x num_points array of source points
    :param coefficients: coefficients (should be shape (num_batch, 2, control_points + 3))
    :param num_points: the number of points
    :param batch_size: number of images in the batch
    :return: the x and y coordinates of each transformed point. Shape (num_batch, 2, num_to_transform)
    """
    # Calculate the U function for the new point and each source point as in ref [2]
    # The U function is simply U(r) = r^2 * log(r^2), where r^2 is the squared distance

    # Calculate the squared distance between the new point and the source points
    to_transform = new_points.dimshuffle(0, 'x', 1, 2)  # (batch_size, 1, 2, num_to_transform)
    stacked_transform = T.tile(to_transform, (1, num_points, 1, 1))  # (batch_size, num_points, 2, num_to_transform)
    r_2 = T.sum(((stacked_transform - source_points.dimshuffle('x', 1, 0, 'x')) ** 2), axis=2)

    # Take the product (r^2 * log(r^2)), being careful to avoid NaNs.
    # U has a removable singularity at r = 0 (its limit is 0), but evaluating
    # it directly yields 0 * -inf = NaN, so substitute 0 wherever log(r^2) is
    # not finite. BUGFIX: the previous code had the T.switch branches swapped,
    # which zeroed out every well-defined distance instead of the degenerate
    # ones; this matches _U_func_numpy, which returns 0 for coincident points.
    log_r_2 = T.log(r_2)
    distances = T.switch(T.or_(T.isnan(log_r_2), T.isinf(log_r_2)),
                         0., r_2 * log_r_2)

    # Add in the coefficients for the affine translation (1, x, and y, corresponding to a_1, a_x, and a_y)
    upper_array = T.concatenate([T.ones((batch_size, 1, new_points.shape[2]), dtype=theano.config.floatX),
                                 new_points], axis=1)
    right_mat = T.concatenate([upper_array, distances], axis=1)

    # Calculate the new value as the dot product
    new_value = T.batched_dot(coefficients, right_mat)
    return new_value
def _interpolate(im, x, y, out_height, out_width):
    """Bilinearly samples `im` at normalized coordinates (x, y).

    :param im: (num_batch, height, width, channels) input images.
    :param x: flattened x sample coordinates in [-1, 1] (clipped to range).
    :param y: flattened y sample coordinates in [-1, 1] (clipped to range).
    :param out_height: output raster height (used to offset flat indices).
    :param out_width: output raster width (used to offset flat indices).
    :return: (num_batch * out_height * out_width, channels) sampled pixels.
    """
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)

    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)

    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)

    # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
    # we need those in floatX for interpolation and in int64 for indexing. for
    # indexing, we need to take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')

    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    # `base` is the flat offset of each sample's image within the batch.
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0  # top-left neighbor
    idx_b = base_y1 + x0  # bottom-left neighbor
    idx_c = base_y0 + x1  # top-right neighbor
    idx_d = base_y1 + x1  # bottom-right neighbor

    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]

    # calculate interpolated values; the four bilinear weights sum to 1
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
def _linspace(start, stop, num):
    """Symbolic equivalent of np.linspace(start, stop, num), in floatX."""
    start = T.cast(start, theano.config.floatX)
    stop = T.cast(stop, theano.config.floatX)
    num = T.cast(num, theano.config.floatX)
    # num evenly spaced samples: start, start + step, ..., stop.
    step = (stop - start) / (num - 1)
    return start + step * T.arange(num, dtype=theano.config.floatX)
def _meshgrid(height, width):
    """Symbolic grid generator from eq. (1) of ref [1].

    Equivalent to the numpy snippet::

        x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
                               np.linspace(-1, 1, height))
        grid = np.vstack([x_t.flatten(), y_t.flatten(),
                          np.ones(np.prod(x_t.shape))])

    Implemented with Theano ops so that `height` and `width` may be symbolic.
    """
    # Outer products against a ones-vector broadcast the 1-D ramps into
    # full (height, width) coordinate planes.
    xs = T.dot(T.ones((height, 1)),
               _linspace(-1.0, 1.0, width).dimshuffle('x', 0))
    ys = T.dot(_linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
               T.ones((1, width)))
    flat_x = xs.reshape((1, -1))
    flat_y = ys.reshape((1, -1))
    # Third row of ones makes the coordinates homogeneous.
    return T.concatenate([flat_x, flat_y, T.ones_like(flat_x)], axis=0)
def _U_func_numpy(x1, y1, x2, y2):
"""
Function which implements the U function from Bookstein paper
:param x1: x coordinate of the first point
:param y1: y coordinate of the first point
:param x2: x coordinate of the second point
:param y2: y coordinate of the second point
:return: value of z
"""
# Return zero if same point
if x1 == x2 and y1 == y2:
return 0.
# Calculate the squared Euclidean norm (r^2)
r_2 = (x2 - x1) ** 2 + (y2 - y1) ** 2
# Return the squared norm (r^2 * log r^2)
return r_2 * np.log(r_2)
def _initialize_tps(num_control_points):
    """
    Builds the fixed source control grid and the inverted L system matrix
    needed by the TPS solver, as in ref [2].

    :param num_control_points: the number of control points. Must be a perfect
        square. Points will be used to generate an evenly spaced grid.
    :return:
        source_points: shape (2, num_control_points) tensor
        L_inv: shape (num_control_points + 3, num_control_points + 3) tensor
    """
    # Evenly spaced control grid over [-1, 1] x [-1, 1].
    grid_size = np.sqrt(num_control_points)
    x_src, y_src = np.meshgrid(np.linspace(-1, 1, grid_size),
                               np.linspace(-1, 1, grid_size))
    source_points = np.vstack((x_src.flatten(), y_src.flatten()))

    # L is square with three extra equations constraining the affine part.
    num_equations = num_control_points + 3
    L = np.zeros((num_equations, num_equations))

    # P block (and its transpose): the affine terms [1, x, y] per point.
    L[0, 3:num_equations] = 1.
    L[1:3, 3:num_equations] = source_points
    L[3:num_equations, 0] = 1.
    L[3:num_equations, 1:3] = source_points.T

    # K block: pairwise U() kernel values. K is symmetric, so compute each
    # pair once and mirror it.
    for i in range(num_control_points):
        for j in range(i, num_control_points):
            L[i + 3, j + 3] = _U_func_numpy(source_points[0, i], source_points[1, i],
                                            source_points[0, j], source_points[1, j])
            if i != j:
                L[j + 3, i + 3] = L[i + 3, j + 3]

    # Invert once up front, then hand everything to Theano in floatX.
    L_inv = np.linalg.inv(L)
    L_inv = T.as_tensor_variable(L_inv.astype(theano.config.floatX))
    source_points = T.as_tensor_variable(
        source_points.astype(theano.config.floatX))
    return source_points, L_inv
|
"""
Preservando Metadata com Wraps
Metadados -> São dados intrínsecos em arquivos.
Wraps -> São funções que envolvem elementos com diversas finalidades.
# Problema
def ver_log(funcao):
def logar(*args, **kwargs):
Eu sou uma função (logar) dentro de outra
print(f'Você está chamando {funcao.__name__}')
print(f'Aqui a documentação: {funcao.__doc__}')
return funcao(*args, **kwargs)
return logar
@ver_log
def soma(a, b):
Soma dois números
return a + b
print(soma(10, 30))
print(soma.__name__) # soma
print(soma.__doc__) # Soma dois números
"""
# Resolução do Problema
from functools import wraps
def ver_log(funcao):
    """Decorator that logs the wrapped function's name and doc before calling it.

    functools.wraps copies the wrapped function's metadata (__name__,
    __doc__, ...) onto the wrapper, so introspection and help() keep
    reporting the original function.
    """
    @wraps(funcao)
    def logar(*args, **kwargs):
        """Inner wrapper; its metadata is replaced by ``funcao``'s via @wraps."""
        print(f'Você está chamando {funcao.__name__}')
        print(f'Aqui a documentação: {funcao.__doc__}')
        return funcao(*args, **kwargs)
    return logar
@ver_log
def soma(a, b):
    """Soma dois números"""  # printed at runtime via __doc__ - do not change
    return a + b
# Thanks to functools.wraps inside ver_log, the wrapper exposes soma's own
# metadata rather than the inner wrapper's:
print(soma(10, 30))
print(soma.__name__)  # soma
print(soma.__doc__)  # Soma dois números
# help() prints the preserved signature/doc and returns None, so this line
# also prints "None".
print(help(soma))
|
import sys
import subprocess
import re
import os
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from six import string_types
import oddt
from oddt.utils import (is_openbabel_molecule,
is_molecule,
check_molecule)
from oddt.spatial import rmsd
class autodock_vina(object):
    def __init__(self,
                 protein=None,
                 auto_ligand=None,
                 size=(20, 20, 20),
                 center=(0, 0, 0),
                 exhaustiveness=8,
                 num_modes=9,
                 energy_range=3,
                 seed=None,
                 prefix_dir='/tmp',
                 n_cpu=1,
                 executable=None,
                 autocleanup=True,
                 skip_bad_mols=True):
        """Autodock Vina docking engine, which extends its capabilities:
        automatic box (auto-centering on ligand).

        Parameters
        ----------
        protein: oddt.toolkit.Molecule object (default=None)
            Protein object to be used while generating descriptors.
        auto_ligand: oddt.toolkit.Molecule object or string (default=None)
            Ligand used to center the docking box. Either an ODDT molecule or
            a file (opened based on extension and read into an ODDT molecule).
            Box is centered on the geometric center of the molecule.
        size: tuple, shape=[3] (default=(20, 20, 20))
            Dimensions of the docking box (in Angstroms)
        center: tuple, shape=[3] (default=(0,0,0))
            The center of the docking box in cartesian space.
        exhaustiveness: int (default=8)
            Exhaustiveness parameter of Autodock Vina
        num_modes: int (default=9)
            Number of conformations generated by Autodock Vina. The maximum
            number of docked poses is 9 (due to Autodock Vina limitation).
        energy_range: int (default=3)
            Energy range cutoff for Autodock Vina
        seed: int or None (default=None)
            Random seed for Autodock Vina
        prefix_dir: string (default=/tmp)
            Temporary directory for Autodock Vina files
        n_cpu: int (default=1)
            Number of CPUs to use; should not exceed `exhaustiveness`.
        executable: string or None (default=None)
            Autodock Vina executable location in the system.
            It's really necessary if autodetection fails.
        autocleanup: bool (default=True)
            Should the docking engine clean up after execution?
        skip_bad_mols: bool (default=True)
            Should molecules that crash Autodock Vina be skipped.
        """
        self.dir = prefix_dir
        # Scratch dir is created lazily by the `tmp_dir` property.
        self._tmp_dir = None
        # define binding site
        self.size = size
        self.center = center
        # center automatically on ligand (geometric center of its coordinates)
        if auto_ligand:
            if isinstance(auto_ligand, string_types):
                extension = auto_ligand.split('.')[-1]
                auto_ligand = next(oddt.toolkit.readfile(extension, auto_ligand))
            self.center = auto_ligand.coords.mean(axis=0).round(3)
        # autodetect Vina executable via `which` if not given explicitly
        if not executable:
            try:
                self.executable = (subprocess.check_output(['which', 'vina'])
                                   .decode('ascii').split('\n')[0])
            except subprocess.CalledProcessError:
                raise Exception('Could not find Autodock Vina binary.'
                                'You have to install it globaly or supply binary'
                                'full directory via `executable` parameter.')
        else:
            self.executable = executable
        # detect version by parsing `vina --version` output
        self.version = (subprocess.check_output([self.executable, '--version'])
                        .decode('ascii').split(' ')[2])
        self.autocleanup = autocleanup
        self.cleanup_dirs = set()

        # share protein to class
        self.protein = None
        self.protein_file = None
        if protein:
            self.set_protein(protein)
        self.skip_bad_mols = skip_bad_mols
        self.n_cpu = n_cpu
        if self.n_cpu > exhaustiveness:
            warnings.warn('Exhaustiveness is lower than n_cpus, thus CPU will '
                          'not be saturated.')

        # pregenerate common Vina parameters shared by all invocations
        self.params = []
        self.params += ['--center_x', str(self.center[0]),
                        '--center_y', str(self.center[1]),
                        '--center_z', str(self.center[2])]
        self.params += ['--size_x', str(self.size[0]),
                        '--size_y', str(self.size[1]),
                        '--size_z', str(self.size[2])]
        self.params += ['--exhaustiveness', str(exhaustiveness)]
        if seed is not None:
            self.params += ['--seed', str(seed)]
        if num_modes > 9 or num_modes < 1:
            raise ValueError('The number of docked poses must be between 1 and 9'
                             ' (due to Autodock Vina limitation).')
        self.params += ['--num_modes', str(num_modes)]
        self.params += ['--energy_range', str(energy_range)]
@property
def tmp_dir(self):
if not self._tmp_dir:
self._tmp_dir = mkdtemp(dir=self.dir, prefix='autodock_vina_')
self.cleanup_dirs.add(self._tmp_dir)
return self._tmp_dir
    @tmp_dir.setter
    def tmp_dir(self, value):
        # Allow callers to inject a specific scratch directory. Note that a
        # manually set directory is NOT added to cleanup_dirs, so it will not
        # be removed automatically.
        self._tmp_dir = value
def set_protein(self, protein):
    """Change protein to dock to.

    Parameters
    ----------
    protein: oddt.toolkit.Molecule object
        Protein object to be used. A file path (string) is also accepted;
        a ``.pdbqt`` file is reused directly as the receptor file.
    """
    # A new protein invalidates the previous scratch directory.
    self._tmp_dir = None
    if protein:
        if isinstance(protein, string_types):
            extension = protein.split('.')[-1]
            if extension == 'pdbqt':
                # Already in Vina's native format -- reuse the file as-is.
                self.protein_file = protein
            self.protein = next(oddt.toolkit.readfile(extension, protein))
            self.protein.protein = True
        else:
            self.protein = protein
        # Only write a PDBQT receptor if we do not already have one.
        if self.protein_file is None:
            self.protein_file = write_vina_pdbqt(self.protein, self.tmp_dir,
                                                 flexible=False)
def score(self, ligands, protein=None):
    """Automated scoring procedure (Vina ``--score_only``).

    Parameters
    ----------
    ligands: iterable of oddt.toolkit.Molecule objects
        Ligands to score
    protein: oddt.toolkit.Molecule object or None
        Protein object to be used. If None, then the default
        one is used, else the protein is new default.

    Returns
    -------
    ligands : array of oddt.toolkit.Molecule objects
        Array of ligands (scores are stored in mol.data method)

    Raises
    ------
    IOError
        If no receptor has been set.
    Exception
        If Vina fails and ``skip_bad_mols`` is False.
    """
    if protein:
        self.set_protein(protein)
    if not self.protein_file:
        raise IOError("No receptor.")
    if is_molecule(ligands):
        ligands = [ligands]
    ligand_dir = mkdtemp(dir=self.tmp_dir, prefix='ligands_')
    output_array = []
    try:
        for n, ligand in enumerate(ligands):
            check_molecule(ligand, force_coords=True)
            ligand_file = write_vina_pdbqt(ligand, ligand_dir, name_id=n)
            try:
                scores = parse_vina_scoring_output(
                    subprocess.check_output([self.executable, '--score_only',
                                             '--receptor', self.protein_file,
                                             '--ligand', ligand_file] + self.params,
                                            stderr=subprocess.STDOUT))
            except subprocess.CalledProcessError as e:
                sys.stderr.write(e.output.decode('ascii'))
                if self.skip_bad_mols:
                    continue
                else:
                    raise Exception('Autodock Vina failed. Command: "%s"' %
                                    ' '.join(e.cmd))
            ligand.data.update(scores)
            output_array.append(ligand)
    finally:
        # BUGFIX: previously the scratch directory leaked when the
        # re-raise path above was taken; always remove it.
        rmtree(ligand_dir)
    return output_array
def dock(self, ligands, protein=None):
    """Automated docking procedure.

    Parameters
    ----------
    ligands: iterable of oddt.toolkit.Molecule objects
        Ligands to dock
    protein: oddt.toolkit.Molecule object or None
        Protein object to be used. If None, then the default one
        is used, else the protein is new default.

    Returns
    -------
    ligands : array of oddt.toolkit.Molecule objects
        Array of ligands (scores are stored in mol.data method)
    """
    if protein:
        self.set_protein(protein)
    if not self.protein_file:
        raise IOError("No receptor.")
    # Allow a single molecule as well as an iterable of molecules.
    if is_molecule(ligands):
        ligands = [ligands]
    ligand_dir = mkdtemp(dir=self.tmp_dir, prefix='ligands_')
    output_array = []
    for n, ligand in enumerate(ligands):
        check_molecule(ligand, force_coords=True)
        ligand_file = write_vina_pdbqt(ligand, ligand_dir, name_id=n)
        # Vina writes docked poses next to the input ligand file.
        ligand_outfile = ligand_file[:-6] + '_out.pdbqt'
        try:
            scores = parse_vina_docking_output(
                subprocess.check_output([self.executable, '--receptor',
                                         self.protein_file,
                                         '--ligand', ligand_file,
                                         '--out', ligand_outfile] +
                                        self.params +
                                        ['--cpu', str(self.n_cpu)],
                                        stderr=subprocess.STDOUT))
        except subprocess.CalledProcessError as e:
            sys.stderr.write(e.output.decode('ascii'))
            if self.skip_bad_mols:
                continue  # TODO: print some warning message
            else:
                raise Exception('Autodock Vina failed. Command: "%s"' %
                                ' '.join(e.cmd))
        # docked conformations may have wrong connectivity - use source ligand
        if is_openbabel_molecule(ligand):
            if oddt.toolkits.ob.__version__ >= '2.4.0':
                # find the order of PDBQT atoms assigned by OpenBabel
                # (columns 7-12 of ATOM records hold the original index)
                with open(ligand_file) as f:
                    write_order = [int(line[7:12].strip())
                                   for line in f
                                   if line[:4] == 'ATOM']
                new_order = sorted(range(len(write_order)),
                                   key=write_order.__getitem__)
                new_order = [i + 1 for i in new_order]  # OBMol has 1 based idx
                assert len(new_order) == len(ligand.atoms)
            else:
                # Openbabel 2.3.2 does not support preserving atom order.
                # We read back the PDBQT ligand to get "correct" bonding.
                ligand = next(oddt.toolkit.readfile('pdbqt', ligand_file))
                if 'REMARK' in ligand.data:
                    del ligand.data['REMARK']
        docked_ligands = oddt.toolkit.readfile('pdbqt', ligand_outfile)
        for docked_ligand, score in zip(docked_ligands, scores):
            # Renumber atoms to match the input ligand
            if (is_openbabel_molecule(docked_ligand) and
                    oddt.toolkits.ob.__version__ >= '2.4.0'):
                docked_ligand.OBMol.RenumberAtoms(new_order)
            # HACK: copy docked coordinates onto source ligand
            # We assume that the order of atoms match between ligands
            clone = ligand.clone
            clone.clone_coords(docked_ligand)
            clone.data.update(score)
            # Calculate RMSD to the input pose
            clone.data['vina_rmsd_input'] = rmsd(ligand, clone)
            clone.data['vina_rmsd_input_min'] = rmsd(ligand, clone,
                                                     method='min_symmetry')
            output_array.append(clone)
    rmtree(ligand_dir)
    return output_array
def clean(self):
    """Remove every scratch directory created by this instance."""
    for scratch_dir in self.cleanup_dirs:
        rmtree(scratch_dir)
def predict_ligand(self, ligand):
    """Score a single ligand and update its scores in-place.

    Parameters
    ----------
    ligand: oddt.toolkit.Molecule object
        Ligand to be scored

    Returns
    -------
    ligand: oddt.toolkit.Molecule object
        Scored ligand with updated scores
    """
    scored = self.score([ligand])
    return scored[0]
def predict_ligands(self, ligands):
    """Score many ligands at once.

    Parameters
    ----------
    ligands: iterable of oddt.toolkit.Molecule objects
        Ligands to be scored

    Returns
    -------
    ligand: iterator of oddt.toolkit.Molecule objects
        Scored ligands with updated scores
    """
    results = self.score(ligands)
    return results
def write_vina_pdbqt(mol, directory, flexible=True, name_id=None):
    """Write a single molecule as PDBQT into *directory* and return its path.

    For proteins pass ``flexible=False`` to avoid encoding torsions.
    An optional ``name_id`` is prepended to the file name to avoid
    collisions when many ligands share a title.
    """
    prefix = '' if name_id is None else str(name_id)
    # Replace every non-alphanumeric run in the title with an underscore.
    safe_title = re.sub('[^A-Za-z0-9]+', '_', mol.title)
    basename = '_'.join(part for part in (prefix, safe_title) if part) + '.pdbqt'
    mol_file = os.path.join(directory, basename)

    if is_openbabel_molecule(mol):
        if flexible:
            # auto bonding (b), preserve atom names (n), indices (p) and Hs (h)
            options = {'b': None, 'p': None, 'h': None, 'n': None}
        else:
            # for proteins write rigid mol (r) and combine all frags in one (c)
            options = {'r': None, 'c': None, 'h': None}
        kwargs = {'opt': options}
    else:
        kwargs = {'flexible': flexible}

    # HACK: OpenBabel 2.3.2 emits spurious ROOT/ENDROOT tags and a TORSDOF
    # line for rigid molecules; rewrite the output line by line to fix it.
    needs_ob23_fixup = (not flexible and is_openbabel_molecule(mol) and
                        oddt.toolkits.ob.__version__ < '2.4.0')
    if needs_ob23_fixup:
        with open(mol_file, 'w') as handle:
            for line in mol.write('pdbqt', overwrite=True, **kwargs).split('\n'):
                if line in ('ROOT', 'ENDROOT'):
                    continue
                if line[:7] == 'TORSDOF':
                    handle.write('TER\n')
                else:
                    handle.write(line + '\n')
    else:
        mol.write('pdbqt', mol_file, overwrite=True, **kwargs)
    return mol_file
def parse_vina_scoring_output(output):
    """Parse Autodock Vina ``--score_only`` output into a dictionary.

    Parameters
    ----------
    output : bytes
        Autodock Vina standard output (STDOUT), as returned by
        ``subprocess.check_output``.

    Returns
    -------
    out : dict
        Dictionary of scores computed by Autodock Vina; keys are the
        lower-cased score names prefixed with ``vina_``.
    """
    out = {}
    # Match the total "Affinity:" line or the 4-space-indented per-term lines.
    # BUGFIX: raw string -- '\s' in a plain literal is an invalid escape
    # (warning on Python 3.6+, error in future versions).
    r = re.compile(r'^(Affinity:|\s{4})')
    for line in output.decode('ascii').split('\n')[13:]:  # skip the banner
        if r.match(line):
            m = line.replace(' ', '').split(':')
            if m[0] == 'Affinity':
                m[1] = m[1].replace('(kcal/mol)', '')
            out[str('vina_' + m[0].lower())] = float(m[1])
    return out
def parse_vina_docking_output(output):
    """Parse Autodock Vina docking output into a list of score dicts.

    Parameters
    ----------
    output : bytes
        Autodock Vina standard output (STDOUT), as returned by
        ``subprocess.check_output``.

    Returns
    -------
    out : list of dict
        One dict per docked pose with ``vina_affinity``, ``vina_rmsd_lb``
        and ``vina_rmsd_ub`` (all kept as strings, as printed by Vina).
    """
    out = []
    # Result rows start with whitespace, a single-digit mode number, then
    # whitespace (Vina reports at most 9 modes, so one digit suffices).
    # BUGFIX: raw string -- '\s'/'\d' in a plain literal are invalid escapes.
    r = re.compile(r'^\s+\d\s+')
    for line in output.decode('ascii').split('\n')[13:]:  # skip the banner
        if r.match(line):
            s = line.split()
            out.append({'vina_affinity': s[1],
                        'vina_rmsd_lb': s[2],
                        'vina_rmsd_ub': s[3]})
    return out
|
import cv2
import numpy as np
import pyautogui
import time

# Continuously grab a fixed screen region and report whether a yellow road
# line lies left of, right of, or under the centre of the captured frame.
while True:
    # NOTE(review): pyautogui screenshots are RGB; cvtColor with BGR2RGB
    # swaps R and B, which numerically equals RGB2BGR -- presumably the
    # intent was to hand OpenCV a BGR image. Preserved as-is.
    img = np.array(pyautogui.screenshot(region=(380, 300, 320, 220)))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    frame = img
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Yellow color threshold in HSV space.
    low_yellow = np.array([17, 50, 50])
    high_yellow = np.array([25, 255, 255])
    yellow_mask = cv2.inRange(hsv_frame, low_yellow, high_yellow)
    yellow = cv2.bitwise_and(frame, frame, mask=yellow_mask)
    # Median blur removes speckle noise before the second thresholding pass.
    median = cv2.medianBlur(yellow, 15)
    hsv_frame2 = cv2.cvtColor(median, cv2.COLOR_BGR2HSV)
    yellow_mask2 = cv2.inRange(hsv_frame2, low_yellow, high_yellow)
    kernel = np.ones((5, 5), np.uint8)
    #diolation = cv2.dilate(yellow_mask2,kernel,iterations=1)
    cv2.imshow("yellow2", yellow_mask2)
    #cv2.imshow("frame", frame)
    cv2.imshow("median", median)
    # Sample every 32nd column of each mask row (10 samples per row) and
    # count non-yellow pixels on either side of the first yellow pixel.
    t = 0
    for row in yellow_mask2.tolist():
        sliced = row[slice(0, 320, 32)]
        #time.sleep(5)
        if 255 in sliced:
            left_side = sliced[0:sliced.index(255) + 1].count(0)
            right_side = sliced[sliced.index(255):10].count(0)
            t += 1
    # t counts rows containing yellow; require a solid line (>50 rows)
    # before judging position from the last row's left/right balance.
    try:
        if t > 50 and 0.75 <= (left_side / right_side) <= 1.50:
            print("You are on the line!!")
        elif t > 50 and (left_side / right_side) < 0.75:
            print("You are on the right side of the road.")
        elif t > 50 and 1.50 < (left_side / right_side):
            print("You are on the left side of the road.")
    except ZeroDivisionError:
        # right_side can be 0 when the yellow pixel is the last sample.
        pass
    key = cv2.waitKey(1)
    if key == 27:  # ESC quits
        break

# BUGFIX: the original called cap.release() here, but no VideoCapture `cap`
# was ever created (frames come from pyautogui), so exiting the loop raised
# NameError. Only the OpenCV windows need to be torn down.
cv2.destroyAllWindows()
|
# -*- coding:utf-8 -*-
"""
"""
from hypernets.tabular import get_tool_box
from hypernets.tabular.datasets import dsutils
from . import if_cuml_ready, is_cuml_installed
if is_cuml_installed:
import cudf
from hypernets.tabular.cuml_ex import CumlToolBox
@if_cuml_ready
class TestCumlTransformer:
    """Smoke tests for the cuML toolbox: preprocessor and estimators."""

    @classmethod
    def setup_class(cls):
        from sklearn.preprocessing import LabelEncoder
        bank = dsutils.load_bank()
        # Encode the target and one categorical column as integers.
        for col in ('y', 'education'):
            bank[col] = LabelEncoder().fit_transform(bank[col])
        cls.df = bank
        cls.cf = cudf.from_pandas(bank)

    def test_get_toolbox(self):
        # A cudf frame must resolve to the cuML toolbox.
        assert get_tool_box(self.cf) is CumlToolBox

    def test_general_preprocessor(self):
        features = self.cf.copy()
        target = features.pop('y')
        preprocessor = CumlToolBox.general_preprocessor(self.cf)
        transformed = preprocessor.fit_transform(features, target)
        assert CumlToolBox.is_cuml_object(transformed)
        # All output columns must be numeric.
        dtype_names = set(map(str, transformed.dtypes.to_dict().values()))
        assert dtype_names.issubset({'float64', 'int64', 'uint8'})

    def test_general_estimator(self):
        features = self.cf.copy()
        target = features.pop('y')
        preprocessor = CumlToolBox.general_preprocessor(self.cf)
        transformed = preprocessor.fit_transform(features, target)
        for estimator_name in (None, 'xgb', 'rf', 'gbm'):
            model = CumlToolBox.general_estimator(transformed, target,
                                                  estimator=estimator_name)
            model.fit(transformed, target)
            assert len(model.classes_) == 2
            prediction = model.predict(transformed)
            assert CumlToolBox.is_cuml_object(prediction)
            probabilities = model.predict_proba(transformed)
            assert CumlToolBox.is_cuml_object(probabilities)
|
import numpy as np
import pandas as pd
import datetime
from okokyst_metadata import surveys_lookup_table
import os
import re
import glob
import gsw
from okokyst_tools import pressure_to_depth
encoding = "ISO-8859-1"
__author__ = 'Elizaveta Protsenko'
__email__ = 'Elizaveta.Protsenko@niva.no'
__created__ = datetime.datetime(2020, 9, 23)
__version__ = "1.0"
__status__ = "Development"
def to_rename_columns(df, old_name, new_name):
    """Rename column *old_name* to *new_name* if present, else return df unchanged."""
    if old_name not in df.columns:
        return df
    return df.rename(columns={old_name: new_name})
def modify_df(df, onedrive, filename):
    """Normalise raw CTD columns to the canonical names/units used downstream.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw cast data as read from the instrument export.
    onedrive : str
        Folder where a ``NoOxygenData.txt`` marker is written when the
        oxygen channel looks dead.
    filename : str
        Source file name, recorded inside the marker file.

    Returns
    -------
    pandas.DataFrame
        Renamed, unit-converted, rounded frame.
    """
    # Map every known vendor spelling onto the canonical column name.
    # pandas.rename silently ignores keys that are absent, so one call
    # covers all input variants (previously done via repeated helper calls).
    df = df.rename(columns={
        'Depth(u)': 'Depth',
        'Sal.': 'Salinity',
        'T(FTU)': 'FTU',
        'T (FTU)': 'FTU',
        'OpOx %': 'OptOx',
        'Ox %': 'OptOx',
        'mg/l': 'OxMgL',
        'Opt': 'OptOx',
        'Opmg/l': 'OxMgL',
        'Opml/l': 'OxMlL',
    })
    df = df.astype({'Press': float})
    # Oxygen arrives either in mg/l or ml/l; convert mg/l -> ml/l
    # (1 ml/l = 1.42905 mg/l) so downstream code sees a single unit.
    if 'OxMgL' in df.columns:
        print('recalculate to ml/l')
        df = df.astype({'OxMgL': float})
        df['OxMgL'] = df.OxMgL.values / 1.42905
        df = df.rename(columns={'OxMgL': 'OxMlL'})
    try:
        df['Date'] = pd.to_datetime(df['Date'], format='%d.%m.%Y').dt.strftime('%d.%m.%Y')
    except Exception as e:
        print('date', e)
    try:
        df['Time'] = pd.to_datetime(df['Time'], format='%H:%M:%S').dt.strftime('%H.%M.%S')
    except Exception as e:
        print('time', e)
    try:
        df = df.astype({'OxMlL': float})
    except Exception as e:
        print('float', e)
    df = df.dropna(how='all', axis=1)
    df = df.round(4)
    # Fewer than 5 distinct oxygen readings usually means a dead sensor;
    # leave a marker file so the problem gets noticed.
    # BUGFIX: guard the column access -- files without any oxygen column
    # used to crash here with a KeyError; also close the file via `with`.
    if 'OptOx' in df.columns and len(set(df['OptOx'].values)) < 5:
        with open(f"{onedrive}\\NoOxygenData.txt", "w+") as er:
            er.write(filename)
    return df
class processStation(object):
    """Read one raw CTD export, split it into casts and assign each cast
    to a monitoring station by matching its maximum depth against the
    station depths in ``surveys_lookup_table``.

    Side effects: writes per-station .txt files (with metadata headers),
    .png profile plots, and files for unknown/cleaning casts into a
    date-stamped folder under ``onedrive``.
    """

    def __init__(self, inputpath,onedrive,survey = None):
        # inputpath: path to the instrument's .txt export
        # onedrive: destination root for the processed output
        # survey: region key in surveys_lookup_table; autodetected from the
        #         path when not given
        self.input_path = inputpath
        self.base_path = os.path.split(self.input_path)[0]
        name = os.path.split(self.input_path)[1]
        self.onedrive = onedrive
        if survey != None:
            self.survey = survey
        else:
            self.survey = self.get_region_from_path()
        #try:
        # y = re.findall("[0-9]", str(name))
        # x = ''.join(y)
        # print (name,x)
        # self.correct_survey_date = pd.to_
        # datetime(x, format='%Y%m%d').strftime('%d.%m.%Y')
        # print ('correct_survey_date', self.correct_survey_date)#.values
        #except:
        # y = re.findall("[0-9]{8}", str(name))
        # x = ''.join(y)
        # print(name, x)
        # self.correct_survey_date = pd.to_datetime(x, format='%Y%m%d').strftime('%d.%m.%Y')
        # print('correct_survey_date', self.correct_survey_date) # .values
        self.non_assigned = []
        self.assigned = []
        # Known stations for this survey and their nominal bottom depths.
        self.stations_list = list(surveys_lookup_table[self.survey].keys())
        self.stations_depths = np.array([surveys_lookup_table[self.survey][st]['depth'] for st in self.stations_list])
        self.df_all = self.read_convert_df()
        try:
            self.calc_depth()
        except Exception as e:
            print('Error in reading the dataframe', e)
        try:
            self.df_all = modify_df(self.df_all, self.onedrive,name)
            # 'Ser' is the cast (series) number; process each cast separately.
            grouped = self.df_all.groupby('Ser')
            for name, group_df in grouped:
                self.match_stations_by_depth(group_df)
        except Exception as e:
            print('Error in reading the dataframe',e)

    def calc_depth(self):
        """Convert the pressure column to depth using the latitude of the
        survey's first station (stations are close enough to share it)."""
        first_st = list(surveys_lookup_table[self.survey].keys())[0]
        #print ('calc depth')
        latitude = surveys_lookup_table[self.survey][first_st]["station.latitude"]
        depths = []
        for p in self.df_all['Press'].values:
            d = pressure_to_depth(float(p), latitude)
            depths.append(d)
        self.df_all['Depth'] = depths

    def get_region_from_path(self):
        """Infer the survey region from substrings of the input path.

        Returns None when no known region name appears in the path.
        """
        regions = {'Leon': 'Sognefjorden', 'Kvitsoy': 'Hardangerfjorden',
                   'Hardangerfjorden': 'Hardangerfjorden', 'Sognefjorden': 'Sognefjorden', 'RMS': 'RMS',
                   'Aquakompetens': 'Aqua kompetanse'}
        for r in regions:
            name_to_check = re.compile(r, re.IGNORECASE)
            find_match = name_to_check.search(self.input_path)
            if find_match:
                return regions[r]

    def read_convert_df(self):
        """Read the export, guessing how many banner rows to skip.

        The exports have a variable-length preamble, so up to 15 skip
        counts are attempted, first with ',' decimals in ISO-8859-1, then
        falling back to '.' decimals. Returns the parsed DataFrame, or
        None if every attempt failed.
        """
        print ('\n******************************')
        print ('Reading', self.input_path)
        # read the document and skip undefined number of unneeded rows
        for n in range(1, 16):
            #print('Attempt N', n)
            try:
                df_all = pd.read_csv(self.input_path, skiprows=n, header=n-1,
                                     sep=';', decimal=',', encoding=encoding)
                #print (df_all.head())
                # Fewer than 10 columns means the header row was wrong --
                # retry with the header shifted by one line.
                if len(df_all.columns) < 10:
                    #print('short', df_all.columns)
                    try:
                        df_all = pd.read_csv(self.input_path, skiprows=n, header=n,
                                             sep=';', decimal=',', encoding=encoding)
                        #print(df_all.columns)
                        break
                    except Exception as e:
                        #print('Exception 2')
                        pass
                else:
                    break
            except Exception as e:
                #print('Exception 1')
                df_all = None
                # Fallback: same file but with '.' as the decimal separator.
                try:
                    df_all = pd.read_csv(self.input_path, skiprows=n, header=n-1,
                                         sep=';', decimal='.')
                    if len(df_all.columns) < 10:
                        #print('short', df_all.columns)
                        try:
                            df_all = pd.read_csv(self.input_path, skiprows=n, header=n,
                                                 sep=';', decimal=',')
                            #print(df_all.columns)
                            df_all.head()
                            break
                        except Exception as e:
                            #print('Exception 4')
                            pass
                except Exception as e:
                    #print('Exception 3')
                    df_all = None
        try:
            pass
            #print ('Successfully read file')
            #print (df_all.columns)
        except Exception as e:
            #print (e)
            pass
        return df_all

    def match_stations_by_depth(self, group):
        """Assign one cast to the station whose nominal depth is closest
        (from above) to the cast's maximum depth, then write it out."""
        # Get number of the cast
        Ser = group['Ser'].values[0]
        print('Processing Cast', Ser)
        self.survey_date = group.Date.values[0]
        max_depth = np.max(group['Depth'].max())
        # find the closest depth in the arr with all stations for this region
        difs = self.stations_depths - max_depth
        print('difs', difs)
        # Keep only stations at least as deep as the cast (tolerance 1 m).
        # NOTE(review): np.min below raises if no station is deep enough --
        # presumably never happens with real data; confirm.
        difs_pos = list(filter(lambda x : x > -1, difs))
        #print (difs_pos,'filtered difs')
        #sqr_difs = np.sqrt(difs**2)
        min_dif = np.min(difs_pos)
        print('max depth', max_depth,'min difference', min_dif, 'Time', group.Time.values[0])
        self.make_new_base_path()
        if 'Salinity' not in group.columns:
            group = self.calc_salinity(group)
        #if self.survey == 'Hardangerfjorden':
        # dif_threshold = 50
        #else:
        dif_threshold = 50
        group=group.drop(columns=['Press'])
        columns = group.columns
        # Fix the output column order; the oxygen column name depends on
        # which unit survived modify_df.
        if 'OxMgL' in columns:
            columnOrder=['Ser','Meas','Salinity','Conductivity', 'Temp', 'FTU',
                         'OptOx', 'OxMgL', 'Density', 'Depth', 'Date', 'Time']
            #print('max OxMlL') #, group['OxMgL'].max(), group.columns)
        else:
            columnOrder=['Ser','Meas','Salinity','Conductivity', 'Temp', 'FTU',
                         'OptOx', 'OxMlL', 'Density', 'Depth', 'Date', 'Time']
            #print('max OxMlL') #, group['OxMlL'].max(), group.columns)
        group=group.reindex(columns=columnOrder)
        if min_dif < dif_threshold:
            # double check the sign of the difference (if cast went deeper than the station, do no assign)
            nearest_depth_id = np.where(difs == min_dif)[0][0]
            #print ('stations list', self.stations_list)
            self.station_name = self.stations_list[nearest_depth_id]
            self.station_metadata = surveys_lookup_table[self.survey][self.station_name]
            # Two casts matching the same station: keep both, mark the later.
            if self.station_name in self.assigned:
                print(self.station_name, 'already assigned stations:', self.assigned)
                print ("duplicate")
                self.station_name = self.station_name + "_duplicate"
            # Save df matched by station
            #self.filename = os.path.join(self.base_path, self.station_name + '.txt')
            self.filename = os.path.join(self.new_base_path, self.station_name + '_temp.txt')
            self.figname = os.path.join(self.new_base_path, self.station_name + '.png')
            print('Assigned station_name', self.station_name)
            ##print('save data to file with ', self.filename, Ser)
            # Quick-look oxygen profile plot for manual QC.
            import matplotlib.pyplot as plt
            plt.figure()
            plt.style.use('ggplot')
            plt.title(self.station_name)
            plt.plot(group['OxMlL'],group.Depth)
            plt.ylim(group.Depth.max(),group.Depth.min())
            plt.savefig(self.figname)
            group.to_csv(self.filename, sep=';')
            #Add header and save update file in the new location
            self.assigned.append(self.station_name)
            self.add_metadata_header()
        else:
            print('Was not able to find a matching station name')
            # Very shallow casts are assumed to be sensor-cleaning dips.
            if max_depth < 10:
                print("Probably it is a cleaning station ")
                new_filename = os.path.join(self.new_base_path, 'Cleaning_station' + str(Ser) + '.txt')
            else:
                #print('available station depths', self.stations_depths)
                #filename = self.base_path + r'\\Unknown_station' + str(Ser) + '.txt'
                print('Cast Unknown_station', Ser)
                new_filename = self.new_base_path + r'\\Unknown_station' + str(Ser) + '.txt'
            self.non_assigned.append(new_filename)
            #group.to_csv(filename, index=False, sep=';')
            #print (group['OxMlL'].values.max())
            group.to_csv(new_filename, index=False, sep=';')
        #else:
        # print ('Date of measurement does not match date in a filename')
        # print(self.survey_date, self.correct_survey_date, self.survey_date == self.correct_survey_date)
        return

    def calc_salinity(self,group):
        """Derive practical salinity from conductivity, temperature and
        pressure (gsw.SP_from_C) when the export lacks a salinity column."""
        print( 'calculating_salinity')
        salinity = []
        for n in range(len(group['Cond.'])):
            s = gsw.SP_from_C(group['Cond.'].values[n], group['Temp'].values[n], group['Press'].values[n])
            salinity.append(s)
        group['Salinity'] = salinity
        return group

    def make_new_base_path(self):
        """Create (if needed) the '<onedrive>/<YYYY-MM-DD> CTD data' output
        folder derived from the cast date and remember it."""
        # datetime.datetime.strptime(
        date_folder = pd.to_datetime(str(self.survey_date), format='%d.%m.%Y').strftime('%Y-%m-%d')
        ##self.new_base_path = os.path.join(onedrive, self.survey, date_folder, date_folder + " CTD data")
        self.new_base_path = os.path.join(self.onedrive, date_folder + " CTD data")
        if not os.path.exists(self.new_base_path):
            os.makedirs(self.new_base_path)

    def add_metadata_header(self):
        """Prepend the station's metadata header to the temp file and save
        the result as '<station>.txt'; the temp file is then removed."""
        header = self.station_metadata['station.header']
        #print ('adding metadata header to ', self.station_name,'.txt')
        new_filename = os.path.join(self.new_base_path, self.station_name + '.txt')
        print ('save data to', new_filename)
        # Open initial file, update header, save the new file in One_Drive
        with open(self.filename, 'r') as read_obj, open(new_filename, 'w') as write_obj:
            write_obj.write(header)
            for line in read_obj:
                write_obj.write(line)
        try:
            os.remove(self.filename)
        except Exception as e:
            print(e)
def manual_add_metadata_header(filepath, station_name):
    """Manually prepend a station's metadata header to an output file.

    Searches every survey in ``surveys_lookup_table`` for *station_name*,
    writes '<station_name>.txt' next to *filepath* with the header
    prepended, and removes the original file.

    Raises
    ------
    ValueError
        If *station_name* is not found in any survey.
    """
    base_path = os.path.split(filepath)[0]
    header = None
    for survey in surveys_lookup_table:
        if station_name in surveys_lookup_table[survey]:
            header = surveys_lookup_table[survey][station_name]['station.header']
            break
    # BUGFIX: an unknown station previously left `header` unbound and
    # crashed later with NameError; fail early with a clear message.
    if header is None:
        raise ValueError('Unknown station name: %s' % station_name)
    new_filename = os.path.join(base_path, station_name + '.txt')
    # Open initial file, update header, save the new file in One_Drive
    with open(filepath, 'r') as read_obj, open(new_filename, 'w') as write_obj:
        write_obj.write(header)
        for line in read_obj:
            write_obj.write(line)
    try:
        os.remove(filepath)
    except Exception as e:
        print (e)
    #os.rename(filepath, base_path +f'to_{station_name}.txt')
if __name__ == "__main__":
    # Historical working directories, kept for reference:
    #k_work_dir = r'K:/Avdeling/214-Oseanografi/DATABASER/OKOKYST_2017/'
    #task = "sognefjorden"
    #leon = r"K:\Avdeling\214-Oseanografi\DATABASER\OKOKYST_2017\OKOKYST_NS_Nord_Leon\\"
    def call_process(main_path, foldername):
        # Run processStation on every .txt cast file in main_path/foldername;
        # output is written back into the same folder (it doubles as the
        # OneDrive destination).
        path = os.path.join(main_path, foldername)
        onedrive = path
        files = glob.glob(path + '\*txt')
        for f in files:
            # Files with 'OBS' in the name are skipped -- presumably
            # operator notes rather than casts (confirm with data owners).
            if 'OBS' not in f:
                processStation(f,onedrive)
    user = 'ELP'
    main_path_RMS = fr"C:\Users\{user}\OneDrive - NIVA\Okokyst_CTD\Norskehavet_Sor\RMS"
    main_path_aqua = fr"C:\Users\{user}\OneDrive - NIVA\Okokyst_CTD\Norskehavet_Sor\Aquakompetens"
    #foldernames = [f for f in os.listdir(main_path) if re.match(r'2021', f)]
    #RMS
    #call_process(main_path_RMS,'06_2021')
    #call_process('04-2021')
    #call_process('06-2021')
    #call_process('07-2021')
    #call_process('08-2021')
    #Aqua kompetanse
    call_process(main_path_aqua,'2021-08')
    # Sognefjorden 2021
    main_path_sognefjorden = fr"C:\Users\{user}\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Sognefjorden"
    #foldername = "2021-01-25"
    # Here the automatic assignment did not work, due to bad weather the CTD did not reach the bottom
    #call_process(main_path_sognefjorden, "2021-02-17")
    #manual_add_metadata_header(r"C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Sognefjorden\2021-02-17\2021-02-17 CTD data\Unknown_station2.txt", 'VT16')
    #call_process(main_path_sognefjorden, '2021-03-14')
    #call_process(main_path_sognefjorden, '2021-04-18')
    #call_process(main_path_sognefjorden, '2021-05-19')
    #call_process(main_path_sognefjorden, '2021-06-17')
    #call_process(main_path_sognefjorden, '2021-07-14')
    #call_process(main_path_sognefjorden, '2021-08-18')
    main_path_hardangerfjorden = r'C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Hardangerfjorden'
    #call_process(main_path_hardangerfjorden,'2021-01-18',survey = 'Hardangerfjorden_old')
    #manual_add_metadata_header(r'C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Hardangerfjorden\2021-01-18\2021-01-18 CTD data\Unknown_station3.txt',
    #                           "VT70")
    #call_process(main_path_hardangerfjorden,'2021-02-23',survey = 'Hardangerfjorden_old')
    #call_process(main_path_hardangerfjorden,'2021-03-22-23')#,survey = 'Hardangerfjorden_old'
    #manual_add_metadata_header(r"C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Hardangerfjorden\2021-03-22-23\2021-03-22 CTD data\Unknown_station4.txt",
    #                           'VR49')
    call_process(main_path_hardangerfjorden, "2021-04-20-21")
    #call_process(main_path_hardangerfjorden, '2021-05-18-20')
    #call_process(main_path_hardangerfjorden, '2021-06')
    #call_process(main_path_hardangerfjorden, "2021-07")
    #call_process(main_path_hardangerfjorden, '2021-08')
    #Has to be checked, no oxygen! did not work
    ###call_process(main_path_hardangerfjorden, "2021-05-18-20")
    #call_process(main_path_hardangerfjorden, "2021-07")
    print ('\n\n')
    ##for f in foldernames:
    ##    call_process(f)
|
"""Packaging script for PyEzEmail (long description comes from README.md)."""
import pathlib

from setuptools import find_packages, setup

# Directory containing this script; used to locate README.md.
HERE = pathlib.Path(__file__).parent

# Package metadata.
VERSION = '0.0.4'
PACKAGE_NAME = 'PyEzEmail'
AUTHOR = 'Pedro Lamarca'
AUTHOR_EMAIL = 'pedro.lamarca.1997@gmail.com'
URL = 'https://github.com/shinraxor'
LICENSE = 'MIT'
DESCRIPTION = 'Libreria para facilitar el envio de mails'
# Rendered as Markdown on the PyPI project page.
LONG_DESCRIPTION = (HERE / "README.md").read_text(encoding='utf-8')
LONG_DESC_TYPE = "text/markdown"

# No third-party runtime dependencies.
INSTALL_REQUIRES = []

setup(
    name=PACKAGE_NAME,
    version=VERSION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    license=LICENSE,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type=LONG_DESC_TYPE,
    install_requires=INSTALL_REQUIRES,
    packages=find_packages(),
    include_package_data=True,
)
|
# -*- coding: utf-8 -*-
import unittest
import datetime
from pyboleto.bank.santander import BoletoSantander
from .testutils import BoletoTestCase
class TestBancoSantander(BoletoTestCase):
    """Checks barcode and 'linha digitavel' generation for Banco Santander."""

    def setUp(self):
        # Three boletos differing only in their sequential numbers.
        self.dados = []
        for offset in range(3):
            boleto = BoletoSantander()
            boleto.agencia_cedente = '1333'
            boleto.conta_cedente = '0707077'
            boleto.data_vencimento = datetime.date(2012, 7, 22)
            boleto.data_documento = datetime.date(2012, 7, 17)
            boleto.data_processamento = datetime.date(2012, 7, 17)
            boleto.valor_documento = 2952.95
            boleto.nosso_numero = str(1234567 + offset)
            boleto.numero_documento = str(12345 + offset)
            boleto.ios = '0'
            self.dados.append(boleto)

    def test_linha_digitavel(self):
        expected = '03399.07073 07700.000123 34567.901029 5 54020000295295'
        self.assertEqual(self.dados[0].linha_digitavel, expected)

    def test_codigo_de_barras(self):
        expected = '03395540200002952959070707700000123456790102'
        self.assertEqual(self.dados[0].barcode, expected)

    def test_agencia(self):
        self.assertEqual(self.dados[0].agencia_cedente, '1333')

    def test_nosso_numero(self):
        boleto = self.dados[0]
        self.assertEqual(boleto.nosso_numero, '000001234567')
        self.assertEqual(boleto.format_nosso_numero(), '000001234567-9')
# Collect this module's tests so older runners can load the suite directly.
suite = unittest.TestLoader().loadTestsFromTestCase(TestBancoSantander)

if __name__ == '__main__':
    unittest.main()
|
"""
Demos the tricks on the bebop. Make sure you have enough room to perform them!
Author: Amy McGovern
"""
from pyparrot.Bebop import Bebop
bebop = Bebop()
print("connecting")
success = bebop.connect(10)
print(success)
print("sleeping")
bebop.smart_sleep(5)
bebop.ask_for_state_update()
bebop.safe_takeoff(10)
print("flip left")
print("flying state is %s" % bebop.sensors.flying_state)
success = bebop.flip(direction="left")
print("mambo flip result %s" % success)
bebop.smart_sleep(5)
print("flip right")
print("flying state is %s" % bebop.sensors.flying_state)
success = bebop.flip(direction="right")
print("mambo flip result %s" % success)
bebop.smart_sleep(5)
print("flip front")
print("flying state is %s" % bebop.sensors.flying_state)
success = bebop.flip(direction="front")
print("mambo flip result %s" % success)
bebop.smart_sleep(5)
print("flip back")
print("flying state is %s" % bebop.sensors.flying_state)
success = bebop.flip(direction="back")
print("mambo flip result %s" % success)
bebop.smart_sleep(5)
bebop.smart_sleep(5)
bebop.safe_land(10)
print("DONE - disconnecting")
bebop.disconnect()
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for Downpour."""
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import os
import signal
import subprocess
import time
import unittest
import sys
from op_test import OpTest
from paddle.fluid.trainer_desc import DistMultiTrainer
from paddle.fluid.device_worker import DownpourSGD, DownpourSGDOPT
from paddle.fluid.incubate.fleet.parameter_server.pslib.node import DownpourWorker, DownpourServer
from google.protobuf import text_format
import paddle.fluid.incubate.fleet.parameter_server.pslib.ps_pb2 as pslib
from paddle.fluid.trainer_factory import TrainerFactory
cache_path = os.path.expanduser('~/.cache/paddle/dataset')
class TestListenAndServOp(unittest.TestCase):
"""This class is Test Listen And ServOp."""
def setUp(self):
"""This function is set Up."""
if not os.path.exists(cache_path):
os.makedirs(cache_path)
def test_device_work_use_cvm(self):
"""test device work use_cvm."""
if sys.platform == 'win32' or sys.platform == 'sys.platform':
pass
else:
print(sys.platform)
if not os.path.exists('{}/{}'.format(cache_path,
'fleet_desc.prototxt')):
cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt -P {}/".format(
cache_path)
os.system(cmd)
x = fluid.layers.data(name='x', shape=[1], dtype='int64')
x_emb = fluid.layers.embedding(input=x,
size=[1, 2],
is_distributed=True)
y_predict = fluid.layers.fc(input=x_emb, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
ps_param = pslib.PSParameter()
with open("{}/fleet_desc.prototxt".format(cache_path)) as f:
text_format.Merge(f.read(), ps_param)
fleet_desc = ps_param
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
opt_info = {}
main_program = fluid.default_main_program()
program_id = str(id(avg_cost.block.program))
program_configs = {}
program_configs[program_id] = {
"pull_sparse": [0],
"push_sparse": [0]
}
program_configs[program_id]["pull_dense"] = [1]
program_configs[program_id]["push_dense"] = [1]
worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
opt_info["program_configs"] = program_configs
opt_info["trainer"] = "DistMultiTrainer"
opt_info["device_worker"] = "DownpourSGD"
opt_info["optimizer"] = "DownpourSGD"
opt_info["fleet_desc"] = ps_param
opt_info["worker_skipped_ops"] = worker_skipped_ops
opt_info["use_cvm"] = True
opt_info["scale_datanorm"] = -1
opt_info["dump_slot"] = False
opt_info["stat_var_names"] = []
worker = DownpourWorker(None)
server = DownpourServer()
server.add_sparse_table(0, {})
worker.get_desc().CopyFrom(ps_param.trainer_param[0])
opt_info["program_id_to_worker"] = {program_id: worker}
main_program._fleet_opt = opt_info
trainer = TrainerFactory()._create_trainer(main_program._fleet_opt)
trainer._set_program(main_program)
trainer._gen_trainer_desc()
def test_device_work(self):
"""This function is test devicve worker."""
if sys.platform == 'win32' or sys.platform == 'sys.platform':
pass
else:
print(sys.platform)
if not os.path.exists('{}/{}'.format(cache_path,
'fleet_desc.prototxt')):
cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt -P {}/".format(
cache_path)
os.system(cmd)
x = fluid.layers.data(name='x', shape=[1], dtype='int64')
x_emb = fluid.layers.embedding(input=x,
size=[1, 2],
is_distributed=True)
y_predict = fluid.layers.fc(input=x_emb, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
ps_param = pslib.PSParameter()
with open("{}/fleet_desc.prototxt".format(cache_path)) as f:
text_format.Merge(f.read(), ps_param)
fleet_desc = ps_param
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
opt_info = {}
main_program = fluid.default_main_program()
program_id = str(id(avg_cost.block.program))
program_configs = {}
program_configs[program_id] = {
"pull_sparse": [0],
"push_sparse": [0]
}
program_configs[program_id]["pull_dense"] = [1]
program_configs[program_id]["push_dense"] = [1]
worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
opt_info["program_configs"] = program_configs
opt_info["trainer"] = "DistMultiTrainer"
opt_info["device_worker"] = "DownpourSGD"
opt_info["optimizer"] = "DownpourSGD"
opt_info["fleet_desc"] = ps_param
opt_info["worker_skipped_ops"] = worker_skipped_ops
opt_info["use_cvm"] = False
opt_info["scale_datanorm"] = -1
opt_info["dump_slot"] = False
opt_info["stat_var_names"] = []
worker = DownpourWorker(None)
worker.get_desc().CopyFrom(ps_param.trainer_param[0])
opt_info["program_id_to_worker"] = {program_id: worker}
main_program._fleet_opt = opt_info
trainer = TrainerFactory()._create_trainer(main_program._fleet_opt)
trainer._set_program(main_program)
trainer._gen_trainer_desc()
    def test_downpour_opt_work(self):
        """Exercise the DownpourSGDOPT device worker end to end.

        Identical setup to ``test_device_work`` except that
        ``opt_info["device_worker"]`` selects "DownpourSGDOPT".
        """
        # NOTE(review): `sys.platform == 'sys.platform'` compares against the
        # literal string and can never be true, so only win32 is skipped.
        if sys.platform == 'win32' or sys.platform == 'sys.platform':
            pass
        else:
            print(sys.platform)
            # Fetch the fleet descriptor once and cache it locally.
            if not os.path.exists('{}/{}'.format(cache_path,
                                                 'fleet_desc.prototxt')):
                cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt -P {}/".format(
                    cache_path)
                os.system(cmd)
            # Tiny network: sparse lookup -> fc -> squared error loss.
            x = fluid.layers.data(name='x', shape=[1], dtype='int64')
            x_emb = fluid.layers.embedding(input=x,
                                           size=[1, 2],
                                           is_distributed=True)
            y_predict = fluid.layers.fc(input=x_emb, size=1, act=None)
            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
            avg_cost = fluid.layers.mean(cost)
            ps_param = pslib.PSParameter()
            with open("{}/fleet_desc.prototxt".format(cache_path)) as f:
                text_format.Merge(f.read(), ps_param)
            # NOTE(review): fleet_desc is assigned but never used below.
            fleet_desc = ps_param
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            # Assemble the fleet optimization info consumed by the trainer.
            opt_info = {}
            main_program = fluid.default_main_program()
            program_id = str(id(avg_cost.block.program))
            program_configs = {}
            program_configs[program_id] = {
                "pull_sparse": [0],
                "push_sparse": [0]
            }
            program_configs[program_id]["pull_dense"] = [1]
            program_configs[program_id]["push_dense"] = [1]
            worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
            opt_info["program_configs"] = program_configs
            opt_info["trainer"] = "DistMultiTrainer"
            opt_info["device_worker"] = "DownpourSGDOPT"
            opt_info["optimizer"] = "DownpourSGD"
            opt_info["fleet_desc"] = ps_param
            opt_info["worker_skipped_ops"] = worker_skipped_ops
            opt_info["use_cvm"] = False
            opt_info["scale_datanorm"] = -1
            opt_info["dump_slot"] = False
            opt_info["stat_var_names"] = []
            worker = DownpourWorker(None)
            worker.get_desc().CopyFrom(ps_param.trainer_param[0])
            opt_info["program_id_to_worker"] = {program_id: worker}
            main_program._fleet_opt = opt_info
            trainer = TrainerFactory()._create_trainer(main_program._fleet_opt)
            trainer._set_program(main_program)
            trainer._gen_trainer_desc()
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from typing import Callable, Union
from alibi_detect.utils.prediction import tokenize_transformer
def predict_batch(x: Union[list, np.ndarray, torch.Tensor], model: Union[Callable, nn.Module, nn.Sequential],
                  device: torch.device = None, batch_size: int = int(1e10), preprocess_fn: Callable = None,
                  dtype: Union[np.dtype, torch.dtype] = np.float32) -> Union[np.ndarray, torch.Tensor, tuple]:
    """
    Make batch predictions on a model.
    Parameters
    ----------
    x
        Batch of instances.
    model
        PyTorch model (or any callable mapping a batch tensor to outputs).
    device
        Device type used. The default None tries to use the GPU and falls back on CPU if needed.
        Can be specified by passing either torch.device('cuda') or torch.device('cpu').
    batch_size
        Batch size used during prediction.
    preprocess_fn
        Optional preprocessing function for each batch.
    dtype
        Model output type, e.g. np.float32 or torch.float32. Passing a torch
        dtype returns tensors; anything else returns numpy arrays.
    Returns
    -------
    Numpy array, torch tensor or tuples of those with model outputs.
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if isinstance(x, np.ndarray):
        x = torch.from_numpy(x)
    n = len(x)
    # NOTE(review): with n == 0 the concatenation at the end raises; empty
    # input is presumably out of scope for callers — confirm.
    n_minibatch = int(np.ceil(n / batch_size))
    # A torch dtype signals "return tensors"; any other dtype means numpy.
    return_np = not isinstance(dtype, torch.dtype)
    return_list = False
    preds = []  # type: Union[list, tuple]
    with torch.no_grad():
        for i in range(n_minibatch):
            istart, istop = i * batch_size, min((i + 1) * batch_size, n)
            x_batch = x[istart:istop]
            # `callable(...)` instead of `isinstance(..., Callable)`: the
            # idiomatic check, and it correctly treats the default None as
            # "no preprocessing".
            if callable(preprocess_fn):
                x_batch = preprocess_fn(x_batch)
            preds_tmp = model(x_batch.to(device))  # type: ignore
            if isinstance(preds_tmp, (list, tuple)):
                if len(preds) == 0:  # init tuple with lists to store predictions
                    preds = tuple([] for _ in range(len(preds_tmp)))
                    return_list = isinstance(preds_tmp, list)
                for j, p in enumerate(preds_tmp):
                    # Move GPU tensors to host memory before collecting.
                    if device.type == 'cuda' and isinstance(p, torch.Tensor):
                        p = p.cpu()
                    preds[j].append(p if not return_np or isinstance(p, np.ndarray) else p.numpy())
            elif isinstance(preds_tmp, (np.ndarray, torch.Tensor)):
                if device.type == 'cuda' and isinstance(preds_tmp, torch.Tensor):
                    preds_tmp = preds_tmp.cpu()
                preds.append(preds_tmp if not return_np or isinstance(preds_tmp, np.ndarray)  # type: ignore
                             else preds_tmp.numpy())
            else:
                raise TypeError(f'Model output type {type(preds_tmp)} not supported. The model output '
                                f'type needs to be one of list, tuple, np.ndarray or torch.Tensor.')
    # Stitch the per-batch outputs back into one array/tensor (or a tuple of them).
    concat = partial(np.concatenate, axis=0) if return_np else partial(torch.cat, dim=0)
    out = tuple(concat(p) for p in preds) if isinstance(preds, tuple) else concat(preds)
    if return_list:
        out = list(out)
    return out
def predict_batch_transformer(x: Union[list, np.ndarray], model: Union[nn.Module, nn.Sequential],
                              tokenizer: Callable, max_len: int, device: torch.device = None,
                              batch_size: int = int(1e10), dtype: Union[np.float32, torch.dtype] = np.float32) \
        -> Union[np.ndarray, torch.Tensor]:
    """
    Make batch predictions using a transformers tokenizer and model.
    Parameters
    ----------
    x
        Batch of instances.
    model
        PyTorch model.
    tokenizer
        Tokenizer for model.
    max_len
        Max sequence length for tokens.
    device
        Device type used. The default None tries to use the GPU and falls back on CPU if needed.
        Can be specified by passing either torch.device('cuda') or torch.device('cpu').
    batch_size
        Batch size used during prediction.
    dtype
        Model output type, e.g. np.float32 or torch.float32.
    Returns
    -------
    Numpy array or torch tensor with model outputs.
    """
    # Bind the tokenizer settings once, then delegate all batching logic to
    # predict_batch with tokenization as the per-batch preprocessing step.
    tokenize = partial(tokenize_transformer, tokenizer=tokenizer, max_len=max_len, backend='pt')
    return predict_batch(
        x,
        model,
        device=device,
        preprocess_fn=tokenize,
        batch_size=batch_size,
        dtype=dtype,
    )
|
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class TpdmEvaluationRatingResult(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> Swagger-declared type; consumed by to_dict() below.
    swagger_types = {
        'rating': 'float',
        'rating_result_title': 'str',
        'result_datatype_type_descriptor': 'str'
    }
    # Attribute name -> JSON key used on the wire by the Ed-Fi API.
    attribute_map = {
        'rating': 'rating',
        'rating_result_title': 'ratingResultTitle',
        'result_datatype_type_descriptor': 'resultDatatypeTypeDescriptor'
    }
    def __init__(self, rating=None, rating_result_title=None, result_datatype_type_descriptor=None, _configuration=None):  # noqa: E501
        """TpdmEvaluationRatingResult - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        # Backing fields for the properties below.
        self._rating = None
        self._rating_result_title = None
        self._result_datatype_type_descriptor = None
        self.discriminator = None
        # Assign via the property setters so client-side validation runs.
        self.rating = rating
        self.rating_result_title = rating_result_title
        self.result_datatype_type_descriptor = result_datatype_type_descriptor
    @property
    def rating(self):
        """Gets the rating of this TpdmEvaluationRatingResult.  # noqa: E501
        The numerical summary rating or score for the evaluation.  # noqa: E501
        :return: The rating of this TpdmEvaluationRatingResult.  # noqa: E501
        :rtype: float
        """
        return self._rating
    @rating.setter
    def rating(self, rating):
        """Sets the rating of this TpdmEvaluationRatingResult.
        The numerical summary rating or score for the evaluation.  # noqa: E501
        :param rating: The rating of this TpdmEvaluationRatingResult.  # noqa: E501
        :type: float
        """
        # Required field: reject None when client-side validation is enabled.
        if self._configuration.client_side_validation and rating is None:
            raise ValueError("Invalid value for `rating`, must not be `None`")  # noqa: E501
        self._rating = rating
    @property
    def rating_result_title(self):
        """Gets the rating_result_title of this TpdmEvaluationRatingResult.  # noqa: E501
        The title of Rating Result.  # noqa: E501
        :return: The rating_result_title of this TpdmEvaluationRatingResult.  # noqa: E501
        :rtype: str
        """
        return self._rating_result_title
    @rating_result_title.setter
    def rating_result_title(self, rating_result_title):
        """Sets the rating_result_title of this TpdmEvaluationRatingResult.
        The title of Rating Result.  # noqa: E501
        :param rating_result_title: The rating_result_title of this TpdmEvaluationRatingResult.  # noqa: E501
        :type: str
        """
        # Required field with a max length of 50 (from the API schema).
        if self._configuration.client_side_validation and rating_result_title is None:
            raise ValueError("Invalid value for `rating_result_title`, must not be `None`")  # noqa: E501
        if (self._configuration.client_side_validation and
                rating_result_title is not None and len(rating_result_title) > 50):
            raise ValueError("Invalid value for `rating_result_title`, length must be less than or equal to `50`")  # noqa: E501
        self._rating_result_title = rating_result_title
    @property
    def result_datatype_type_descriptor(self):
        """Gets the result_datatype_type_descriptor of this TpdmEvaluationRatingResult.  # noqa: E501
        The datatype of the result. The results can be expressed as a number, percentile, range, level, etc.  # noqa: E501
        :return: The result_datatype_type_descriptor of this TpdmEvaluationRatingResult.  # noqa: E501
        :rtype: str
        """
        return self._result_datatype_type_descriptor
    @result_datatype_type_descriptor.setter
    def result_datatype_type_descriptor(self, result_datatype_type_descriptor):
        """Sets the result_datatype_type_descriptor of this TpdmEvaluationRatingResult.
        The datatype of the result. The results can be expressed as a number, percentile, range, level, etc.  # noqa: E501
        :param result_datatype_type_descriptor: The result_datatype_type_descriptor of this TpdmEvaluationRatingResult.  # noqa: E501
        :type: str
        """
        # Required field with a max length of 306 (from the API schema).
        if self._configuration.client_side_validation and result_datatype_type_descriptor is None:
            raise ValueError("Invalid value for `result_datatype_type_descriptor`, must not be `None`")  # noqa: E501
        if (self._configuration.client_side_validation and
                result_datatype_type_descriptor is not None and len(result_datatype_type_descriptor) > 306):
            raise ValueError("Invalid value for `result_datatype_type_descriptor`, length must be less than or equal to `306`")  # noqa: E501
        self._result_datatype_type_descriptor = result_datatype_type_descriptor
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the generated model subclasses dict, include its own items too.
        if issubclass(TpdmEvaluationRatingResult, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TpdmEvaluationRatingResult):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, TpdmEvaluationRatingResult):
            return True
        return self.to_dict() != other.to_dict()
|
"""Main configuration file"""
# Define the application directory
from os import getenv, path
from environs import Env
BASE_DIR = path.abspath(path.dirname(__file__))
env = Env()
current_env = getenv('BLOG_ENV') or 'local'
if not path.exists("{}.env".format(current_env)):
raise EnvironmentError("BLOG_ENV not set properly for {} env.".format(
current_env))
# loading the selected .env file
project_folder = path.expanduser(BASE_DIR)
env.read_env(path.join(project_folder, "{}.env".format(current_env)))
# ######################## #
# #### Configurations #### #
# ######################## #
DEBUG = env('DEBUG')
# MongoDB Configuration
MONGODB_DATABASE_URI = env("MONGODB_DATABASE_URI")
MAX_POOL_SIZE = env.int("MAX_POOL_SIZE")
MIN_POOL_SIZE = env.int("MIN_POOL_SIZE")
MAX_IDLE_TIME = env.int("MAX_IDLE_TIME")
CONNECTION_TIMEOUT = env.int("CONNECTION_TIMEOUT")
HEARTBEAT_FREQUENCY = env.int("HEARTBEAT_FREQUENCY")
SERVER_SELECTION_TIMEOUT = env.int("SERVER_SELECTION_TIMEOUT")
# Logs
LOG_LEVEL = env('LOG_LEVEL')
LOG_FILE_PATH = env('LOG_FILE_PATH')
# rabbitmq url
RABBITMQ_URL = env("RABBITMQ_URL")
|
"""
pdwBuilder
Author: Morgan Allison, Keysight RF/uW Application Engineer
Pulse Descriptor Word building functions for Analog and Vector UXGs.
"""
import math
import struct
import numpy as np
from pyarbtools import error
def convert_to_floating_point(inputVal, exponentOffset, mantissaBits, exponentBits):
    """
    HELPER FUNCTION NOT WRITTEN BY THE AUTHORS
    Encode a positive value in the hardware's custom floating point layout:
    fp = gain * mantissa^mantissaExponent * 2^exponentOffset
    The exponent occupies the top `exponentBits`, the mantissa the low
    `mantissaBits` of the returned word.
    Returns:
        Integer word holding the packed exponent and mantissa.
    """
    largestExponent = int((1 << exponentBits) - 1)
    largestMantissa = np.uint32((1 << mantissaBits) - 1)
    # Raw exponent relative to the hardware's exponent offset.
    exp = int(math.floor((math.log(inputVal) / math.log(2)) - exponentOffset))
    if exp > largestExponent:
        # Out of range high: saturate both fields.
        return (np.uint32(largestExponent) << mantissaBits) | largestMantissa
    if exp < 0:
        # Out of range low: encode as zero.
        return (np.uint32(0) << mantissaBits) | np.uint32(0)
    # In range: derive the rounded mantissa for this exponent.
    scale = int(1 << mantissaBits)
    effectiveExp = int(exponentOffset + exp)
    # ldexp(X, Y) == X * 2^Y (matlab pow2 equivalent)
    mant = np.uint32(((math.ldexp(inputVal, -effectiveExp) - 1) * scale) + 0.5)
    if mant > largestMantissa:
        # Rounding overflowed the mantissa field.
        if exp < largestExponent:
            # Carry into the exponent.
            mant = np.uint32(0)
            exp += 1
        else:
            # Already at the top exponent: clamp the mantissa.
            mant = largestMantissa
    return (np.uint32(exp) << mantissaBits) | mant
def closest_m_2_n(inputVal, mantissaBits):
    """
    HELPER FUNCTION NOT WRITTEN BY THE AUTHORS
    Convert a value to the hardware representation mantissa * 2^exponent,
    picking the closest representable value.
    Returns:
        (success, exponent, mantissa) tuple.
    """
    ok = True
    mantissaCap = np.uint32((1 << mantissaBits) - 1)
    if inputVal < (mantissaCap + 0.5):
        # Fits with a zero exponent: just round (and clamp) the mantissa.
        mant = np.uint32(inputVal + 0.5)
        if mant > mantissaCap:
            mant = mantissaCap
        return ok, 0, mant
    # Larger values: locate the exponent via frexp, then round the mantissa.
    _, candidateExp = math.frexp(inputVal)
    candidateExp -= mantissaBits
    fracMant = float(inputVal / (1 << candidateExp))
    if fracMant > (mantissaCap + 0.5 - 1e-9):
        # Rounding would overflow the mantissa: bump the exponent instead.
        mant = 1 << (mantissaBits - 1)
        candidateExp += 1
    else:
        # Round to nearest, never exceeding the mantissa field.
        mant = np.uint32(fracMant + 0.5)
        if mant > mantissaCap:
            mant = mantissaCap
    return ok, np.uint32(candidateExp), mant
def chirp_closest_m_2_n(chirpRate, chirpRateRes=21.822):
    """
    HELPER FUNCTION NOT WRITTEN BY THE AUTHORS
    Convert a chirp rate to the hardware mantissa*2^exponent field used by
    chirp parameters.
    NOTE: I am not sure why the conversion factor of 21.82 needs to be there, but the math works out perfectly
    """
    mantissaBits = 13
    mantissaMask = np.uint32((1 << mantissaBits) - 1)
    # Express the chirp rate in hardware resolution units.
    clocks = float(chirpRate) / float(chirpRateRes)
    ok, exp, mant = closest_m_2_n(clocks, mantissaBits)
    # The chirp exponent counts in steps of 2, so halve it; when rounding the
    # exponent up, compensate by halving the mantissa.
    if exp & 0x01:
        exp = (exp + 1) >> 1
        mant = np.uint32(mant / 2)
    else:
        exp >>= 1
    if not ok:
        return np.uint32(0)
    return np.uint32((exp << mantissaBits) | (mant & mantissaMask))
def analog_bin_pdw_builder(operation=0, freq=1e9, phase=0, startTimeSec=0, width=0, powerLin=1, markers=0, pulseMode=2, phaseControl=0,
                           bandAdjust=0, chirpControl=0, code=0, chirpRate=0, freqMap=0):
    """
    This function builds a single format-1 PDW from a list of parameters.
    See User's Guide>Streaming Use>PDW Definitions section of
    Keysight UXG X-Series Agile Signal Generator Online Documentation
    http://rfmw.em.keysight.com/wireless/helpfiles/n519xa/n519xa.htm
    Args:
        operation (int): Specifies the operation of the PDW. (0-none, 1-first PDW, 2-last PDW)
        freq (float): CW frequency of PDW.
        phase (float): Phase of CW frequency of PDW.
        startTimeSec (float): Start time of the 50% rising edge power.
        width (float): Width of the pulse from 50% rise power to 50% fall power.
        powerLin (float): Linear scaling of the output in Vrms. (basically just leave this at 1)
        markers (int): Bit mask input of active markers (e.g. to activate marker 3, send the number 4, which is 0100 in binary).
        pulseMode (int): Configures pulse mode. (0-CW, 1-RF off, 2-Pulse enabled)
        phaseControl (int): Switches between phase mode. (0-coherent, 1-continuous)
        bandAdjust (int): Configures band adjustment criteria. (0-CW switch pts, 1-upper band, 2-lower band).
        chirpControl (int): Configures chirp shape. (0-stiched ramp, 1-triangle, 2-ramp)
        code (int): Selects hard-coded frequency/phase coding table index.
        chirpRate (float): Chirp rate in Hz/us.
        freqMap (int): Selects frequency band map. (0-A, 6-B)
    Returns:
        (NumPy array): Single PDW that can be used to build a PDW file or streamed directly to the UXG.
    """
    pdwFormat = 1
    # Frequency quantized by x1024 with round-half-up (i.e. 1/1024 Hz steps).
    _freq = int(freq * 1024 + 0.5)
    # Fold phase into (-180, 180] before quantizing to the 12-bit field.
    if 180 < phase <= 360:
        phase -= 360
    _phase = int(phase * 4096 / 360 + 0.5)
    _startTimePs = int(startTimeSec * 1e12)
    _widthNs = int(width * 1e9)
    # Power packed as the custom float: 5-bit exponent (offset -26), 10-bit mantissa.
    _power = convert_to_floating_point(powerLin, -26, 10, 5)
    _chirpRate = chirp_closest_m_2_n(chirpRate)
    # Build PDW
    pdw = np.zeros(7, dtype=np.uint32)
    # Word 0: Mask pdw format (3 bits), operation (2 bits), and the lower 27 bits of freq
    pdw[0] = (pdwFormat | operation << 3 | (_freq << 5 & 0xFFFFFFFF))
    # Word 1: Mask the upper 20 bits (47 - 27) of freq and phase (12 bits)
    pdw[1] = (_freq >> 27 | _phase << 20) & 0xFFFFFFFF
    # Word 2: Lower 32 bits of startTimePs
    pdw[2] = _startTimePs & 0xFFFFFFFF
    # Word 3: Upper 32 bits of startTimePS
    pdw[3] = (_startTimePs & 0xFFFFFFFF00000000) >> 32
    # Word 4: Pulse Width (32 bits)
    pdw[4] = _widthNs
    # Word 5: Mask power (15 bits), markers (12 bits), pulseMode (2 bits), phaseControl (1 bit), and bandAdjust (2 bits)
    pdw[5] = _power | markers << 15 | pulseMode << 27 | phaseControl << 29 | bandAdjust << 30
    # Word 6: chirpControl (bits 0-2), code (bits 3-11), chirpRate (bits 12-28), freqMap (bits 29-31).
    # NOTE(review): the original comment here described wIndex/wfmMkrMask
    # fields, which do not match this packing — verify against the PDW spec.
    pdw[6] = chirpControl | code << 3 | _chirpRate << 12 | freqMap << 29
    return pdw
# noinspection PyRedundantParentheses
def create_padding_block(sizeOfPaddingAndHeaderInBytes):
    """
    Build an analog UXG binary padding block (16-byte header + zero filler).

    Padding blocks align subsequent binary blocks on 16-byte boundaries and
    are also used to push PDW streaming data onto 4096-byte boundaries.

    Args:
        sizeOfPaddingAndHeaderInBytes (int): Total size of the padding block
            including its 16-byte header.

    Returns:
        List of byte strings: [block id, reserved, filler size, filler].
    """
    headerBytes = 16
    fillerBytes = sizeOfPaddingAndHeaderInBytes - headerBytes
    # 16-byte header: block id 1, 4 reserved bytes, 8-byte filler length.
    blockId = (1).to_bytes(4, byteorder='little')
    reserved = (0).to_bytes(4, byteorder='little')
    fillerSize = fillerBytes.to_bytes(8, byteorder='little')
    # Zero filler sized so header + filler hits the requested total.
    filler = (0).to_bytes(fillerBytes, byteorder='little')
    return [blockId, reserved, fillerSize, filler]
# noinspection PyDefaultArgument,PyRedundantParentheses,PyRedundantParentheses,PyRedundantParentheses,PyRedundantParentheses
def bin_freqPhaseCodingSingleEntry(onOffState=0, numBitsPerSubpulse=1, codingType=0, stateMapping=None,
                                   hexPatternString="E2", comment="default Comment"):
    """
    Creates a single entry binary frequency and phase coding block
    for analog UXG streaming. This is only part of a full frequency and phase coding
    block with multiple entries for each pattern to be streamed to UXG.
    Args:
        onOffState (int): Activation state for current FPC entry
        numBitsPerSubpulse (int): = number of bits per subpulse. E.g. For BPSK, this is 1
        codingType (int): 0=phase coding, 1= frequency coding, 2 = both phase and frequency coding
        stateMapping (double array): 2^numBitsPerSubpulse entries of phase / freq states
            (defaults to [0, 180] when omitted)
        hexPatternString (string): Hex values to encode in FPC table e.g. "A2F4" multiple of 2 in length
        comment (string): FPC entry name
    Returns:
        binary array containing bytes for a single frequency phase entry
    TODO - Combination of simultaneous phase and frequency modulation not yet implemented
    """
    # Fix: the original used a mutable default argument (stateMapping=[0, 180]),
    # which is shared across calls; use a None sentinel with the same default.
    if stateMapping is None:
        stateMapping = [0, 180]
    if len(hexPatternString) % 2 != 0:
        raise error.UXGError('Hex pattern length must be a multiple of 2: Length is ' + str(len(hexPatternString)))
    hexPatternBytes = bytearray.fromhex(hexPatternString)
    numBitsInPattern = 8 * len(hexPatternBytes)
    # Only the implemented subset is accepted; everything else fails fast.
    if codingType != 0 and codingType != 1:
        raise error.UXGError('Only phase and frequency coding via streaming has been implemented in this example')
    if numBitsPerSubpulse != 1:
        raise error.UXGError('Only one bit per subpulse has been implemented in this example')
    if len(hexPatternBytes) > 8192:
        raise error.UXGError('Pattern must be less than 8192 bytes')
    if len(comment) > 60:
        raise error.UXGError('Comment must be less than 60 characters long')
    # Fixed-size entry header: state, bits/subpulse, coding type, comment
    # length (1 byte each) followed by the 4-byte pattern bit count.
    entryState = onOffState.to_bytes(1, byteorder='little')
    numBitsPerSub = numBitsPerSubpulse.to_bytes(1, byteorder='little')
    modType = codingType.to_bytes(1, byteorder='little')
    numBytesInComment = len(comment).to_bytes(1, byteorder='little')
    numBitsInPat = numBitsInPattern.to_bytes(4, byteorder='little')
    fpcBin = entryState + numBitsPerSub + modType + numBytesInComment + numBitsInPat
    # Each phase/frequency state is a little-endian 8-byte double.
    for phaseOrFreq in stateMapping:
        fpcBin = fpcBin + bytearray(struct.pack("<d", phaseOrFreq))
    fpcBin = fpcBin + hexPatternBytes
    # Comment is appended as UTF-8 with its length recorded in the header.
    fpcBin = fpcBin + bytearray(comment, 'utf-8')
    return fpcBin
# noinspection PyRedundantParentheses,PyRedundantParentheses,PyRedundantParentheses,PyRedundantParentheses
def bin_pdw_freqPhaseCodingBlock():
    """
    Build a complete frequency/phase coding (FPC) block — header plus data —
    for analog UXG streaming. The block lets FPC tables be updated over
    ethernet streaming instead of via SCPI commands.
    See the Streaming Mode File Format Definition in the UXG online help:
    http://rfmw.em.keysight.com/wireless/helpfiles/n519xa/n519xa.htm#User's%20Guide/Streaming%20Mode%20File%20Format%20Definition.htm%3FTocPath%3DUser's%2520Guide%7CStreaming%2520Mode%2520Use%7C_____5
    Currently hardcoded to three fixed entries:
    index 0 - no coding, index 1 - PSK, index 2 - FSK.
    Returns (list of bytes-like):
        Full FPC block with header and trailing 16-byte-boundary padding.
    """
    blockId = (13).to_bytes(4, byteorder='little')
    reserved = (0).to_bytes(4, byteorder='little')
    version = (2).to_bytes(4, byteorder='little')
    # Fixed entry table: no coding, 32-bit PSK pattern, 16-bit FSK pattern.
    entries = [
        bin_freqPhaseCodingSingleEntry(0, 1, 0, [0, 180], "", "NoCodingFirstEntry"),
        bin_freqPhaseCodingSingleEntry(1, 1, 0, [0, 180], "2A61D327", "PSKcode32bits"),
        bin_freqPhaseCodingSingleEntry(1, 1, 1, [-10e6, 10e6], "5AC4", "FSKcodeTest16bits"),
    ]
    entryCount = len(entries).to_bytes(4, byteorder='little')
    # Size field excludes the 8 bytes of blockId + reserved.
    payloadSize = len(version) + len(entryCount) + sum(len(e) for e in entries)
    sizeField = payloadSize.to_bytes(8, byteorder='little')
    block = [blockId, reserved, sizeField, version, entryCount] + entries
    # Pad so the whole block ends on a 16-byte boundary.
    tailBytes = 16 - (len(b''.join(block)) % 16)
    block.append((0).to_bytes(tailBytes, byteorder='little'))
    return block
# noinspection PyRedundantParentheses
def analog_bin_pdw_file_builder(pdwList):
    """
    Builds a binary PDW file with a padding block to ensure the
    PDW section begins at an offset of 4096 bytes (required by UXG).
    See User's Guide>Streaming Use>PDW File Format section of
    Keysight UXG X-Series Agile Signal Generator Online Documentation
    http://rfmw.em.keysight.com/wireless/helpfiles/n519xa/n519xa.htm
    Args:
        pdwList (list): List of lists. Each inner list contains a single pulse descriptor word.
    Returns:
        (bytes): Binary data that contains a full PDW file that can be downloaded to and played out of the UXG.
    """
    # Include frequency phase coding block flag: 1 = yes, 0 = no
    includeFpcBlock = 1
    # Header section, all fixed values
    fileId = b'STRM'
    version = (1).to_bytes(4, byteorder='little')
    # First field is first block of 4096 bytes. If frequency phase coding block is large,
    # this offset to the start of PDW data might extend past first 4096 sized block
    fieldBlock = 1
    # NOTE(review): (1 >> 1) & 0x3fffff is always 0, so this offset field is
    # written as zero regardless of fieldBlock — confirm against the file spec.
    offset = ((fieldBlock >> 1) & 0x3fffff).to_bytes(4, byteorder='little')
    magic = b'KEYS'
    res0 = (0).to_bytes(16, byteorder='little')
    flags = (0).to_bytes(4, byteorder='little')
    uniqueId = (0).to_bytes(4, byteorder='little')
    dataId = (16).to_bytes(4, byteorder='little')
    res1 = (0).to_bytes(4, byteorder='little')
    header = [fileId, version, offset, magic, res0, flags, uniqueId, dataId, res1]
    tempHeaderSize = len(b''.join(header))
    # FPC Block - skip fpcBlock if flag is zero
    fpcBlock = [b'']
    if (includeFpcBlock):
        fpcBlock = bin_pdw_freqPhaseCodingBlock()
    fpcBlockSize = len(b''.join(fpcBlock))
    # PDW block header must start at byte 4080 so PDW stream data starts at byte 4097
    paddingSize = 4080 - tempHeaderSize - fpcBlockSize
    paddingBlock = create_padding_block(paddingSize)
    # PDW block header = 16 bytes
    pdwBlockId = (16).to_bytes(4, byteorder='little')
    res4 = (0).to_bytes(4, byteorder='little')
    # All-ones size presumably acts as an "unknown/streamed length" sentinel — TODO confirm.
    pdwSize = (0xffffffffffffffff).to_bytes(8, byteorder='little')
    pdwBlock = [pdwBlockId, res4, pdwSize]
    # Build Raw PDW Data from list
    rawPdwData = [analog_bin_pdw_builder(*p) for p in pdwList]
    # Add 8 bytes of zero to make sure PDW block ends on 16 byte boundary.
    rawPdwData += [(0).to_bytes(8, byteorder='little')]
    pdwEndBlock = [(0).to_bytes(16, byteorder='little')]
    # Build PDW file from header, padBlock, pdwBlock, and PDWs
    pdwFile = header + fpcBlock + paddingBlock + pdwBlock + rawPdwData + pdwEndBlock
    # Convert arrays of data to a single byte-type variable
    pdwFile = b''.join(pdwFile)
    return pdwFile
# noinspection PyPep8
def vector_bin_pdw_builder_3(operation=0, freq=1e9, phase=0, startTimeSec=0, width=10e-6, maxPower=0, markers=0, powerDbm=0,
                             phaseControl=0, rfOff=0, autoBlank=0, zeroHold=0, loLead=0, wfmMkrMask=0, wIndex=0):
    """
    Build a single format-3 PDW for the vector UXG from scalar parameters.
    See User's Guide>Streaming Use>PDW Definitions section of the
    Keysight UXG X-Series Agile Vector Adapter Online Documentation:
    http://rfmw.em.keysight.com/wireless/helpfiles/n519xa-vector/n519xa-vector.htm
    Args:
        operation (int): PDW operation (0-none, 1-first PDW, 2-last PDW).
        freq (float): CW frequency of the PDW.
        phase (float): Phase of the CW frequency.
        startTimeSec (float): Start time of the 50% rising edge power.
        width (float): Pulse width, 50% rise power to 50% fall power.
        maxPower (float): Max output power in dBm.
        markers (int): PDW marker bit mask (e.g. marker 3 -> 4, binary 0100).
        powerDbm (float): Power for this PDW in dBm.
        phaseControl (int): Phase mode (0-coherent, 1-continuous).
        rfOff (int): RF Off mode (0-RF on, 1-RF off).
        autoBlank (int): Blanking (0-off, 1-on).
        zeroHold (int): Zero/hold behavior (0-zero, 1-hold last value).
        loLead (float): How long before the PDW start time to begin switching the LO.
        wfmMkrMask (int): Waveform marker bit mask.
        wIndex (int): Index of the IQ waveform assigned to this PDW.
    Returns:
        (NumPy array): Single PDW for a PDW file or direct streaming to the UXG.
    """
    fmt = 3
    # Quantize the analog parameters into their hardware field encodings.
    freqField = int(freq * 1024 + 0.5)
    phaseField = int(phase * 4096 / 360 + 0.5)
    startPs = int(startTimeSec * 1e12)
    # NOTE(review): carried over from the original — the extra factor of 2 on
    # the width was flagged as suspect by the original author.
    widthPs = int(width * 1e12 * 2)
    maxPowerField = int((maxPower + 140) / 0.005 + 0.5)
    powerField = int((powerDbm + 140) / 0.005 + 0.5)
    loLeadField = int(loLead / 4e-9)
    newWfm = 1
    wfmType = 0
    # Pack the fields into the 11-word PDW.
    pdw = np.zeros(11, dtype=np.uint32)
    # Word 0: format (3 bits), operation (2 bits), low 27 bits of freq.
    pdw[0] = (fmt | operation << 3 | freqField << 5) & 0xFFFFFFFF
    # Word 1: upper 20 bits of freq, phase (12 bits).
    pdw[1] = (freqField >> 27 | phaseField << 20) & 0xFFFFFFFF
    # Words 2-3: start time in ps, split across two 32-bit words.
    pdw[2] = startPs & 0xFFFFFFFF
    pdw[3] = (startPs & 0xFFFFFFFF00000000) >> 32
    # Word 4: low 32 bits of the 37-bit pulse width.
    pdw[4] = widthPs & 0xFFFFFFFF
    # Word 5: top 5 bits of pulse width, max power (15 bits), markers (12 bits).
    pdw[5] = (widthPs & 0x1F00000000) >> 32 | maxPowerField << 5 | markers << 20
    # Word 6: power (15), phase mode (1), RF off (1), auto blank (1),
    # new wfm (1), zero/hold (1), LO lead (8), waveform marker mask (4).
    pdw[6] = (powerField | phaseControl << 15 | rfOff << 16 | autoBlank << 17
              | newWfm << 18 | zeroHold << 19 | loLeadField << 20 | wfmMkrMask << 28)
    # Word 7: waveform type (2 bits at bit 8), waveform index (16 bits at bit 10).
    pdw[7] = wfmType << 8 | wIndex << 10
    return pdw
def vector_bin_pdw_builder(operation, freq, phase, startTimeSec, powerDbm, markers, phaseControl, rfOff, wIndex, wfmMkrMask):
    """Build a single format-1 PDW (pulse descriptor word) as a uint32 array.

    PDW format-1 is deprecated but still supported as legacy. See the User's
    Guide > Streaming Use > PDW Definitions section of the Keysight UXG
    X-Series Agile Vector Adapter Online Documentation:
    http://rfmw.em.keysight.com/wireless/helpfiles/n519xa-vector/n519xa-vector.htm

    Args:
        operation (int): PDW operation (0-none, 1-first PDW, 2-last PDW).
        freq (float): CW frequency of PDW in Hz.
        phase (float): Phase of CW frequency in degrees.
        startTimeSec (float): Start time of the 50% rising edge power, seconds.
        powerDbm (float): Power for this PDW in dBm.
        markers (int): PDW marker bit mask (e.g. marker 3 -> send 4 == 0b0100).
        phaseControl (int): Phase mode (0-coherent, 1-continuous).
        rfOff (int): RF Off mode (0-RF on, 1-RF off).
        wIndex (int): Index of the IQ waveform assigned to the PDW.
        wfmMkrMask (int): Waveform marker bit mask (same encoding as markers).

    Returns:
        (NumPy array): Single PDW usable in a PDW file or streamed to the UXG.
    """
    pdwFormat = 1  # Format 1 PDWs are deprecated.
    # Fixed-point scaling: freq in 1/1024 Hz, phase in 1/4096 turn,
    # time in picoseconds, power in 5 mdB steps offset by -140 dBm.
    scaledFreq = int(freq * 1024 + 0.5)
    scaledPhase = int(phase * 4096 / 360 + 0.5)
    startPs = int(startTimeSec * 1e12)
    scaledPower = int((powerDbm + 140) / 0.005 + 0.5)

    pdw = np.zeros(6, dtype=np.uint32)
    # Word 0: pdw format (3 bits), operation (2 bits), lower 27 bits of freq.
    pdw[0] = (pdwFormat | (operation << 3) | (scaledFreq << 5)) & 0xFFFFFFFF
    # Word 1: upper 20 bits (47 - 27) of freq, then phase (12 bits).
    pdw[1] = ((scaledFreq >> 27) | (scaledPhase << 20)) & 0xFFFFFFFF
    # Words 2-3: start time in ps split into lower/upper 32-bit halves.
    pdw[2] = startPs & 0xFFFFFFFF
    pdw[3] = (startPs & 0xFFFFFFFF00000000) >> 32
    # Word 4: power (15 bits), markers (12 bits), phase mode (1), RF off (1).
    pdw[4] = scaledPower | (markers << 15) | (phaseControl << 27) | (rfOff << 28)
    # Word 5: waveform index (16 bits), 12 reserved bits, marker mask (4 bits).
    pdw[5] = wIndex | (wfmMkrMask << 28)
    return pdw
def vector_bin_pdw_file_builder(pdwList):
    """Build a binary PDW file with a padding block so that the PDW section
    begins at an offset of 4096 bytes (required by the UXG).

    See User's Guide > Streaming Use > PDW Definitions section of the
    Keysight UXG X-Series Agile Vector Adapter Online Documentation:
    http://rfmw.em.keysight.com/wireless/helpfiles/n519xa-vector/n519xa-vector.htm

    Args:
        pdwList (list): List of lists. Each inner list contains the arguments
            for a single pulse descriptor word.

    Returns:
        (bytes): Full PDW file that can be downloaded to and played out of
        the UXG.
    """
    def _le(value, nbytes=4):
        # All header fields are little-endian unsigned integers.
        return value.to_bytes(nbytes, byteorder='little')

    # File header section (all fixed values).
    header = [
        b'STRM',      # fileId
        _le(1),       # version
        _le(2),       # offset: one 4096-byte block to the PDW data
        b'KEYS',      # magic
        _le(0, 16),   # res0
        _le(0),       # flags
        _le(0),       # uniqueId
        _le(64),      # dataId
        _le(0),       # res1
    ]
    # Padding block: 4016 zero bytes so the first PDW begins at byte 4097.
    padding = [
        _le(1),          # padBlockId
        _le(0),          # reserved
        _le(4016, 8),    # size
        _le(0, 4016),    # pad data
    ]
    # PDW block header; size is "unbounded/streaming" (all ones).
    pdwBlock = [
        _le(16),                        # pdwBlockId
        _le(0),                         # reserved
        _le(0xFFFFFFFFFFFFFFFF, 8),     # pdwSize
    ]

    # Assemble header, padding, PDW block header, then the PDWs themselves.
    sections = header + padding + pdwBlock
    sections += [vector_bin_pdw_builder(*p) for p in pdwList]
    # join() accepts the numpy arrays directly via the buffer protocol.
    return b''.join(sections)
|
from abc import ABCMeta, abstractmethod
class Animal(metaclass=ABCMeta):
    """Abstract base class for animals.

    Subclasses must override ``som_animal``; they may still call the base
    implementation via ``super()`` to reuse the generic sound text.
    """

    @abstractmethod
    def som_animal(self):
        # Fix: removed an unreachable `pass` that followed this return.
        return 'som de algum outro animal'
class Cachorro(Animal):
    """Dog: extends the generic animal sound with its own bark."""

    def som_animal(self):
        # Fix: Python-3 zero-argument super() replaces the legacy
        # super(Cachorro, self) form (the file already uses py3 print()).
        base_sound = super().som_animal()
        return '%s - %s' % (base_sound, 'AUAU')
# Demo: instantiate the concrete subclass and print its combined sound.
c = Cachorro()
print(c.som_animal())
|
import pytest
import datetime
import random
from OfficeActions import OfficeActions
from models import OfficeModel
from config import *
class TestOfficeActions():
    """Unit tests for OfficeActions create/get/list behavior.

    Fixes applied: unified the assert style (the file mixed bare
    ``assert x == y`` with redundant-parenthesis ``assert(x == y)``) and
    the timestamp is now captured once per test instead of calling
    ``datetime.datetime.now()`` for each assertion.
    """

    default_usa_state = "MA"
    default_office_code = default_usa_state + "-18"
    default_usa_state2 = "NH"
    default_office_code2 = default_usa_state2 + "-03"
    # TODO: We need a way to distinguish states/offices that should be
    # shown in state picker and the ones that should not.
    # We could do a pseudo-state like for "Fed" and filter those out.
    default_hoss_usa_state = "HOSS"
    default_hoss_office_code = default_hoss_usa_state + "-01"
    number_of_seconds_in_a_day = 24 * 60 * 60

    def setup_method(self, method):
        # Start every test from an empty office table.
        OfficeActions.delete()

    def test_create(self):
        office = OfficeActions.create(
            usa_state=self.default_usa_state,
            office_code=self.default_office_code,
        )
        now = datetime.datetime.now()
        assert office.office_code == self.default_office_code
        assert office.usa_state == self.default_usa_state
        # Both timestamps should be (approximately) the creation time.
        assert (now - office.created_at).total_seconds() < 1
        assert (now - office.updated_at).total_seconds() < 1

    def test_create_hoss(self):
        office = OfficeActions.create(
            usa_state=self.default_hoss_usa_state,
            office_code=self.default_hoss_office_code,
        )
        now = datetime.datetime.now()
        assert office.office_code == self.default_hoss_office_code
        assert office.usa_state == self.default_hoss_usa_state
        assert (now - office.created_at).total_seconds() < 1
        assert (now - office.updated_at).total_seconds() < 1

    def test_get_given_office_created(self):
        OfficeActions.create(
            office_code=self.default_office_code,
            usa_state=self.default_usa_state,
        )
        retrieved_office = OfficeActions.get_by_code(office_code=self.default_office_code)
        assert retrieved_office.office_code == self.default_office_code
        assert retrieved_office.usa_state == self.default_usa_state

    def test_get_offices(self):
        OfficeActions.create(
            usa_state=self.default_usa_state,
            office_code=self.default_office_code,
        )
        OfficeActions.create(
            usa_state=self.default_usa_state2,
            office_code=self.default_office_code2,
        )
        # The second office must appear in the listing with its state intact.
        found = False
        actual_offices = OfficeActions.get_offices()
        for office in actual_offices:
            if office["office_code"] == self.default_office_code2:
                found = True
                assert office["usa_state"] == self.default_usa_state2
        assert found

    # def test_controllers_get_office_by_uuid(self):
    #     unique_office_code = random.randint(1,1000000)
    #     created_office = OfficeActions.create( "OH", unique_office_code , "OH06")
    #     actual_office=controllers_get_office_by_uuid(created_office.uuid)
    #     assert(actual_office['usa_state'] == "OH")
    #     assert(actual_office['office_code'] == "OH06")
    #     assert(actual_office['office_code'] == unique_office_code)
    #     assert(actual_office['uuid'] == created_office.uuid)

    # def test_update_office(self):
    #     unique_office_code = random.randint(1,1000000)
    #     created_office = OfficeActions.create( "OH", unique_office_code , "OH06")
    #     actual_office=OfficeActions.get_office_by_uuid(created_office.uuid)
    #     usa_state = "VA"
    #     office_code = "031E"
    #     office_code = random.randint(1,1000000)
    #     uuid = actual_office.uuid
    #     updated_office = OfficeActions.update_office(uuid, usa_state, office_code , office_code)
    #     refreshed_actual_office=OfficeActions.get_office_by_uuid(created_office.uuid)
    #     assert(refreshed_actual_office.usa_state == usa_state)
    #     assert(refreshed_actual_office.office_code == office_code)
    #     assert(refreshed_actual_office.office_code == office_code)
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
import abc
import functools
import six
from six.moves import urllib
from keystoneclient import auth
from keystoneclient import exceptions
from keystoneclient.i18n import _
from keystoneclient.openstack.common.apiclient import base
def getid(obj):
    """Return id if argument is a Resource.

    Abstracts the common pattern of allowing both an object or an object's ID
    (UUID) as a parameter when dealing with relationships.
    """
    # Prefer a truthy ``uuid`` attribute when the object carries one.
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    # Fall back to ``id``; plain values (already IDs) pass through unchanged.
    return getattr(obj, 'id', obj)
def filter_none(**kwargs):
    """Remove any entries from a dictionary where the value is None."""
    return {key: value for key, value in kwargs.items() if value is not None}
def filter_kwargs(f):
    """Decorator that drops None kwargs and flattens resource references.

    A kwarg whose value is a resource object is replaced by its id under the
    renamed key ``<key>_id`` (e.g. ``user=user(id=1)`` becomes ``user_id=1``).
    """
    @functools.wraps(f)
    def func(*args, **kwargs):
        cleaned = {}
        for key, ref in kwargs.items():
            if ref is None:
                # drop null values
                continue
            id_value = getid(ref)
            if id_value != ref:
                # A resource object was passed; forward its id instead.
                key = '%s_id' % key
            cleaned[key] = id_value
        return f(*args, **cleaned)
    return func
class Manager(object):
    """Basic manager type providing common operations.

    Managers interact with a particular type of API (servers, flavors, images,
    etc.) and provide CRUD operations for them.
    """

    # Subclasses set this to the Resource subclass that wraps the API
    # entities returned by the helpers below.
    resource_class = None

    def __init__(self, client):
        """Initializes Manager with `client`.

        :param client: instance of BaseClient descendant for HTTP requests
        """
        super(Manager, self).__init__()
        self.client = client

    @property
    def api(self):
        """Deprecated. Use `client` instead.
        """
        return self.client

    def _list(self, url, response_key, obj_class=None, body=None, **kwargs):
        """List the collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        :param obj_class: class for constructing the returned objects
            (self.resource_class will be used by default)
        :param body: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param kwargs: Additional arguments will be passed to the request.
        """
        # A truthy body switches the request from GET to POST.
        if body:
            resp, body = self.client.post(url, body=body, **kwargs)
        else:
            resp, body = self.client.get(url, **kwargs)
        if obj_class is None:
            obj_class = self.resource_class
        data = body[response_key]
        # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
        # unlike other services which just return the list...
        try:
            data = data['values']
        except (KeyError, TypeError):
            pass
        # Falsy entries are skipped; loaded=True marks each resource as
        # fully populated (no lazy fetch needed).
        return [obj_class(self, res, loaded=True) for res in data if res]

    def _get(self, url, response_key, **kwargs):
        """Get an object from collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'
        :param kwargs: Additional arguments will be passed to the request.
        """
        resp, body = self.client.get(url, **kwargs)
        return self.resource_class(self, body[response_key], loaded=True)

    def _head(self, url, **kwargs):
        """Retrieve request headers for an object.

        :param url: a partial URL, e.g., '/servers'
        :param kwargs: Additional arguments will be passed to the request.
        :returns: True when the server answered 204 (No Content).
        """
        resp, body = self.client.head(url, **kwargs)
        return resp.status_code == 204

    def _create(self, url, body, response_key, return_raw=False, **kwargs):
        """Deprecated. Use `_post` instead.
        """
        return self._post(url, body, response_key, return_raw, **kwargs)

    def _post(self, url, body, response_key, return_raw=False, **kwargs):
        """Create an object.

        :param url: a partial URL, e.g., '/servers'
        :param body: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        :param return_raw: flag to force returning raw JSON instead of
            Python object of self.resource_class
        :param kwargs: Additional arguments will be passed to the request.
        """
        resp, body = self.client.post(url, body=body, **kwargs)
        if return_raw:
            return body[response_key]
        return self.resource_class(self, body[response_key])

    def _put(self, url, body=None, response_key=None, **kwargs):
        """Update an object with PUT method.

        :param url: a partial URL, e.g., '/servers'
        :param body: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        :param kwargs: Additional arguments will be passed to the request.
        """
        resp, body = self.client.put(url, body=body, **kwargs)
        # PUT requests may not return a body
        if body is not None:
            if response_key is not None:
                return self.resource_class(self, body[response_key])
            else:
                return self.resource_class(self, body)

    def _patch(self, url, body=None, response_key=None, **kwargs):
        """Update an object with PATCH method.

        :param url: a partial URL, e.g., '/servers'
        :param body: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        :param kwargs: Additional arguments will be passed to the request.
        """
        resp, body = self.client.patch(url, body=body, **kwargs)
        if response_key is not None:
            return self.resource_class(self, body[response_key])
        else:
            return self.resource_class(self, body)

    def _delete(self, url, **kwargs):
        """Delete an object.

        :param url: a partial URL, e.g., '/servers/my-server'
        :param kwargs: Additional arguments will be passed to the request.
        """
        return self.client.delete(url, **kwargs)

    def _update(self, url, body=None, response_key=None, method="PUT",
                management=True, **kwargs):
        """Update an object using the given HTTP method.

        :param method: one of "PUT", "POST" or "PATCH"; any other value
            raises `exceptions.ClientException`.
        """
        # Dispatch table mapping the verb to the corresponding client call.
        methods = {"PUT": self.client.put,
                   "POST": self.client.post,
                   "PATCH": self.client.patch}
        try:
            resp, body = methods[method](url, body=body,
                                         management=management,
                                         **kwargs)
        except KeyError:
            # NOTE(review): this except also catches a KeyError raised from
            # inside the request call itself -- confirm that is intended.
            raise exceptions.ClientException(_("Invalid update method: %s")
                                             % method)
        # PUT requests may not return a body
        if body:
            return self.resource_class(self, body[response_key])
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(Manager):
    """Manager with additional `find()`/`findall()` methods."""

    @abc.abstractmethod
    def list(self):
        pass

    def find(self, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        matches = self.findall(**kwargs)
        if not matches:
            msg = _("No %(name)s matching %(kwargs)s.") % {
                'name': self.resource_class.__name__, 'kwargs': kwargs}
            raise exceptions.NotFound(404, msg)
        if len(matches) > 1:
            raise exceptions.NoUniqueMatch
        return matches[0]

    def findall(self, **kwargs):
        """Find all items with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        matches = []
        wanted = kwargs.items()
        for candidate in self.list():
            try:
                if all(getattr(candidate, attr) == value
                       for (attr, value) in wanted):
                    matches.append(candidate)
            except AttributeError:
                # Candidates missing a queried attribute simply don't match.
                continue
        return matches
class CrudManager(Manager):
    """Base manager class for manipulating Keystone entities.

    Children of this class are expected to define a `collection_key` and
    `key`.

    - `collection_key`: Usually a plural noun by convention (e.g. `entities`);
      used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
      objects containing a list of member resources (e.g. `{'entities': [{},
      {}, {}]}`).
    - `key`: Usually a singular noun by convention (e.g. `entity`); used to
      refer to an individual member of the collection.
    """

    collection_key = None
    key = None
    # Optional prefix prepended to every generated URL.
    base_url = None

    def build_url(self, dict_args_in_out=None):
        """Builds a resource URL for the given kwargs.

        Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URL's could be generated.

        By default, the URL will represent a collection of entities, e.g.::

            /entities

        If kwargs contains an `entity_id`, then the URL will represent a
        specific member, e.g.::

            /entities/{entity_id}

        If a `base_url` is provided, the generated URL will be appended to it.

        Note: this pops `base_url` and `<key>_id` out of the passed dict.
        """
        if dict_args_in_out is None:
            dict_args_in_out = {}
        url = dict_args_in_out.pop('base_url', None) or self.base_url or ''
        url += '/%s' % self.collection_key
        # do we have a specific entity?
        entity_id = dict_args_in_out.pop('%s_id' % self.key, None)
        if entity_id is not None:
            url += '/%s' % entity_id
        return url

    @filter_kwargs
    def create(self, **kwargs):
        # Remaining kwargs (after build_url pops URL pieces) become the body.
        url = self.build_url(dict_args_in_out=kwargs)
        return self._create(
            url,
            {self.key: kwargs},
            self.key)

    @filter_kwargs
    def get(self, **kwargs):
        return self._get(
            self.build_url(dict_args_in_out=kwargs),
            self.key)

    @filter_kwargs
    def head(self, **kwargs):
        return self._head(self.build_url(dict_args_in_out=kwargs))

    def _build_query(self, params):
        # Empty params produce an empty query string (no lone '?').
        return '?%s' % urllib.parse.urlencode(params) if params else ''

    @filter_kwargs
    def list(self, fallback_to_auth=False, **kwargs):
        url = self.build_url(dict_args_in_out=kwargs)
        try:
            query = self._build_query(kwargs)
            url_query = '%(url)s%(query)s' % {'url': url, 'query': query}
            return self._list(
                url_query,
                self.collection_key)
        except exceptions.EmptyCatalog:
            # url_query is always bound here: only self._list() can raise
            # EmptyCatalog, and it runs after url_query is assigned.
            if fallback_to_auth:
                return self._list(
                    url_query,
                    self.collection_key,
                    endpoint_filter={'interface': auth.AUTH_INTERFACE})
            else:
                raise

    @filter_kwargs
    def put(self, **kwargs):
        return self._update(
            self.build_url(dict_args_in_out=kwargs),
            method='PUT')

    @filter_kwargs
    def update(self, **kwargs):
        url = self.build_url(dict_args_in_out=kwargs)
        return self._update(
            url,
            {self.key: kwargs},
            self.key,
            method='PATCH')

    @filter_kwargs
    def delete(self, **kwargs):
        return self._delete(
            self.build_url(dict_args_in_out=kwargs))

    @filter_kwargs
    def find(self, **kwargs):
        """Find a single item with attributes matching ``**kwargs``."""
        url = self.build_url(dict_args_in_out=kwargs)
        # Remaining kwargs become server-side query filters.
        query = self._build_query(kwargs)
        rl = self._list(
            '%(url)s%(query)s' % {
                'url': url,
                'query': query,
            },
            self.collection_key)
        num = len(rl)

        if num == 0:
            msg = _("No %(name)s matching %(kwargs)s.") % {
                'name': self.resource_class.__name__, 'kwargs': kwargs}
            raise exceptions.NotFound(404, msg)
        elif num > 1:
            raise exceptions.NoUniqueMatch
        else:
            return rl[0]
class Resource(base.Resource):
    """Base class for OpenStack resources (tenant, user, etc.).

    This is pretty much just a bag for attributes.
    """

    def delete(self):
        # Delegate to the owning manager, passing this resource so its
        # id/uuid is used to build the URL.
        return self.manager.delete(self)
|
"""Solution for Advent of Code day 20."""
from pathlib import Path
import doctest
import click
def read_input(filename: Path) -> tuple[str, set[tuple[int, int]]]:
    """Read the enhancement rule and the input image from the puzzle file.

    Fixes: the annotations used the invalid form ``set[int, int]`` (a set has
    one element type); the correct type is ``set[tuple[int, int]]``. The
    docstring also wrongly referred to "the scanners" (a day-19 leftover).

    Args:
        filename (Path): path to the puzzle input file

    Returns:
        str: the image-enhancement rule string
        set[tuple[int, int]]: (row, col) coordinates of the lit pixels
    """
    with filename.open("r") as file:
        data = file.read().strip()
    # The rule and the starting image are separated by a blank line.
    rule, start = data.split("\n\n")
    rule = rule.strip()
    input_image = set()
    for row, line in enumerate(start.strip().split("\n")):
        for col, char in enumerate(line.strip()):
            if char == "#":
                input_image.add((row, col))
    return rule, input_image
def apply_image_enhancement(image: set[tuple[int, int]], on: bool, rule: str) -> set[tuple[int, int]]:
    """Apply one round of image enhancement to the image.

    Fixes: the annotations used the invalid form ``set[int, int]``; the
    correct type is ``set[tuple[int, int]]``. Code is unchanged.

    The stored-pixel convention can flip each round: a pixel is added to the
    result exactly when its output state (``rule[index] == "#"``) differs
    from the current ``on`` convention, which keeps an infinite flashing
    background representable as a finite set.

    Args:
        image (set[tuple[int, int]]): input image
        on (bool): flag that indicates whether membership in ``image`` means
            the pixel is on (True) or off (False)
        rule (str): 512-character image enhancement rule

    Returns:
        set[tuple[int, int]]: processed image (convention flipped when the
        rule maps an all-off neighborhood to on)
    """
    result = set()
    row_low = min([row for row, _ in image])
    row_high = max([row for row, _ in image])
    col_low = min([col for _, col in image])
    col_high = max([col for _, col in image])
    # NOTE(review): the margins are asymmetric (-5 / +10) and look larger
    # than the +/-1 growth per round strictly requires -- confirm.
    for row in range(row_low - 5, row_high + 10):
        for col in range(col_low - 5, col_high + 10):
            # Build the 9-bit rule index from the 3x3 neighborhood,
            # most-significant bit first (top-left neighbor).
            row_col_str = 0
            bit = 8
            for delta_row in [-1, 0, 1]:
                for delta_col in [-1, 0, 1]:
                    if ((row + delta_row, col + delta_col) in image) == on:
                        row_col_str += 2 ** bit
                    bit -= 1
            assert 0 <= row_col_str < 512
            if (rule[row_col_str] == "#") != on:
                result.add((row, col))
    return result
# Top-level click group: subcommands (part-1, part-2, test) attach to this.
@click.group()
def main():
    """CLI for the solution of day 20
    Advent of code 2021 (https://adventofcode.com/2021/day/20)
    """
@main.command()
@click.argument(
    "filename",
    required=False,
    type=Path,
    default=Path("test_data/day_20.data"),
)
def part_1(filename: Path):
    """Part one of day 20. (2 runs)"""
    rule, image = read_input(filename)
    # The stored-pixel convention alternates with step parity.
    for step in range(2):
        stored_means_on = step % 2 == 0
        image = apply_image_enhancement(image, stored_means_on, rule)
    print(f"{len(image)} pixels are lit after 2 rounds")
@main.command()
@click.argument(
    "filename",
    required=False,
    type=Path,
    default=Path("test_data/day_20.data"),
)
def part_2(filename: Path):
    """Part two of day 20. (50 runs)"""
    rule, image = read_input(filename)
    # The stored-pixel convention alternates with step parity.
    for step in range(50):
        stored_means_on = step % 2 == 0
        image = apply_image_enhancement(image, stored_means_on, rule)
    print(f"{len(image)} pixels are lit after 50 rounds")
@main.command()
def test():
    """run doctest."""
    results = doctest.testmod()
    print(results)
# Dispatch to the click CLI only when run as a script.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import sys
import click
import tornado.ioloop
from streamlit import config
from streamlit import net_util
from streamlit import url_util
from streamlit import env_util
from streamlit import util
from streamlit.Report import Report
from streamlit.logger import get_logger
from streamlit.server.Server import Server
LOGGER = get_logger(__name__)
# Wait for 1 second before opening a browser. This gives old tabs a chance to
# reconnect.
# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
BROWSER_WAIT_TIMEOUT_SEC = 1
def _set_up_signal_handler():
    """Install handlers that stop the Streamlit server on termination signals."""
    LOGGER.debug("Setting up signal handler")

    def stop_server(signal_number, stack_frame):
        # The server will shut down its threads and stop the ioloop
        Server.get_current().stop()

    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, stop_server)
    # SIGBREAK only exists on Windows; SIGQUIT only elsewhere.
    if sys.platform == "win32":
        signal.signal(signal.SIGBREAK, stop_server)
    else:
        signal.signal(signal.SIGQUIT, stop_server)
def _fix_sys_path(script_path):
"""Add the script's folder to the sys path.
Python normally does this automatically, but since we exec the script
ourselves we need to do it instead.
"""
sys.path.insert(0, os.path.dirname(script_path))
def _fix_matplotlib_crash():
    """Set Matplotlib backend to avoid a crash.

    The default Matplotlib backend crashes Python on OSX when run on a thread
    that's not the main thread, so here we set a safer backend as a fix.
    Users can always disable this behavior by setting the config
    runner.fixMatplotlib = false.

    This fix is OS-independent. We didn't see a good reason to make this
    Mac-only. Consistency within Streamlit seemed more important.
    """
    if not config.get_option("runner.fixMatplotlib"):
        return
    # TODO: a better option may be to set os.environ["MPLBACKEND"] = "Agg"
    # very early (top of __init__.py or a setup.py entrypoint), before
    # anything imports pandas (which imports matplotlib). That is tricky
    # because it must run without consulting streamlit.config.get_option,
    # since that would import streamlit, and therefore matplotlib.
    try:
        import matplotlib

        matplotlib.use("Agg")
    except ImportError:
        # Matplotlib isn't installed; nothing to fix.
        pass
def _fix_tornado_crash():
    """Set default asyncio policy to be compatible with Tornado 6.

    Tornado 6 (at least) is not compatible with the default asyncio
    implementation on Windows, so on Windows + Python 3.8+ we fall back to
    the older SelectorEventLoopPolicy if the known-incompatible default
    policy is in use. This has to happen as early as possible to make it a
    low priority and overrideable.

    See: https://github.com/tornadoweb/tornado/issues/2608

    FIXME: if/when tornado supports the defaults in asyncio,
    remove and bump tornado requirement for py38
    """
    if not (env_util.IS_WINDOWS and sys.version_info >= (3, 8)):
        return
    import asyncio

    try:
        from asyncio import (
            WindowsProactorEventLoopPolicy,
            WindowsSelectorEventLoopPolicy,
        )
    except ImportError:
        # Not affected
        return
    if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
        # WindowsProactorEventLoopPolicy is not compatible with
        # Tornado 6 fallback to the pre-3.8 default of Selector
        asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def _fix_sys_argv(script_path, args):
"""sys.argv needs to exclude streamlit arguments and parameters
and be set to what a user's script may expect.
"""
import sys
sys.argv = [script_path] + list(args)
def _on_server_start(server):
    """Server-start callback: print the app URLs and maybe open a browser."""
    _print_url()

    def maybe_open_browser():
        if config.get_option("server.headless"):
            # Don't open browser when in headless mode.
            return
        if server.browser_is_connected:
            # Don't auto-open browser if there's already a browser connected.
            # This can happen if there's an old tab repeatedly trying to
            # connect, and it happens to succeed before we launch the browser.
            return
        if config.is_manually_set("browser.serverAddress"):
            addr = config.get_option("browser.serverAddress")
        else:
            addr = "localhost"
        util.open_browser(Report.get_url(addr))

    # Schedule the browser to open using the IO Loop on the main thread, but
    # only if no other browser connects within 1s.
    ioloop = tornado.ioloop.IOLoop.current()
    ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)
def _print_url():
    """Print the URL(s) at which the running Streamlit app is reachable."""
    title_message = "You can now view your Streamlit app in your browser."

    # Pick the set of (label, url) pairs according to the configuration.
    if config.is_manually_set("browser.serverAddress"):
        server_address = config.get_option("browser.serverAddress")
        labeled_urls = [("URL", Report.get_url(server_address))]
    elif config.get_option("server.headless"):
        labeled_urls = [
            ("Network URL", Report.get_url(net_util.get_internal_ip())),
            ("External URL", Report.get_url(net_util.get_external_ip())),
        ]
    else:
        labeled_urls = [
            ("Local URL", Report.get_url("localhost")),
            ("Network URL", Report.get_url(net_util.get_internal_ip())),
        ]

    click.secho("")
    click.secho(" %s" % title_message, fg="blue", bold=True)
    click.secho("")
    for url_name, url in labeled_urls:
        url_util.print_url(url_name, url)
    click.secho("")
def run(script_path, command_line, args):
    """Run a script in a separate thread and start a server for the app.

    This starts a blocking ioloop.

    Parameters
    ----------
    script_path : str
    command_line : str
    args : [str]
    """
    # Environment fixups must run before the server/script start.
    _fix_sys_path(script_path)
    _fix_matplotlib_crash()
    _fix_tornado_crash()
    _fix_sys_argv(script_path, args)

    # Install a signal handler that will shut down the ioloop
    # and close all our threads
    _set_up_signal_handler()

    ioloop = tornado.ioloop.IOLoop.current()

    # Create and start the server.
    server = Server(ioloop, script_path, command_line)
    server.start(_on_server_start)

    # (Must come after start(), because this starts a new thread and start()
    # may call sys.exit() which doesn't kill other threads.)
    server.add_preheated_report_session()

    # Start the ioloop. This function will not return until the
    # server is shut down.
    ioloop.start()
|
from EventStudy.price_fetcher import PriceFetcher
from EventStudy.return_calculator import ReturnCalculator
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RuleGroupArgs', 'RuleGroup']
@pulumi.input_type
class RuleGroupArgs:
def __init__(__self__, *,
capacity: pulumi.Input[int],
scope: pulumi.Input[str],
visibility_config: pulumi.Input['RuleGroupVisibilityConfigArgs'],
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a RuleGroup resource.
:param pulumi.Input[int] capacity: The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
:param pulumi.Input[str] scope: Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
:param pulumi.Input['RuleGroupVisibilityConfigArgs'] visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
:param pulumi.Input[str] description: A friendly description of the rule group.
:param pulumi.Input[str] name: The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
:param pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]] rules: The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: An array of key:value pairs to associate with the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider .
"""
pulumi.set(__self__, "capacity", capacity)
pulumi.set(__self__, "scope", scope)
pulumi.set(__self__, "visibility_config", visibility_config)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
    # NOTE(review): this block appears machine-generated (Pulumi SDK generator
    # style); keep changes to documentation only.
    @property
    @pulumi.getter
    def capacity(self) -> pulumi.Input[int]:
        """
        The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
        """
        return pulumi.get(self, "capacity")
    @capacity.setter
    def capacity(self, value: pulumi.Input[int]):
        pulumi.set(self, "capacity", value)
    @property
    @pulumi.getter
    def scope(self) -> pulumi.Input[str]:
        """
        Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
        """
        return pulumi.get(self, "scope")
    @scope.setter
    def scope(self, value: pulumi.Input[str]):
        pulumi.set(self, "scope", value)
    @property
    @pulumi.getter(name="visibilityConfig")
    def visibility_config(self) -> pulumi.Input['RuleGroupVisibilityConfigArgs']:
        """
        Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
        """
        return pulumi.get(self, "visibility_config")
    @visibility_config.setter
    def visibility_config(self, value: pulumi.Input['RuleGroupVisibilityConfigArgs']):
        pulumi.set(self, "visibility_config", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A friendly description of the rule group.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # NOTE(review): the docstring below looks copied from a custom-header field
    # by the doc generator; for a rule group it presumably means "a friendly
    # name of the rule group" — confirm against the provider schema.
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]]]:
        """
        The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
        """
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]]]):
        pulumi.set(self, "rules", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of key:value pairs to associate with the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
# NOTE(review): appears machine-generated (Pulumi SDK generator style);
# documentation-only edits below.
@pulumi.input_type
class _RuleGroupState:
    def __init__(__self__, *,
                 arn: Optional[pulumi.Input[str]] = None,
                 capacity: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 lock_token: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_config: Optional[pulumi.Input['RuleGroupVisibilityConfigArgs']] = None):
        """
        Input properties used for looking up and filtering RuleGroup resources.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the rule group.
        :param pulumi.Input[int] capacity: The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
        :param pulumi.Input[str] description: A friendly description of the rule group.
        :param pulumi.Input[str] name: The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
        :param pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]] rules: The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
        :param pulumi.Input[str] scope: Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key:value pairs to associate with the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        :param pulumi.Input['RuleGroupVisibilityConfigArgs'] visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
        """
        # All state fields are optional: this type is only used for lookup /
        # filtering, so only the supplied values are stored.
        if arn is not None:
            pulumi.set(__self__, "arn", arn)
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if lock_token is not None:
            pulumi.set(__self__, "lock_token", lock_token)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if rules is not None:
            pulumi.set(__self__, "rules", rules)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tags_all is not None:
            pulumi.set(__self__, "tags_all", tags_all)
        if visibility_config is not None:
            pulumi.set(__self__, "visibility_config", visibility_config)
    @property
    @pulumi.getter
    def arn(self) -> Optional[pulumi.Input[str]]:
        """
        The Amazon Resource Name (ARN) of the rule group.
        """
        return pulumi.get(self, "arn")
    @arn.setter
    def arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "arn", value)
    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input[int]]:
        """
        The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
        """
        return pulumi.get(self, "capacity")
    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "capacity", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A friendly description of the rule group.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    # lock_token is a provider-internal optimistic-locking token; no public
    # documentation is generated for it upstream.
    @property
    @pulumi.getter(name="lockToken")
    def lock_token(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "lock_token")
    @lock_token.setter
    def lock_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lock_token", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]]]:
        """
        The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
        """
        return pulumi.get(self, "rules")
    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RuleGroupRuleArgs']]]]):
        pulumi.set(self, "rules", value)
    @property
    @pulumi.getter
    def scope(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
        """
        return pulumi.get(self, "scope")
    @scope.setter
    def scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scope", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of key:value pairs to associate with the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
    @tags_all.setter
    def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags_all", value)
    @property
    @pulumi.getter(name="visibilityConfig")
    def visibility_config(self) -> Optional[pulumi.Input['RuleGroupVisibilityConfigArgs']]:
        """
        Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
        """
        return pulumi.get(self, "visibility_config")
    @visibility_config.setter
    def visibility_config(self, value: Optional[pulumi.Input['RuleGroupVisibilityConfigArgs']]):
        pulumi.set(self, "visibility_config", value)
# NOTE(review): appears machine-generated (Pulumi SDK generator style);
# documentation-only edits below.
class RuleGroup(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 capacity: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleGroupRuleArgs']]]]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_config: Optional[pulumi.Input[pulumi.InputType['RuleGroupVisibilityConfigArgs']]] = None,
                 __props__=None):
        """
        Creates a WAFv2 Rule Group resource.
        ## Example Usage
        ### Simple
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.wafv2.RuleGroup("example",
            capacity=2,
            rules=[aws.wafv2.RuleGroupRuleArgs(
                action=aws.wafv2.RuleGroupRuleActionArgs(
                    allow=aws.wafv2.RuleGroupRuleActionAllowArgs(),
                ),
                name="rule-1",
                priority=1,
                statement=aws.wafv2.RuleGroupRuleStatementArgs(
                    geo_match_statement=aws.wafv2.RuleGroupRuleStatementGeoMatchStatementArgs(
                        country_codes=[
                            "US",
                            "NL",
                        ],
                    ),
                ),
                visibility_config={
                    "cloudwatchMetricsEnabled": False,
                    "metric_name": "friendly-rule-metric-name",
                    "sampledRequestsEnabled": False,
                },
            )],
            scope="REGIONAL",
            visibility_config=aws.wafv2.RuleGroupVisibilityConfigArgs(
                cloudwatch_metrics_enabled=False,
                metric_name="friendly-metric-name",
                sampled_requests_enabled=False,
            ))
        ```
        ## Import
        WAFv2 Rule Group can be imported using `ID/name/scope` e.g.
        ```sh
        $ pulumi import aws:wafv2/ruleGroup:RuleGroup example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] capacity: The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
        :param pulumi.Input[str] description: A friendly description of the rule group.
        :param pulumi.Input[str] name: The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleGroupRuleArgs']]]] rules: The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
        :param pulumi.Input[str] scope: Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key:value pairs to associate with the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        :param pulumi.Input[pulumi.InputType['RuleGroupVisibilityConfigArgs']] visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RuleGroupArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates a WAFv2 Rule Group resource.
        ## Example Usage
        ### Simple
        ```python
        import pulumi
        import pulumi_aws as aws
        example = aws.wafv2.RuleGroup("example",
            capacity=2,
            rules=[aws.wafv2.RuleGroupRuleArgs(
                action=aws.wafv2.RuleGroupRuleActionArgs(
                    allow=aws.wafv2.RuleGroupRuleActionAllowArgs(),
                ),
                name="rule-1",
                priority=1,
                statement=aws.wafv2.RuleGroupRuleStatementArgs(
                    geo_match_statement=aws.wafv2.RuleGroupRuleStatementGeoMatchStatementArgs(
                        country_codes=[
                            "US",
                            "NL",
                        ],
                    ),
                ),
                visibility_config={
                    "cloudwatchMetricsEnabled": False,
                    "metric_name": "friendly-rule-metric-name",
                    "sampledRequestsEnabled": False,
                },
            )],
            scope="REGIONAL",
            visibility_config=aws.wafv2.RuleGroupVisibilityConfigArgs(
                cloudwatch_metrics_enabled=False,
                metric_name="friendly-metric-name",
                sampled_requests_enabled=False,
            ))
        ```
        ## Import
        WAFv2 Rule Group can be imported using `ID/name/scope` e.g.
        ```sh
        $ pulumi import aws:wafv2/ruleGroup:RuleGroup example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL
        ```
        :param str resource_name: The name of the resource.
        :param RuleGroupArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatches between the two overloads above: either an args object or
        # individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(RuleGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 capacity: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleGroupRuleArgs']]]]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 visibility_config: Optional[pulumi.Input[pulumi.InputType['RuleGroupVisibilityConfigArgs']]] = None,
                 __props__=None):
        # Shared implementation backing both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RuleGroupArgs.__new__(RuleGroupArgs)
            # Required properties are only enforced when creating a new
            # resource; a URN lookup may omit them.
            if capacity is None and not opts.urn:
                raise TypeError("Missing required property 'capacity'")
            __props__.__dict__["capacity"] = capacity
            __props__.__dict__["description"] = description
            __props__.__dict__["name"] = name
            __props__.__dict__["rules"] = rules
            if scope is None and not opts.urn:
                raise TypeError("Missing required property 'scope'")
            __props__.__dict__["scope"] = scope
            __props__.__dict__["tags"] = tags
            __props__.__dict__["tags_all"] = tags_all
            if visibility_config is None and not opts.urn:
                raise TypeError("Missing required property 'visibility_config'")
            __props__.__dict__["visibility_config"] = visibility_config
            # Output-only properties start as None and are populated by the
            # engine after creation.
            __props__.__dict__["arn"] = None
            __props__.__dict__["lock_token"] = None
        super(RuleGroup, __self__).__init__(
            'aws:wafv2/ruleGroup:RuleGroup',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            arn: Optional[pulumi.Input[str]] = None,
            capacity: Optional[pulumi.Input[int]] = None,
            description: Optional[pulumi.Input[str]] = None,
            lock_token: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleGroupRuleArgs']]]]] = None,
            scope: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            visibility_config: Optional[pulumi.Input[pulumi.InputType['RuleGroupVisibilityConfigArgs']]] = None) -> 'RuleGroup':
        """
        Get an existing RuleGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] arn: The Amazon Resource Name (ARN) of the rule group.
        :param pulumi.Input[int] capacity: The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
        :param pulumi.Input[str] description: A friendly description of the rule group.
        :param pulumi.Input[str] name: The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleGroupRuleArgs']]]] rules: The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
        :param pulumi.Input[str] scope: Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of key:value pairs to associate with the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
        :param pulumi.Input[pulumi.InputType['RuleGroupVisibilityConfigArgs']] visibility_config: Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _RuleGroupState.__new__(_RuleGroupState)
        __props__.__dict__["arn"] = arn
        __props__.__dict__["capacity"] = capacity
        __props__.__dict__["description"] = description
        __props__.__dict__["lock_token"] = lock_token
        __props__.__dict__["name"] = name
        __props__.__dict__["rules"] = rules
        __props__.__dict__["scope"] = scope
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tags_all"] = tags_all
        __props__.__dict__["visibility_config"] = visibility_config
        return RuleGroup(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def arn(self) -> pulumi.Output[str]:
        """
        The Amazon Resource Name (ARN) of the rule group.
        """
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter
    def capacity(self) -> pulumi.Output[int]:
        """
        The web ACL capacity units (WCUs) required for this rule group. See [here](https://docs.aws.amazon.com/waf/latest/APIReference/API_CreateRuleGroup.html#API_CreateRuleGroup_RequestSyntax) for general information and [here](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rule-statements-list.html) for capacity specific information.
        """
        return pulumi.get(self, "capacity")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A friendly description of the rule group.
        """
        return pulumi.get(self, "description")
    # lock_token is a provider-internal optimistic-locking token; no public
    # documentation is generated for it upstream.
    @property
    @pulumi.getter(name="lockToken")
    def lock_token(self) -> pulumi.Output[str]:
        return pulumi.get(self, "lock_token")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the custom header. For custom request header insertion, when AWS WAF inserts the header into the request, it prefixes this name `x-amzn-waf-`, to avoid confusion with the headers that are already in the request. For example, for the header name `sample`, AWS WAF inserts the header `x-amzn-waf-sample`.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def rules(self) -> pulumi.Output[Optional[Sequence['outputs.RuleGroupRule']]]:
        """
        The rule blocks used to identify the web requests that you want to `allow`, `block`, or `count`. See Rules below for details.
        """
        return pulumi.get(self, "rules")
    @property
    @pulumi.getter
    def scope(self) -> pulumi.Output[str]:
        """
        Specifies whether this is for an AWS CloudFront distribution or for a regional application. Valid values are `CLOUDFRONT` or `REGIONAL`. To work with CloudFront, you must also specify the region `us-east-1` (N. Virginia) on the AWS provider.
        """
        return pulumi.get(self, "scope")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A map of key:value pairs to associate with the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="tagsAll")
    def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
        """
        A map of tags assigned to the resource, including those inherited from the provider.
        """
        return pulumi.get(self, "tags_all")
    @property
    @pulumi.getter(name="visibilityConfig")
    def visibility_config(self) -> pulumi.Output['outputs.RuleGroupVisibilityConfig']:
        """
        Defines and enables Amazon CloudWatch metrics and web request sample collection. See Visibility Configuration below for details.
        """
        return pulumi.get(self, "visibility_config")
|
import requests
import time
import logging
import datetime
import os

# Root logger setup: the logger's own level is the floor; a handler whose
# level is higher than this floor further restricts what that handler emits.
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Ensure the log directory exists; logging.FileHandler raises
# FileNotFoundError if the target directory is missing.
os.makedirs('log', exist_ok=True)

# One log file per day: log/sys_YYYY-MM-DD.log, WARNING and above only.
log_file = 'log/sys_%s.log' % datetime.datetime.now().strftime('%Y-%m-%d')
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.WARNING)
log_formatter = logging.Formatter('%(asctime)s[%(levelname)s]: %(message)s')
file_handler.setFormatter(log_formatter)

# Stream handler: INFO and above to sys.stderr (the StreamHandler default).
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)

# NOTE: if logging.basicConfig() was called earlier in the process, the root
# logger already owns the handler basicConfig created, so records will be
# duplicated across all attached handlers.
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
|
#!/usr/bin/env python3
import pygame
from Game.Classes.image import image
class environment(object):
    """A named background/scenery element that can be drawn onto a surface.

    The image is loaded lazily on first access, so constructing an
    environment does not require the asset to exist yet.
    """

    def __init__(self, name, ecran, posX=0, posY=0):
        # ecran: destination surface to blit onto; posX/posY: top-left corner.
        self.ecran = ecran
        self.posX = posX
        self.posY = posY
        self.name = name
        self.image = None  # lazily-loaded pygame.Surface

    def getImage(self):
        """Return this environment's surface, loading it on first use."""
        if self.image is None:
            path = image.getImagePath() + "Environment/" + self.name + ".png"
            self.image = pygame.image.load(path).convert_alpha()
        return self.image

    @property
    def startPosX(self):
        """Left edge of the environment in screen coordinates."""
        return self.posX

    @property
    def endPosX(self):
        """Right edge in screen coordinates.

        Uses getImage() so the image is loaded on demand instead of raising
        AttributeError when accessed before the first getImage() call.
        """
        return self.posX + self.getImage().get_width()

    def refresh(self):
        """Blit the environment at its current position."""
        self.ecran.blit(self.getImage(), (self.posX, self.posY))
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Register gcloud as a Docker credential helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.docker import credential_utils as cred_utils
from googlecloudsdk.core.util import files as file_utils
class ConfigureDockerError(exceptions.Error):
  """Raised when gcloud cannot be registered as a Docker credential helper."""
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class ConfigureDocker(base.Command):
# pylint: disable=line-too-long
r"""Register `gcloud` as a Docker credential helper.
{command} adds the Docker `credHelper` entry to Docker's configuration file,
or creates the file if it doesn't exist. This will register `gcloud` as the
credential helper for all Google-supported Docker registries. If the Docker
configuration already contains a `credHelper` entry, it will be overwritten.
Note: `docker` and `gcloud` need to be on the same system `PATH` to work
correctly.
Note: This command will not work for `docker` installed via Snap, as the
`docker` snap package does not currently provide an interface for credential
helpers.
For more details on Docker registries, see
[](https://docs.docker.com/registry/).
For more details on Docker credential helpers, see
[](https://docs.docker.com/engine/reference/commandline/login/#credential-helpers).
For more details on the Google Container Registry's Docker credential helper,
see [](https://github.com/GoogleCloudPlatform/docker-credential-gcr).
"""
# pylint: enable=line-too-long
def DockerCredentialGcloudExists(self):
return file_utils.SearchForExecutableOnPath(
'docker-credential-gcloud') or file_utils.SearchForExecutableOnPath(
'docker-credential-gcloud.cmd')
def DockerExists(self):
return file_utils.SearchForExecutableOnPath(
'docker') or file_utils.SearchForExecutableOnPath('docker.exe')
@staticmethod
def Args(parser):
"""Set args for configure-docker."""
parser.add_argument(
'registries',
nargs='?',
help='The comma-separated list of registries to configure the credential helper for.'
)
parser.add_argument(
'--include-artifact-registry',
action='store_true',
help='Whether to include all Artifact Registry domains.',
hidden=True)
def Run(self, args):
"""Run the configure-docker command."""
if not self.DockerCredentialGcloudExists():
log.warning('`docker-credential-gcloud` not in system PATH.\n'
'gcloud\'s Docker credential helper can be configured but '
'it will not work until this is corrected.')
current_config = cred_utils.Configuration.ReadFromDisk()
if self.DockerExists():
if not current_config.SupportsRegistryHelpers():
raise ConfigureDockerError(
'Invalid Docker version: The version of your Docker client is '
'[{}]; version [{}] or higher is required to support Docker '
'credential helpers.'.format(
current_config.DockerVersion(),
cred_utils.MIN_DOCKER_CONFIG_HELPER_VERSION))
else:
log.warning(
'`docker` not in system PATH.\n'
'`docker` and `docker-credential-gcloud` need to be in the same PATH '
'in order to work correctly together.\n'
'gcloud\'s Docker credential helper can be configured but '
'it will not work until this is corrected.')
current_helpers = current_config.GetRegisteredCredentialHelpers()
current_helper_map = {}
if current_helpers:
log.warning('Your config file at [{0}] contains these credential helper '
'entries:\n\n{1}'.format(
current_config.path,
json.dumps(current_helpers, indent=2)))
current_helper_map = current_helpers[cred_utils.CREDENTIAL_HELPER_KEY]
# Use the value from the argument, otherwise the default list.
if args.registries:
log.status.Print('Adding credentials for: {0}'.format(args.registries))
registries = filter(self.CheckValidRegistry, args.registries.split(','))
new_helpers = cred_utils.GetGcloudCredentialHelperConfig(registries)
else:
# If include-artifact-registry is set, add all GCR and AR repos, otherwise
# just GCR repos.
if args.include_artifact_registry:
log.status.Print('Adding credentials for all GCR and AR repositories.')
else:
log.status.Print('Adding credentials for all GCR repositories.')
log.warning('A long list of credential helpers may cause delays running '
'\'docker build\'. We recommend passing the registry name to '
'configure only the registry you are using.')
new_helpers = cred_utils.GetGcloudCredentialHelperConfig(
None, args.include_artifact_registry)
# Merge in the new settings so that existing configs are preserved.
merged_helper_map = current_helper_map.copy()
merged_helper_map.update(new_helpers[cred_utils.CREDENTIAL_HELPER_KEY])
if current_helper_map == merged_helper_map:
log.status.Print(
'gcloud credential helpers already registered correctly.')
return
merged_helpers = {cred_utils.CREDENTIAL_HELPER_KEY: merged_helper_map}
console_io.PromptContinue(
message='After update, the following will be written to your Docker '
'config file located at [{0}]:\n {1}'.format(
current_config.path, json.dumps(merged_helpers, indent=2)),
cancel_on_no=True)
current_config.RegisterCredentialHelpers(merged_helper_map)
log.status.Print('Docker configuration file updated.')
def CheckValidRegistry(self, registry):
    """Returns True if *registry* is supported; otherwise warns and returns False."""
    if registry in cred_utils.SupportedRegistries():
        return True
    log.warning('{0} is not a supported registry'.format(registry))
    return False
|
"""
Cog containing EVE Online commands which can be used by anyone
"""
import logging
from datetime import datetime
import discord.ext.commands as commands
from utils.log import get_logger
def setup(bot):
    """Register the Eve cog on the provided discord bot."""
    bot.add_cog(Eve(bot))
class Eve:
    """Cog with EVE Online commands that any user may invoke."""

    def __init__(self, bot):
        self.bot = bot
        self.logger = get_logger(__name__, bot)
        # Display format for EVE time (EVE runs on UTC).
        self.fmt = "%H:%M:%S"

    @commands.command()
    async def evetime(self):
        """Reply in channel with the current EVE (UTC) time."""
        now = datetime.utcnow().strftime(self.fmt)
        return await self.bot.say("Current EVE time: " + now)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnection']
class PrivateEndpointConnection(pulumi.CustomResource):
    """A private endpoint connection resource (SDK-generated class; do not edit by hand)."""

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        A private endpoint connection

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['PrivateEndpointPropertyArgs']] private_endpoint: Private endpoint which the connection belongs to.
        :param pulumi.Input[str] private_endpoint_connection_name: The name of the private endpoint connection.
        :param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStatePropertyArgs']] private_link_service_connection_state: Connection state of the private endpoint connection.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] server_name: The name of the server.
        """
        # Legacy keyword support: __name__/__opts__ predate resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag that is sent to the Pulumi engine.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['private_endpoint'] = private_endpoint
            if private_endpoint_connection_name is None:
                raise TypeError("Missing required property 'private_endpoint_connection_name'")
            __props__['private_endpoint_connection_name'] = private_endpoint_connection_name
            __props__['private_link_service_connection_state'] = private_link_service_connection_state
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if server_name is None:
                raise TypeError("Missing required property 'server_name'")
            __props__['server_name'] = server_name
            # Output-only properties start as None and are resolved by the engine.
            __props__['name'] = None
            __props__['provisioning_state'] = None
            __props__['type'] = None
        # Aliases keep stacks created against other API versions working.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:dbforpostgresql/latest:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:dbforpostgresql/v20180601:PrivateEndpointConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PrivateEndpointConnection, __self__).__init__(
            'azure-nextgen:dbforpostgresql/v20180601privatepreview:PrivateEndpointConnection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
        """
        Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # No inputs are set here: the engine reads the state from the provider.
        __props__ = dict()

        return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointPropertyResponse']]:
        """
        Private endpoint which the connection belongs to.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStatePropertyResponse']]:
        """
        Connection state of the private endpoint connection.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        State of the private endpoint connection.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")

    def translate_output_property(self, prop):
        # Maps provider camelCase property names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Maps Python snake_case property names to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
import csv
import os
import tarfile
from io import StringIO
from .const import MAX_YEAR, MIN_YEAR
from .exceptions import UnknownExtensionException
def get_loxfiles(path):
    """Yield file names under *path* with a ".lox" extension (case-insensitive)."""
    for _root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            if filename.lower().endswith(".lox"):
                yield filename
def get_year_dirs(path):
    """Yield year numbers for direct subdirectories of *path* named like a year.

    A subdirectory qualifies when its name parses as an int within
    MIN_YEAR..MAX_YEAR and it contains at least one ".lox" file.
    """
    for root, subdirs, _files in os.walk(path):
        for name in subdirs:
            try:
                year = int(name)
            except (TypeError, ValueError):
                continue
            if not (MIN_YEAR <= year <= MAX_YEAR):
                continue
            # A real "year" directory must hold at least one .lox file.
            if next(get_loxfiles(os.path.join(root, name)), None) is not None:
                yield year
        # Only inspect the top level of *path*.
        break
def get_machines(path):
    """
    Return serial NOs of PrismaFlex® machines in given path.

    PrismaFlex® directory with exported historical data looks like:
    [serial number of the machine] / [year] / [examination file].LOX

    Yields any direct subdirectory of *path* that contains a 4-digit "year"
    directory (range 1900-2200) holding at least one ".LOX" file.
    """
    for root, subdirs, _files in os.walk(path):
        for name in subdirs:
            if next(get_year_dirs(os.path.join(root, name)), None) is not None:
                yield name
        # Only one level deep.
        break
def dir_contains_pflex_data(path):
    """
    Detect whether a given media contains any PrismaFlex® data.

    Intended for root directories of mounted filesystems (USB stick,
    memory card reader).

    :return: boolean
    """
    # True as soon as a single machine directory is found.
    return next(get_machines(path), None) is not None
# Maps a loxfile member extension to a (human-readable description, list of
# post-processing directives) pair consumed by get_loxfile_data. A value of
# None marks a known extension whose payload is deliberately skipped.
extmap = {
    "pci": ("Network config", ["ascii", "ini", "split", "strip"]),
    "pca": None,  # Some binary data, skip it
    "pcu": ("Therapy config", ["ascii", "ini", "split", "strip"]),
    "pcm": ("Machine config", ["ascii", "split"]),
    "plr": ("System events", ["ascii", "split", "strip", "noemptylines"]),
    "ple": ("User events", ["utf-16", "split", "strip", "csv"]),
    "plp": ("Pressure", ["utf-8", "split", "strip", "csv"]),
    "pls": ("Fluids", ["ascii", "split", "strip", "csv", ]),
    "ply": ("Syringe", ["ascii", "split", "strip", "csv", ]),
    "plc": ("PLC", ["ascii", "split", "strip", "csv", ]),
    "plt": ("Tare", ["ascii", "split", "strip", "csv", ]),
    "pli": ("PLI", ["ascii", "split", "strip", "csv", ]),
    "pll": ("PLL", ["ascii", "split", "strip", "csv", ])
}
def get_loxfile_data(fname):
    """
    Returns all the data contained in the loxfile.

    A loxfile is a gzipped tar archive; each member's extension selects a
    description and a list of post-processing directives from ``extmap``.

    :param fname: path to the loxfile (tar.gz archive)
    :return: dictionary mapping description -> parsed member data
    :raises UnknownExtensionException: for a member extension not in extmap
    """
    ret = {}
    # Context manager closes the archive even if parsing fails part-way.
    with tarfile.open(fname, "r:gz") as tar:
        for member in tar.getnames():
            # rsplit tolerates member names containing more than one dot
            # (the original split(".") unpacking raised ValueError there).
            ext = member.rsplit(".", 1)[-1].lower()
            if ext not in extmap:
                raise UnknownExtensionException(ext)
            if extmap[ext] is None:
                # Known binary payload - deliberately skipped.
                continue
            desc, extra = extmap[ext]
            fobj = tar.extractfile(member)
            if fobj is None:
                # Non-regular member (e.g. directory entry): nothing to read.
                continue
            data = fobj.read()
            for elem in extra:
                if elem == "strip":
                    data = [x.strip() for x in data]
                elif elem == "split":
                    data = data.split("\n")
                elif elem in ("utf-8", "utf-16", "ascii"):
                    data = data.decode(elem)
                elif elem == "csv":
                    data = [x for x in csv.reader(data, delimiter=';')]
                elif elem == "noemptylines":
                    data = [x for x in data if x]
                # NOTE(review): other directives (e.g. "ini") are ignored,
                # matching the original behavior.
            ret[desc] = data
    return ret
|
from typing import (
Any,
Dict,
List,
Optional,
Set,
TYPE_CHECKING,
Tuple,
Type,
Union,
cast,
)
import databases
import pydantic
import sqlalchemy
from sqlalchemy.sql.schema import ColumnCollectionConstraint
import ormar # noqa I100
from ormar import ForeignKey, Integer, ModelDefinitionError # noqa I100
from ormar.fields import BaseField
from ormar.fields.foreign_key import ForeignKeyField
from ormar.fields.many_to_many import ManyToManyField
from ormar.models.helpers import (
alias_manager,
expand_reverse_relationships,
extract_annotations_and_default_vals,
get_potential_fields,
get_pydantic_base_orm_config,
get_pydantic_field,
populate_default_options_values,
populate_meta_sqlalchemy_table_if_required,
populate_meta_tablename_columns_and_pk,
register_relation_in_alias_manager,
)
from ormar.models.quick_access_views import quick_access_set
from ormar.queryset import QuerySet
from ormar.relations.alias_manager import AliasManager
from ormar.signals import Signal, SignalEmitter
if TYPE_CHECKING: # pragma no cover
from ormar import Model
PARSED_FIELDS_KEY = "__parsed_fields__"
CONFIG_KEY = "Config"
class ModelMeta:
    """
    Class used for type hinting.
    Users can subclass this one for convenience but it's not required.
    The only requirement is that ormar.Model has to have inner class with name Meta.
    """

    # Name of the database table backing the model.
    tablename: str
    # sqlalchemy Table built from columns/constraints.
    table: sqlalchemy.Table
    # sqlalchemy MetaData the table is registered in.
    metadata: sqlalchemy.MetaData
    # Async database connection used by querysets.
    database: databases.Database
    # sqlalchemy columns generated from ormar fields.
    columns: List[sqlalchemy.Column]
    # Table-level constraints (e.g. unique columns).
    constraints: List[ColumnCollectionConstraint]
    # Name of the primary-key field.
    pkname: str
    # Mapping of field name -> ormar field class.
    model_fields: Dict[
        str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
    ]
    # Manager resolving table aliases for relations.
    alias_manager: AliasManager
    # Names of @property_field-decorated attributes exposed in dict().
    property_fields: Set
    # Emitter holding pre/post save/update/delete signals.
    signals: SignalEmitter
    # True for abstract (non-table) base models.
    abstract: bool
    # True when reverse relations still need expanding.
    requires_ref_update: bool
def check_if_field_has_choices(field: Type[BaseField]) -> bool:
    """
    Checks whether the given ormar field declares a non-empty choices set.

    If it does, a validator for this field needs to be attached.

    :param field: ormar field to check
    :type field: BaseField
    :return: result of the check
    :rtype: bool
    """
    return bool(getattr(field, "choices", None))
def choices_validator(cls: Type["Model"], values: Dict[str, Any]) -> Dict[str, Any]:
    """
    Validator that is attached to pydantic model pre root validators.
    Checks every field with choices: its value must be in field.choices.

    :raises ValueError: if field value is outside of allowed choices.
    :param cls: constructed class
    :type cls: Model class
    :param values: dictionary of field values (pydantic side)
    :type values: Dict[str, Any]
    :return: values if pass validation, otherwise exception is raised
    :rtype: Dict[str, Any]
    """
    for field_name, field in cls.Meta.model_fields.items():
        if not check_if_field_has_choices(field):
            continue
        value = values.get(field_name, ormar.Undefined)
        # Undefined means the field was not supplied at all - nothing to check.
        if value is not ormar.Undefined and value not in field.choices:
            raise ValueError(
                f"{field_name}: '{values.get(field_name)}' "
                f"not in allowed choices set:"
                f" {field.choices}"
            )
    return values
def populate_choices_validators(model: Type["Model"]) -> None:  # noqa CCR001
    """
    Checks if Model has any fields with choices set.
    If yes it adds choices validation into pre root validators.

    :param model: newly constructed Model
    :type model: Model class
    """
    if not meta_field_not_set(model=model, field_name="model_fields"):
        for _, field in model.Meta.model_fields.items():
            if check_if_field_has_choices(field):
                # Copy before appending: attribute lookup may return a list
                # inherited from a parent class, and appending to it in place
                # would leak this model's validator onto the parent model.
                validators = list(getattr(model, "__pre_root_validators__", []))
                if choices_validator not in validators:
                    validators.append(choices_validator)
                    model.__pre_root_validators__ = validators
def add_cached_properties(new_model: Type["Model"]) -> None:
    """
    Sets cached properties for both pydantic and ormar models.

    Quick access fields are fields grabbed in getattribute to skip all checks.
    Related fields and names start as None as they can change later, when
    children models are constructed and register themselves on the parent.
    All properties here are used as "cache" to not recalculate them constantly.

    :param new_model: newly constructed Model
    :type new_model: Model class
    """
    new_model._quick_access_fields = quick_access_set
    new_model._related_names = None
    new_model._related_fields = None
    new_model._pydantic_fields = set(new_model.__fields__)
def meta_field_not_set(model: Type["Model"], field_name: str) -> bool:
    """
    Checks if field with given name is missing from model.Meta or set to
    something falsy (in practice meaning None, as it's None or ormar Field only).

    :param model: newly constructed model
    :type model: Model class
    :param field_name: name of the ormar field
    :type field_name: str
    :return: True when the Meta attribute is absent or falsy
    :rtype: bool
    """
    # getattr default collapses "missing" and "falsy" into one check.
    return not getattr(model.Meta, field_name, None)
def add_property_fields(new_model: Type["Model"], attrs: Dict) -> None:  # noqa: CCR001
    """
    Checks class namespace for properties or functions with __property_field__.

    If an attribute has __property_field__ it was decorated with @property_field.
    Functions like this are exposed in dict() (therefore also fastapi result).
    Names of property fields are cached for quicker access / extraction.

    :param new_model: newly constructed model
    :type new_model: Model class
    :param attrs: class namespace being scanned
    :type attrs: Dict[str, str]
    """
    props = set()
    for var_name, value in attrs.items():
        # For properties the marker lives on the getter function.
        target = value.fget if isinstance(value, property) else value
        if getattr(target, "__property_field__", None):
            props.add(var_name)

    if meta_field_not_set(model=new_model, field_name="property_fields"):
        new_model.Meta.property_fields = props
    else:
        new_model.Meta.property_fields = new_model.Meta.property_fields.union(props)
def register_signals(new_model: Type["Model"]) -> None:  # noqa: CCR001
    """
    Registers the model's SignalEmitter and sets predefined signals.
    Predefined signals are (pre/post) + (save/update/delete).

    Signals are emitted in both model own methods and in selected queryset ones.

    :param new_model: newly constructed model
    :type new_model: Model class
    """
    if meta_field_not_set(model=new_model, field_name="signals"):
        emitter = SignalEmitter()
        for signal_name in (
            "pre_save", "pre_update", "pre_delete",
            "post_save", "post_update", "post_delete",
        ):
            setattr(emitter, signal_name, Signal())
        new_model.Meta.signals = emitter
def update_attrs_and_fields(
    attrs: Dict,
    new_attrs: Dict,
    model_fields: Dict,
    new_model_fields: Dict,
    new_fields: Set,
) -> Dict:
    """
    Updates __annotations__, values of model fields (so pydantic FieldInfos)
    as well as model.Meta.model_fields definitions from parents.

    :param attrs: new namespace for class being constructed
    :type attrs: Dict
    :param new_attrs: relevant part of the namespace extracted from parent class
    :type new_attrs: Dict
    :param model_fields: ormar fields defined in current class
    :type model_fields: Dict[str, BaseField]
    :param new_model_fields: ormar fields defined in parent classes
    :type new_model_fields: Dict[str, BaseField]
    :param new_fields: set of new fields names
    :type new_fields: Set[str]
    :return: merged model fields (current definitions win over parents')
    :rtype: Dict
    """
    attrs["__annotations__"].update(new_attrs["__annotations__"])
    for name in new_fields:
        attrs[name] = new_attrs[name]
    # Parent fields first, then current class overrides.
    merged_fields = dict(new_model_fields)
    merged_fields.update(model_fields)
    return merged_fields
def verify_constraint_names(
    base_class: "Model", model_fields: Dict, parent_value: List
) -> None:
    """
    Verifies that fields overwritten in subclasses did not remove any column
    name used by an inherited constraint, as that would fail during sqlalchemy
    Table creation.

    :param base_class: one of the parent classes
    :type base_class: Model or model parent class
    :param model_fields: ormar fields defined in current class
    :type model_fields: Dict[str, BaseField]
    :param parent_value: list of base class constraints
    :type parent_value: List
    """
    # Child aliases override the parent's for redefined fields.
    aliases = {x.name: x.get_alias() for x in base_class.Meta.model_fields.values()}
    aliases.update({x.name: x.get_alias() for x in model_fields.values()})
    known_columns = set(aliases.values())
    for constraint in parent_value:
        column_set = constraint._pending_colargs
        if not set(column_set).issubset(known_columns):
            raise ModelDefinitionError(
                f"Unique columns constraint "
                f"{column_set} "
                f"has column names "
                f"that are not in the model fields."
                f"\n Check columns redefined in subclasses "
                f"to verify that they have proper 'name' set."
            )
def update_attrs_from_base_meta(  # noqa: CCR001
    base_class: "Model", attrs: Dict, model_fields: Dict
) -> None:
    """
    Updates Meta parameters in child from parent if needed.

    :param base_class: one of the parent classes
    :type base_class: Model or model parent class
    :param attrs: new namespace for class being constructed
    :type attrs: Dict
    :param model_fields: ormar fields defined in current class
    :type model_fields: Dict[str, BaseField]
    """
    params_to_update = ["metadata", "database", "constraints"]
    for param in params_to_update:
        current_value = attrs.get("Meta", {}).__dict__.get(param, ormar.Undefined)
        parent_value = (
            base_class.Meta.__dict__.get(param) if hasattr(base_class, "Meta") else None
        )
        if parent_value:
            if param == "constraints":
                # Ensure overridden child fields did not drop columns that
                # inherited constraints still reference.
                verify_constraint_names(
                    base_class=base_class,
                    model_fields=model_fields,
                    parent_value=parent_value,
                )
                # Re-wrap raw column args as fresh UniqueColumns so the parent's
                # constraint objects are not shared between tables.
                parent_value = [
                    ormar.UniqueColumns(*x._pending_colargs) for x in parent_value
                ]
            if isinstance(current_value, list):
                # Child already declared a list (constraints) - extend it.
                current_value.extend(parent_value)
            else:
                # Otherwise inherit the parent's value outright.
                setattr(attrs["Meta"], param, parent_value)
def copy_and_replace_m2m_through_model(
    field: Type[ManyToManyField],
    field_name: str,
    table_name: str,
    parent_fields: Dict,
    attrs: Dict,
    meta: ModelMeta,
) -> None:
    """
    Clones class with Through model for m2m relations, appends child name to the name
    of the cloned class.

    Clones non foreign keys fields from parent model, the same with database columns.

    Modifies related_name with appending child table name after '_'

    For table name, the table name of child is appended after '_'.

    Removes the original sqlalchemy table from metadata if it was not removed.

    :param field: field with relations definition
    :type field: Type[ManyToManyField]
    :param field_name: name of the relation field
    :type field_name: str
    :param table_name: name of the table
    :type table_name: str
    :param parent_fields: dictionary of fields to copy to new models from parent
    :type parent_fields: Dict
    :param attrs: new namespace for class being constructed
    :type attrs: Dict
    :param meta: metaclass of currently created model
    :type meta: ModelMeta
    """
    # Clone the m2m field class itself so the parent's definition is untouched.
    copy_field: Type[BaseField] = type(  # type: ignore
        field.__name__, (ManyToManyField, BaseField), dict(field.__dict__)
    )
    related_name = field.related_name + "_" + table_name
    copy_field.related_name = related_name  # type: ignore

    through_class = field.through
    # Clone the Through model's Meta and make its table name child-specific.
    new_meta: ormar.ModelMeta = type(  # type: ignore
        "Meta", (), dict(through_class.Meta.__dict__),
    )
    new_meta.tablename += "_" + meta.tablename
    # create new table with copied columns but remove foreign keys
    # they will be populated later in expanding reverse relation
    if hasattr(new_meta, "table"):
        del new_meta.table
    new_meta.columns = [col for col in new_meta.columns if not col.foreign_keys]
    new_meta.model_fields = {
        name: field
        for name, field in new_meta.model_fields.items()
        if not issubclass(field, ForeignKeyField)
    }
    populate_meta_sqlalchemy_table_if_required(new_meta)
    # New Through model named after the original plus the child class name.
    copy_name = through_class.__name__ + attrs.get("__name__", "")
    copy_through = type(copy_name, (ormar.Model,), {"Meta": new_meta})
    copy_field.through = copy_through

    parent_fields[field_name] = copy_field

    # Drop the original through table so it is not created for the child too.
    if through_class.Meta.table in through_class.Meta.metadata:
        through_class.Meta.metadata.remove(through_class.Meta.table)
def copy_data_from_parent_model(  # noqa: CCR001
    base_class: Type["Model"],
    curr_class: type,
    attrs: Dict,
    model_fields: Dict[
        str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
    ],
) -> Tuple[Dict, Dict]:
    """
    Copy the key parameters [database, metadata, property_fields and constraints]
    and fields from parent models. Overwrites them if needed.

    Only abstract classes can be subclassed.

    Since relation fields require different related_name for different children

    :raises ModelDefinitionError: if non abstract model is subclassed
    :param base_class: one of the parent classes
    :type base_class: Model or model parent class
    :param curr_class: current constructed class
    :type curr_class: Model or model parent class
    :param attrs: new namespace for class being constructed
    :type attrs: Dict
    :param model_fields: ormar fields defined in current class
    :type model_fields: Dict[str, BaseField]
    :return: updated attrs and model_fields
    :rtype: Tuple[Dict, Dict]
    """
    if attrs.get("Meta"):
        # Only abstract parents may be subclassed by a model with own fields.
        if model_fields and not base_class.Meta.abstract:  # type: ignore
            raise ModelDefinitionError(
                f"{curr_class.__name__} cannot inherit "
                f"from non abstract class {base_class.__name__}"
            )
        update_attrs_from_base_meta(
            base_class=base_class,  # type: ignore
            attrs=attrs,
            model_fields=model_fields,
        )
        parent_fields: Dict = dict()
        meta = attrs.get("Meta")
        if not meta:  # pragma: no cover
            raise ModelDefinitionError(
                f"Model {curr_class.__name__} declared without Meta"
            )
        # Fall back to pluralized lowercase class name when tablename is unset.
        table_name = (
            meta.tablename
            if hasattr(meta, "tablename") and meta.tablename
            else attrs.get("__name__", "").lower() + "s"
        )
        for field_name, field in base_class.Meta.model_fields.items():
            if issubclass(field, ManyToManyField):
                # m2m needs a cloned Through model per child class.
                copy_and_replace_m2m_through_model(
                    field=field,
                    field_name=field_name,
                    table_name=table_name,
                    parent_fields=parent_fields,
                    attrs=attrs,
                    meta=meta,
                )
            elif issubclass(field, ForeignKeyField) and field.related_name:
                # Clone fk fields so each child gets a unique related_name.
                copy_field = type(  # type: ignore
                    field.__name__, (ForeignKeyField, BaseField), dict(field.__dict__)
                )
                related_name = field.related_name + "_" + table_name
                copy_field.related_name = related_name  # type: ignore
                parent_fields[field_name] = copy_field
            else:
                parent_fields[field_name] = field

        # Fields defined on the child override the inherited ones.
        parent_fields.update(model_fields)  # type: ignore
        model_fields = parent_fields
    return attrs, model_fields
def extract_from_parents_definition(  # noqa: CCR001
    base_class: type,
    curr_class: type,
    attrs: Dict,
    model_fields: Dict[
        str, Union[Type[BaseField], Type[ForeignKeyField], Type[ManyToManyField]]
    ],
) -> Tuple[Dict, Dict]:
    """
    Extracts fields from base classes if they have valid ormar fields.

    If model was already parsed -> fields definitions need to be removed from class
    cause pydantic complains about field re-definition so after first child
    we need to extract from __parsed_fields__ not the class itself.

    If the class is parsed first time annotations and field definition is parsed
    from the class.__dict__.

    If the class is a ormar.Model it is skipped.

    :param base_class: one of the parent classes
    :type base_class: Model or model parent class
    :param curr_class: current constructed class
    :type curr_class: Model or model parent class
    :param attrs: new namespace for class being constructed
    :type attrs: Dict
    :param model_fields: ormar fields defined in current class
    :type model_fields: Dict[str, BaseField]
    :return: updated attrs and model_fields
    :rtype: Tuple[Dict, Dict]
    """
    if hasattr(base_class, "Meta"):
        # A real ormar Model parent - copy its Meta and fields wholesale.
        base_class = cast(Type["Model"], base_class)
        return copy_data_from_parent_model(
            base_class=base_class,
            curr_class=curr_class,
            attrs=attrs,
            model_fields=model_fields,
        )

    key = "__annotations__"
    if hasattr(base_class, PARSED_FIELDS_KEY):
        # model was already parsed -> fields definitions need to be removed from class
        # cause pydantic complains about field re-definition so after first child
        # we need to extract from __parsed_fields__ not the class itself
        new_attrs, new_model_fields = getattr(base_class, PARSED_FIELDS_KEY)

        new_fields = set(new_model_fields.keys())
        model_fields = update_attrs_and_fields(
            attrs=attrs,
            new_attrs=new_attrs,
            model_fields=model_fields,
            new_model_fields=new_model_fields,
            new_fields=new_fields,
        )
        return attrs, model_fields

    potential_fields = get_potential_fields(base_class.__dict__)
    if potential_fields:
        # parent model has ormar fields defined and was not parsed before
        new_attrs = {key: {k: v for k, v in base_class.__dict__.get(key, {}).items()}}
        new_attrs.update(potential_fields)

        new_fields = set(potential_fields.keys())
        # Remove field definitions from the parent so pydantic does not see a
        # re-definition; cache them under PARSED_FIELDS_KEY for later children.
        for name in new_fields:
            delattr(base_class, name)

        new_attrs, new_model_fields = extract_annotations_and_default_vals(new_attrs)
        setattr(base_class, PARSED_FIELDS_KEY, (new_attrs, new_model_fields))
        model_fields = update_attrs_and_fields(
            attrs=attrs,
            new_attrs=new_attrs,
            model_fields=model_fields,
            new_model_fields=new_model_fields,
            new_fields=new_fields,
        )
    return attrs, model_fields
class ModelMetaclass(pydantic.main.ModelMetaclass):
    def __new__(  # type: ignore # noqa: CCR001
        mcs: "ModelMetaclass", name: str, bases: Any, attrs: dict
    ) -> "ModelMetaclass":
        """
        Metaclass used by ormar Models that performs configuration
        and build of ormar Models.


        Sets pydantic configuration.
        Extract model_fields and convert them to pydantic FieldInfo,
        updates class namespace.

        Extracts settings and fields from parent classes.
        Fetches methods decorated with @property_field decorator
        to expose them later in dict().

        Construct parent pydantic Metaclass/ Model.

        If class has Meta class declared (so actual ormar Models) it also:

        * populate sqlalchemy columns, pkname and tables from model_fields
        * register reverse relationships on related models
        * registers all relations in alias manager that populates table_prefixes
        * exposes alias manager on each Model
        * creates QuerySet for each model and exposes it on a class

        :param name: name of current class
        :type name: str
        :param bases: base classes
        :type bases: Tuple
        :param attrs: class namespace
        :type attrs: Dict
        """
        attrs["Config"] = get_pydantic_base_orm_config()
        attrs["__name__"] = name
        attrs, model_fields = extract_annotations_and_default_vals(attrs)
        # Walk parents from most to least derived, merging their fields in.
        for base in reversed(bases):
            mod = base.__module__
            if mod.startswith("ormar.models.") or mod.startswith("pydantic."):
                continue
            attrs, model_fields = extract_from_parents_definition(
                base_class=base, curr_class=mcs, attrs=attrs, model_fields=model_fields
            )
        # Let pydantic build the actual model class from the merged namespace.
        new_model = super().__new__(  # type: ignore
            mcs, name, bases, attrs
        )

        add_cached_properties(new_model)

        if hasattr(new_model, "Meta"):
            populate_default_options_values(new_model, model_fields)
            add_property_fields(new_model, attrs)
            register_signals(new_model=new_model)
            populate_choices_validators(new_model)

            if not new_model.Meta.abstract:
                new_model = populate_meta_tablename_columns_and_pk(name, new_model)
                populate_meta_sqlalchemy_table_if_required(new_model.Meta)
                expand_reverse_relationships(new_model)
                for field in new_model.Meta.model_fields.values():
                    register_relation_in_alias_manager(field=field)

                # Auto-generated pk (not declared by user) needs a pydantic
                # field so it shows up on the model like declared fields do.
                if new_model.Meta.pkname not in attrs["__annotations__"]:
                    field_name = new_model.Meta.pkname
                    attrs["__annotations__"][field_name] = Optional[int]  # type: ignore
                    attrs[field_name] = None
                    new_model.__fields__[field_name] = get_pydantic_field(
                        field_name=field_name, model=new_model
                    )
                new_model.Meta.alias_manager = alias_manager
                new_model.objects = QuerySet(new_model)

        return new_model
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.keras models using tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import distributed_training_utils
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import loss_reduction
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import nest
# Seed shared by all synthetic-data generators in this file so runs are
# reproducible.
_RANDOM_SEED = 1337
# Number of training samples and shape/class-count of the synthetic data.
_TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2

# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.

# TODO(anjalisridhar): Add a decorator that will allow us to run these tests as
# part of the tf.keras unit tests suite.
def simple_sequential_model():
  """Builds a small Sequential MLP classifier over _INPUT_SIZE inputs."""
  layers = [
      keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE),
      keras.layers.Dropout(0.1),
      keras.layers.Dense(_NUM_CLASS, activation='softmax'),
  ]
  model = keras.models.Sequential()
  for layer in layers:
    model.add(layer)
  return model
def simple_subclassed_model(num_labels=_NUM_CLASS):
  """Returns a minimal subclassed keras.Model wrapping one Dense layer."""

  class _SimpleMLP(keras.Model):

    def __init__(self, num_labels):
      super(_SimpleMLP, self).__init__()
      self.dense = keras.layers.Dense(num_labels)

    def call(self, inputs):
      # Output comes straight from the single Dense layer.
      return self.dense(inputs)

  return _SimpleMLP(num_labels)
def simple_multi_inputs_multi_outputs_model():
  """Returns a functional model with two 16-d inputs and two softmax heads."""
  in_a = keras.layers.Input(shape=(16,), name='input_a')
  in_b = keras.layers.Input(shape=(16,), name='input_b')
  # Both heads read the same concatenated representation.
  concat = keras.layers.concatenate([in_a, in_b], name='merge')
  out_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(concat)
  out_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(concat)
  return keras.models.Model(inputs=[in_a, in_b], outputs=[out_c, out_d])
def get_multi_inputs_multi_outputs_data():
  """Returns (train_data, test_data) dicts for the multi-input/output model."""

  def _make_split(input_shape, num_classes):
    # Every split shares the sample counts and seed so results are stable.
    return testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=input_shape,
        num_classes=num_classes,
        random_seed=_RANDOM_SEED)

  (a_train, c_train), (a_test, c_test) = _make_split((16,), 3)
  (b_train, d_train), (b_test, d_test) = _make_split((16,), 2)
  (m_train, _), (m_test, _) = _make_split((8,), 2)

  # One-hot encode the targets for the softmax heads.
  c_train, c_test, d_train, d_test = [
      np_utils.to_categorical(labels)
      for labels in (c_train, c_test, d_train, d_test)
  ]

  train_data = {
      'input_a': a_train,
      'input_b': b_train,
      'input_m': m_train,
      'output_c': c_train,
      'output_d': d_train
  }
  test_data = {
      'input_a': a_test,
      'input_b': b_test,
      'input_m': m_test,
      'output_c': c_test,
      'output_d': d_test
  }
  return (train_data, test_data)
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
  """Optionally repeats, then batches `dataset` for the given strategy."""
  if repeat:
    dataset = dataset.repeat(repeat)
  # TPUs currently require fully defined input shapes, drop_remainder ensures
  # the input will have fully defined shapes.
  is_tpu = isinstance(
      distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1))
  return dataset.batch(batch_size, drop_remainder=is_tpu)
def get_model():
  """Returns a single-Dense functional model: 3-d input, 4-d output."""
  inputs = keras.layers.Input(shape=(3,), name='input')
  outputs = keras.layers.Dense(4, name='dense')(inputs)
  return keras.Model(inputs, outputs)
def get_sample_weights_model():
  """Returns a 1-d linear model initialized to the identity (y = 1*x + 0)."""
  inp = keras.layers.Input(shape=(1,), name='input')
  out = keras.layers.Dense(
      1, kernel_initializer='ones', bias_initializer='zeros',
      name='dense')(inp)
  return keras.Model(inp, out)
def get_dataset(distribution):
  """Returns a repeated, batched all-zeros (inputs, targets) dataset."""
  features = np.zeros((10, 3), dtype=np.float32)
  labels = np.zeros((10, 4), dtype=np.float32)
  ds = dataset_ops.Dataset.from_tensor_slices((features, labels)).repeat(100)
  return batch_wrapper(ds, 10, distribution)
def get_predict_dataset(distribution):
  """Returns a repeated, batched all-zeros inputs-only dataset."""
  features = np.zeros((10, 3), dtype=np.float32)
  ds = dataset_ops.Dataset.from_tensor_slices(features).repeat(100)
  return batch_wrapper(ds, 10, distribution)
def convert_numpy_to_dataset_with_unknown_cardinality(inputs, targets=None):
  """Wraps numpy data in a batched dataset whose cardinality is UNKNOWN.

  A pass-through `filter` hides the element count from the cardinality
  analysis, so callers must cope with an unknown number of batches.
  """
  if targets is None:
    slices = inputs
    keep_all = lambda inp: True
  else:
    slices = (inputs, targets)
    keep_all = lambda inp, target: True
  base = dataset_ops.Dataset.from_tensor_slices(slices)
  return base.filter(keep_all).batch(10, drop_remainder=True)
def multi_input_output_model():
  """Returns a two-branch model: outputs are [dense_2(b), dropout(dense_1(a))]."""
  input_a = keras.layers.Input(shape=(3,), name='input_a')
  input_b = keras.layers.Input(shape=(5,), name='input_b')
  # TODO(anjalisridhar): Change the output dimension of the second Dense layer
  # once the iterator output validation issue has been fixed.
  branch_a = keras.layers.Dense(7, name='dense_1')(input_a)
  branch_b = keras.layers.Dense(7, name='dense_2')(input_b)
  dropped = keras.layers.Dropout(0.5, name='dropout')(branch_a)
  return keras.models.Model([input_a, input_b], [branch_b, dropped])
# Non-TPU strategies, excluding the (no-op) default strategy.
strategies_minus_default_minus_tpu = [
    strategy_combinations.one_device_strategy,
    strategy_combinations.one_device_strategy_gpu,
    strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
    strategy_combinations.mirrored_strategy_with_two_gpus,
    strategy_combinations.central_storage_strategy_with_gpu_and_cpu
]
# All non-TPU strategies, including the default strategy.
strategies_minus_tpu = [
    strategy_combinations.default_strategy,
    strategy_combinations.one_device_strategy,
    strategy_combinations.one_device_strategy_gpu,
    strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
    strategy_combinations.mirrored_strategy_with_two_gpus,
    strategy_combinations.central_storage_strategy_with_gpu_and_cpu
]
# TPU strategies with different steps_per_run configurations.
tpu_strategies = [
    strategy_combinations.tpu_strategy,  # steps_per_run=2
    strategy_combinations.tpu_strategy_one_step
]
# Every strategy exercised by the tests in this file.
all_strategies = strategies_minus_tpu + tpu_strategies
def strategy_minus_tpu_combinations():
  """Non-TPU strategies crossed with graph and eager modes."""
  return combinations.combine(
      distribution=strategies_minus_tpu,
      mode=['graph', 'eager'])
def tpu_strategy_combinations():
  """TPU strategies crossed with graph and eager modes."""
  return combinations.combine(
      distribution=tpu_strategies,
      mode=['graph', 'eager'])
def tpu_strategy_combinations_graph_only():
  """TPU strategies restricted to graph mode only."""
  return combinations.combine(
      distribution=tpu_strategies,
      mode=['graph'])
def all_strategy_combinations():
  """Union of the non-TPU and TPU strategy/mode combinations."""
  non_tpu = strategy_minus_tpu_combinations()
  tpu = tpu_strategy_combinations()
  return non_tpu + tpu
def all_strategy_combinations_plus_run_distributed():
  """All strategies (non-TPU then TPU), each in graph and eager modes."""
  non_tpu = combinations.combine(
      distribution=strategies_minus_tpu,
      mode=['graph', 'eager'])
  tpu = combinations.combine(
      distribution=tpu_strategies,
      mode=['graph', 'eager'])
  return non_tpu + tpu
def all_strategy_minus_default_and_tpu_combinations():
  """Non-default, non-TPU strategies in both graph and eager modes."""
  non_default_strategies = [
      strategy_combinations.one_device_strategy,
      strategy_combinations.one_device_strategy_gpu,
      strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
      strategy_combinations.mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(
      distribution=non_default_strategies,
      mode=['graph', 'eager'])
def all_strategy_combinations_minus_default():
  """All strategy/mode combinations except the default strategy."""
  non_default = all_strategy_minus_default_and_tpu_combinations()
  tpu = tpu_strategy_combinations()
  return non_default + tpu
def strategy_and_optimizer_combinations():
  """Pairs each strategy/mode combination with the optimizers it runs with.

  The graph-mode TPU list includes the V1 optimizers; the eager-mode TPU
  list contains only Keras V2 optimizers.
  """
  non_tpu_optimizers = [
      strategy_combinations.adagrad_optimizer_v1_fn,
      strategy_combinations.adam_optimizer_v1_fn,
      strategy_combinations.gradient_descent_optimizer_v1_fn,
      strategy_combinations.rmsprop_optimizer_v1_fn,
      strategy_combinations.adadelta_optimizer_keras_v2_fn,
      strategy_combinations.adagrad_optimizer_keras_v2_fn,
      strategy_combinations.adam_optimizer_keras_v2_fn,
      strategy_combinations.adamax_optimizer_keras_v2_fn,
      strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
      strategy_combinations.nadam_optimizer_keras_v2_fn,
      strategy_combinations.rmsprop_optimizer_keras_v2_fn,
      strategy_combinations.ftrl_optimizer_keras_v2_fn
  ]
  tpu_graph_optimizers = [
      strategy_combinations.adagrad_optimizer_v1_fn,
      strategy_combinations.adam_optimizer_v1_fn,
      strategy_combinations.gradient_descent_optimizer_v1_fn,
      strategy_combinations.rmsprop_optimizer_v1_fn,
      strategy_combinations.adagrad_optimizer_keras_v2_fn,
      strategy_combinations.adam_optimizer_keras_v2_fn,
      strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
      strategy_combinations.rmsprop_optimizer_keras_v2_fn
  ]
  tpu_eager_optimizers = [
      strategy_combinations.adagrad_optimizer_keras_v2_fn,
      strategy_combinations.adam_optimizer_keras_v2_fn,
      strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
      strategy_combinations.rmsprop_optimizer_keras_v2_fn
  ]
  non_tpu_strategies = combinations.times(
      strategy_minus_tpu_combinations(),
      combinations.combine(optimizer=non_tpu_optimizers))
  tpu_strategies_graph = combinations.combine(
      distribution=tpu_strategies,
      mode=['graph'],
      optimizer=tpu_graph_optimizers)
  tpu_strategies_eager = combinations.combine(
      distribution=tpu_strategies,
      mode=['eager'],
      optimizer=tpu_eager_optimizers)
  return non_tpu_strategies + tpu_strategies_eager + tpu_strategies_graph
class BatchCountingCB(keras.callbacks.Callback):
  """Callback that records the batch index seen by each batch-level hook."""

  def __init__(self):
    super(BatchCountingCB, self).__init__()
    # One list per hook; tests inspect these attributes directly.
    for attr in ('train_begin_batches', 'train_end_batches',
                 'test_begin_batches', 'test_end_batches',
                 'predict_begin_batches', 'predict_end_batches'):
      setattr(self, attr, [])

  def on_train_batch_begin(self, batch, logs=None):
    self.train_begin_batches.append(batch)

  def on_train_batch_end(self, batch, logs=None):
    self.train_end_batches.append(batch)

  def on_test_batch_begin(self, batch, logs=None):
    self.test_begin_batches.append(batch)

  def on_test_batch_end(self, batch, logs=None):
    self.test_end_batches.append(batch)

  def on_predict_batch_begin(self, batch, logs=None):
    self.predict_begin_batches.append(batch)

  def on_predict_batch_end(self, batch, logs=None):
    self.predict_end_batches.append(batch)
class TestDistributionStrategyWithNumpyArrays(test.TestCase,
                                              parameterized.TestCase):
  """Tests Keras fit/evaluate/predict with numpy inputs under tf.distribute."""

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_no_batch_size(self, distribution):
    """get_input_params infers both steps and batch size from sample count."""
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size.
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Default global batch size 32 for input with 64 samples run in 2 steps.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 64, steps=None, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # Computed global batch size 20 is lower than 32 if we pass less samples.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 20, steps=None, batch_size=None)
      self.assertEqual(batch_size, 20 // replica_scale_factor)
      self.assertEqual(steps, 1)

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_no_batch_size(
      self, distribution):
    """get_input_params infers the batch size when `steps` is given."""
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size.
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Computed global batch size is correct for number of specified 1 step.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 64, steps=1, batch_size=None)
      self.assertEqual(batch_size, 64 // replica_scale_factor)
      self.assertEqual(steps, 1)

      # Computed global batch size is correct for number of specified 2 steps.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 64, steps=2, batch_size=None)
      self.assertEqual(batch_size, 32 // replica_scale_factor)
      self.assertEqual(steps, 2)

      # All samples can not be consumed in specified number of steps.
      with self.assertRaisesRegex(ValueError, 'not divisible by steps'):
        distributed_training_utils.get_input_params(
            distribution, 63, steps=2, batch_size=None)

      # This case is different for different strategies due to the
      # difference in supported batch size being global or per-replica.
      if replica_scale_factor == 1:
        # Computed global batch size is correct even if not shardable.
        steps, batch_size = distributed_training_utils.get_input_params(
            distribution, 63, steps=3, batch_size=None)
        self.assertEqual(batch_size, 21)
        self.assertEqual(steps, 3)
      else:
        # Computed global batch size can not be sharded across replicas.
        with self.assertRaisesRegex(
            ValueError, 'could not be sharded evenly '
            'across the sync replicas'):
          distributed_training_utils.get_input_params(
              distribution, 63, steps=1, batch_size=None)

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_no_steps_with_batch_size(
      self, distribution):
    """get_input_params infers `steps` when only the batch size is given."""
    # Calculate the per_replica_batch_size scaling factor for strategies
    # that use per_core_batch_size.
    replica_scale_factor = 1.0
    if not distributed_training_utils.global_batch_size_supported(distribution):
      replica_scale_factor = distribution.num_replicas_in_sync

    with self.cached_session():
      # Computed steps is correct for specified batch size.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 64, steps=None, batch_size=16)
      self.assertEqual(batch_size, 16)
      self.assertEqual(steps, 4 // replica_scale_factor)

      # Computed steps is correct for specified batch size.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 64, steps=None, batch_size=32)
      self.assertEqual(batch_size, 32)
      self.assertEqual(steps, 2 // replica_scale_factor)

  @combinations.generate(all_strategy_combinations())
  def test_calculating_input_params_with_steps_with_batch_size(
      self, distribution):
    """get_input_params validates user-supplied steps and batch size."""
    with self.cached_session():
      # No change to steps and batch size if both specified and feasible.
      steps, batch_size = distributed_training_utils.get_input_params(
          distribution, 64, steps=5, batch_size=3)
      self.assertEqual(batch_size, 3)
      self.assertEqual(steps, 5)

      # Number of samples is less than global batch size * steps.
      with self.assertRaisesRegex(ValueError, 'less than samples required'):
        distributed_training_utils.get_input_params(
            distribution, 64, steps=10, batch_size=13)

  @combinations.generate(all_strategy_combinations_plus_run_distributed())
  def test_calling_model_with_numpy_arrays(self, distribution):
    """fit/evaluate/predict accept plain numpy arrays under a strategy."""
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(0.001)
        model = get_model()
        loss = 'mse'
        metrics = ['mae']
        model.compile(
            optimizer,
            loss,
            metrics=metrics)

        inputs = np.zeros((64, 3), dtype=np.float32)
        targets = np.zeros((64, 4), dtype=np.float32)

        # Call fit with validation data.
        model.fit(
            inputs,
            targets,
            epochs=1,
            batch_size=2,
            verbose=0,
            validation_data=(inputs, targets))

        # TODO(anjalisridhar): We need tests for when the batch size and steps
        # are smaller and results in a 0 batch_size and steps value.
        model.evaluate(inputs, targets)
        model.evaluate(inputs, targets, batch_size=8)

        model.predict(inputs)
        model.predict(inputs, batch_size=8)

  @combinations.generate(all_strategy_combinations_plus_run_distributed())
  def test_calling_model_with_mixed_precision(self, distribution):
    """fit/evaluate/predict work with a mixed-precision policy active."""
    if isinstance(distribution.extended,
                  parameter_server_strategy.ParameterServerStrategyExtended):
      self.skipTest('b/152097775')

    # TPUs use bfloat16 for mixed precision; other devices use float16.
    if isinstance(distribution,
                  (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):
      policy_name = 'mixed_bfloat16'
    else:
      policy_name = 'mixed_float16'

    with self.cached_session(), \
        distribution.scope(), \
        policy.policy_scope(policy_name):
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      x = keras.layers.Input(shape=(3,), name='input')
      y = keras.layers.Dense(4, name='dense')(x)
      # Output activation runs in float32 so the loss is computed in float32.
      y = keras.layers.Activation('softmax', dtype='float32')(y)
      model = keras.Model(x, y)
      loss = 'mse'
      metrics = ['mae']
      model.compile(
          optimizer,
          loss,
          metrics=metrics)

      # We need to pass float32 since TPUs do not support float64, even though
      # these arrays will immediately be casted to bfloat16 on TPUs. We also
      # cannot pass bfloat16, as Numpy does not support it.
      inputs = np.zeros((64, 3), dtype='float32')
      targets = np.zeros((64, 4), dtype='float32')

      model.fit(
          inputs,
          targets,
          epochs=1,
          batch_size=2,
          verbose=0,
          validation_data=(inputs, targets))

      model.evaluate(inputs, targets)
      model.evaluate(inputs, targets, batch_size=8)

      model.predict(inputs)
      model.predict(inputs, batch_size=8)

  @combinations.generate(all_strategy_combinations_plus_run_distributed())
  def test_operator_overload_mixed_precision(self, distribution):
    """Gradients flow through AutoCastVariables used on both sides of '+'."""
    # Regression test that tests a fixed bug does not reoccur. Adding an
    # AutoCastVariable to a tensor on a TPU, where the variable was the LHS of
    # the '+' operator, used to cause the gradient w.r.t. the variable to be
    # None.
    if isinstance(distribution.extended,
                  parameter_server_strategy.ParameterServerStrategyExtended):
      self.skipTest('b/152097775')

    if isinstance(distribution,
                  (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):
      policy_name = 'mixed_bfloat16'
    else:
      policy_name = 'mixed_float16'

    class MyLayer(keras.layers.Layer):
      """Layer using its variables on both sides of the '+' operator."""

      def build(self, _):
        self.v1 = self.add_weight('v', ())
        self.v2 = self.add_weight('v', ())

      def call(self, inp):
        inp += self.v1  # variable on the RHS of '+='
        return self.v2 + inp  # variable on the LHS of '+'

    with self.cached_session(), distribution.scope():
      layer = MyLayer(dtype=policy.Policy(policy_name))

      def run_fn():
        x = np.array([1.])
        with backprop.GradientTape() as tape:
          y = layer(x)
        grad_v1, grad_v2 = tape.gradient(y, [layer.v1, layer.v2])
        return grad_v1, grad_v2

      if context.executing_eagerly():
        run_fn = def_function.function(run_fn)

      grad_v1, grad_v2 = distribution.run(run_fn)
      self.assertIsNotNone(grad_v1)
      self.assertIsNotNone(grad_v2)

  @combinations.generate(
      combinations.combine(
          distribution=[strategy_combinations.one_device_strategy] +
          tpu_strategies,
          mode=['graph', 'eager']))
  def test_optimizer_in_cross_replica_context_raises_error(self, distribution):
    """apply_gradients outside a replica context raises a RuntimeError."""
    with self.cached_session(), distribution.scope():
      model = keras.models.Sequential([keras.layers.Dense(1)])
      x = np.array([[1.]])
      with backprop.GradientTape() as tape:
        y = model(x)
      gradients = tape.gradient(y, model.trainable_variables)
      optimizer = gradient_descent_keras.SGD()

      with self.assertRaisesRegex(RuntimeError,
                                  'cannot be called in cross-replica context'):
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

  @combinations.generate(all_strategy_combinations_plus_run_distributed())
  def test_calling_model_with_nested_numpy_arrays(self, distribution):
    """Multi-input/multi-output models accept lists of numpy arrays."""
    with self.cached_session():
      with distribution.scope():
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        model = multi_input_output_model()
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]

      output_d_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      output_e_np = np.asarray(np.random.random((64, 7)), dtype=np.float32)
      targets = [output_d_np, output_e_np]

      # Call fit with validation data.
      model.fit(inputs, targets, epochs=1, batch_size=8, verbose=0)

      # TODO(anjalisridhar): We need tests for when the batch size and steps are
      # smaller and results in a 0 batch_size and steps value.
      model.evaluate(inputs, targets)
      model.evaluate(inputs, targets, batch_size=8)

      model.predict(inputs)
      model.predict(inputs, batch_size=8)

  @combinations.generate(
      combinations.combine(
          distribution=strategies_minus_tpu,
          mode=['graph', 'eager']))
  def test_numpy_with_sample_weights(self, distribution):
    """evaluate() weights per-sample losses by `sample_weight`."""
    with self.cached_session(), distribution.scope():
      model = get_sample_weights_model()
      optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

      inputs = np.array([[0], [1], [2], [3]], np.float32)
      targets = np.array([[2], [4], [6], [8]], np.float32)
      sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)

      result = model.evaluate(
          inputs,
          targets,
          batch_size=2,
          sample_weight=sample_weights,
          verbose=1)
      # The per sample loss is multipled by the corresponding sample weight. The
      # average of these weighted losses is the return value of the `evaluate`
      # call. For example, in the test above the average weighted loss is
      # calculated in the following manner:
      # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
      # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
      # final result = (batch_1 + batch_2) / 2 = 10.625.
      # The first time we divide by number of input samples and the second time
      # we divide by number of steps/batches that the loss is aggregated over.
      self.assertAllClose(result, 10.625)

      # We now test without passing sample_weights:
      # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5
      # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5
      # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
      result = model.evaluate(inputs, targets, batch_size=2, verbose=1)
      self.assertAllClose(result, 13.5)

  @combinations.generate(all_strategy_combinations_plus_run_distributed())
  def test_flatten_predict_outputs(self, distribution):
    """predict() returns one array per model output, covering all samples."""
    with self.cached_session():
      with distribution.scope():
        model = multi_input_output_model()
        optimizer_fn = gradient_descent_keras.SGD
        optimizer = optimizer_fn(learning_rate=0.001)
        loss = 'mse'
        model.compile(
            optimizer,
            loss)

      # We take 6 input samples with each input having a dimension of 3 or 5.
      input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
      input_b_np = np.asarray(np.random.random((6, 5)), dtype=np.float32)
      inputs = [input_a_np, input_b_np]

      outs = model.predict(inputs)
      # `predict` a list that is equal in length to the number of model outputs.
      # In this test our model has two outputs and each element of `outs`
      # corresponds to all the samples of one of the model outputs.
      self.assertLen(outs, 2)
      # Each of the output samples have a dimension of 7. We should process all
      # the available input samples(6).
      self.assertAllEqual([6, 7], outs[0].shape)
      self.assertAllEqual([6, 7], outs[1].shape)

  @combinations.generate(
      combinations.times(tpu_strategy_combinations_graph_only(),
                         combinations.combine(batch_size=[4, 6])))
  def test_evaluate_with_partial_batch(self, distribution, batch_size):
    """evaluate() metrics match a CPU model when the last batch is partial."""
    with self.cached_session():
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss, metrics=metrics)

      x = np.random.random((10, 3)).astype('float32')
      y = np.random.random((10, 4)).astype('float32')

      # As sample size is 10, we batch by 4 so that the last batch is
      # a partial batch. Also `evaluate()` using numpy array as inputs without
      # distribution strategy uses entire sample as a single batch. As so,
      # we remove parameters `batch_size` and `steps`.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      evaluate_ground_truth = cpu_model.evaluate(x, y)

      # We don't compare the loss as loss is currently not computed as metric
      # in Keras, the loss value is inaccurate for last partial batch due to
      # more weights for the last batch samples.
      steps = np.ceil(10.0 / batch_size)
      self.assertAllClose(
          model_with_ds_strategy.evaluate(
              x, y, batch_size=batch_size, steps=steps)[1:],
          evaluate_ground_truth[1:],
          atol=1e-5,
          rtol=1e-5)
      # Test that `steps` is inferred correctly when final partial batch exists.
      self.assertAllClose(
          model_with_ds_strategy.evaluate(x, y, batch_size=batch_size)[1:],
          evaluate_ground_truth[1:],
          atol=1e-5,
          rtol=1e-5)

  @combinations.generate(
      combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_with_partial_batch(self, distribution):
    """predict() matches a CPU model when the last batch is partial."""
    with self.cached_session():
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = get_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = get_model()
      cpu_model.compile(optimizer, loss)

      inputs = np.random.random((10, 3)).astype(np.float32)

      # As sample size is 10, we batch by 4 so that the last batch is
      # a partial batch. Also `predict()` using numpy array as inputs without
      # distribution strategy uses entire sample as a single batch. As so,
      # we remove parameters `batch_size` and `steps`.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      predict_ground_truth = cpu_model.predict(inputs)
      self.assertAllClose(
          model_with_ds_strategy.predict(inputs, batch_size=4, steps=3),
          predict_ground_truth,
          atol=1e-5,
          rtol=1e-5)
      # Test that `steps` is inferred correctly when final partial batch exists.
      self.assertAllClose(
          model_with_ds_strategy.predict(inputs, batch_size=4),
          predict_ground_truth,
          atol=1e-5,
          rtol=1e-5)

  @combinations.generate(tpu_strategy_combinations_graph_only())
  def test_no_target_model(self, distribution):
    """Models whose loss comes only from add_loss train without targets."""
    with self.cached_session():
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)

      class MyLayer(keras.layers.Layer):
        """Layer contributing an activity loss via add_loss."""

        def call(self, inputs, training=None):
          self.add_loss(math_ops.reduce_sum(inputs), inputs=True)
          return inputs

      with distribution.scope():
        model = keras.models.Sequential()
        model.add(
            keras.layers.Dense(16, activation='relu', input_shape=_INPUT_SIZE))
        model.add(MyLayer())
        model.add(keras.layers.Dense(_NUM_CLASS, activation='softmax'))

        model.compile(optimizer)
        inputs = np.zeros((20, 10), np.float32)

        model.fit(inputs, epochs=1, steps_per_epoch=2)
        model.predict(inputs, steps=1)
        model.evaluate(inputs, steps=1)

  @combinations.generate(
      combinations.times(
          tpu_strategy_combinations_graph_only()))
  def test_predict_multi_output_model_with_partial_batch(
      self, distribution):
    """Multi-output predict() matches a CPU model with a partial last batch."""
    with self.cached_session():
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      loss = 'mse'

      with distribution.scope():
        model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
        model_with_ds_strategy.compile(
            optimizer,
            loss)

      cpu_model = simple_multi_inputs_multi_outputs_model()
      cpu_model.compile(optimizer, loss)

      input_data, _ = get_multi_inputs_multi_outputs_data()
      input_dict = {
          'input_a': input_data['input_a'],
          'input_b': input_data['input_b'],
      }

      # As sample size is 200, we batch by 18 so that the last batch is
      # a partial batch. Also `fit()` using numpy array as inputs without
      # distribution strategy uses entire sample as a single batch. As so,
      # we remove parameters `batch_size` and `steps`.
      cpu_model.set_weights(model_with_ds_strategy.get_weights())
      self.assertAllClose(
          model_with_ds_strategy.predict(input_dict, batch_size=18, steps=12),
          cpu_model.predict(input_dict),
          atol=1e-4,
          rtol=1e-4)

  @combinations.generate(all_strategy_combinations_plus_run_distributed())
  def test_gradients_are_none(self, distribution):
    """Training tolerates weights whose gradients are None."""
    if not context.executing_eagerly():
      self.skipTest('None gradients are not supported in graph mode')

    class DenseWithExtraWeight(keras.layers.Dense):
      """Dense layer with extra weights that don't contribute to the output."""

      def build(self, input_shape):
        # Gradients w.r.t. extra_weights are None.
        self.extra_weight_1 = self.add_weight('extra_weight_1', shape=(),
                                              initializer='ones')
        super(DenseWithExtraWeight, self).build(input_shape)
        self.extra_weight_2 = self.add_weight('extra_weight_2', shape=(),
                                              initializer='ones')

    with distribution.scope():
      model = keras.Sequential([DenseWithExtraWeight(4, input_shape=(4,))])
      model.compile('adam', 'mse')

    inputs = np.random.normal(size=(64, 4))
    targets = np.random.normal(size=(64, 4))
    old_kernel = model.get_weights()[1]
    model.fit(inputs, targets)
    new_kernel = model.get_weights()[1]
    # The kernel (which does get gradients) must still be updated.
    self.assertNotAllEqual(old_kernel, new_kernel)
class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_calling_model_on_same_dataset(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics)
dataset = get_dataset(distribution)
# Call fit with validation data
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2)
model.fit(
dataset,
epochs=1,
steps_per_epoch=2,
verbose=0,
validation_data=dataset,
validation_steps=2)
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_model_interleaved_eval_same_as_direct_eval(
self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
user_controlled_model = get_model()
user_controlled_model.compile(
optimizer_fn(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()])
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
interleaved_model.compile(
optimizer_fn(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()])
dataset = get_dataset(distribution)
# Call fit with validation interleaved
interleaved_output = interleaved_model.fit(
dataset,
epochs=2,
steps_per_epoch=2,
verbose=1,
validation_data=dataset,
validation_steps=2,
shuffle=False)
# Manually control the validation running after each epoch.
user_controlled_output = []
for _ in range(2):
user_controlled_model.fit(
dataset, epochs=1, steps_per_epoch=2, verbose=1, shuffle=False)
user_controlled_output.append(
user_controlled_model.evaluate(dataset, steps=2))
self.assertEqual(interleaved_output.history['val_loss'],
[x[0] for x in user_controlled_output])
val_mean_absolute_error = interleaved_output.history.get(
'val_mean_absolute_error')
if not val_mean_absolute_error:
# The name of the metric changed in TF2.0
val_mean_absolute_error = interleaved_output.history['val_mae']
self.assertEqual(val_mean_absolute_error,
[x[1] for x in user_controlled_output])
self.assertEqual(interleaved_output.history['val_categorical_accuracy'],
[x[2] for x in user_controlled_output])
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = multi_input_output_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics)
input_a_np = np.random.random((10, 3)).astype('float32')
input_b_np = np.random.random((10, 5)).astype('float32')
output_d_np = np.random.random((10, 7)).astype('float32')
output_e_np = np.random.random((10, 7)).astype('float32')
# Test with tuples
dataset_tuple = dataset_ops.Dataset.from_tensor_slices(
((input_a_np, input_b_np), (output_d_np, output_e_np)))
dataset_tuple = dataset_tuple.repeat(100)
dataset_tuple = dataset_tuple.batch(10)
model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)
# Test with dict
dataset_dict = dataset_ops.Dataset.from_tensor_slices(({
'input_a': input_a_np,
'input_b': input_b_np
}, (output_d_np, output_e_np)))
dataset_dict = dataset_dict.repeat(100)
dataset_dict = dataset_dict.batch(10)
model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_with_dictionary_in_the_dataset_b135161171(
self, distribution):
def custom_loss(predict, label, weight):
bce = keras.losses.binary_crossentropy(label, predict)
return math_ops.reduce_mean(bce * weight)
with self.cached_session():
with distribution.scope():
input_img = keras.layers.Input([64, 64, 3], name='img')
input_lbl = keras.layers.Input([64, 64, 1], name='lbl')
input_weight = keras.layers.Input([64, 64], name='weight')
predict = keras.layers.Conv2D(2, [1, 1], padding='same')(input_img)
loss_lambda = keras.layers.Lambda(
lambda x: custom_loss(*x), name='my_loss')
my_loss = loss_lambda([predict, input_lbl, input_weight])
model = keras.models.Model(
inputs=[input_img, input_lbl, input_weight],
outputs=[predict, my_loss])
model.add_loss(model.get_layer('my_loss').output)
model.compile(
optimizer='adam')
if context.executing_eagerly():
def map_fn(img, lbl, weight):
inputs = {'img': img, 'lbl': lbl, 'weight': weight}
return (inputs,)
else:
def map_fn(img, lbl, weight):
inputs = {'img': img, 'lbl': lbl, 'weight': weight}
return inputs, {}
fake_imgs = np.ones([50, 64, 64, 3], dtype=np.float32)
fake_lbls = np.ones([50, 64, 64, 1], dtype=np.float32)
fake_weights = np.ones([50, 64, 64], dtype=np.float32)
data = dataset_ops.Dataset.from_tensor_slices(
(fake_imgs, fake_lbls, fake_weights)).map(map_fn).batch(10)
model.fit(data)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_eval_and_predict_methods_on_dataset_without_steps(
self, distribution):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(0.001)
model = get_model()
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer,
loss,
metrics=metrics)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
# steps/steps_per_epoch are calculated when using numpy arrays as
# input data.
fit_with_numpy = model.fit(
inputs, targets, epochs=1, batch_size=10).history
eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
predict_with_numpy = model.predict(inputs, batch_size=10)
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
dataset = dataset.batch(10, drop_remainder=True)
fit_with_ds = model.fit(dataset, epochs=1).history
eval_with_ds = model.evaluate(dataset)
predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
predict_dataset = predict_dataset.batch(10, drop_remainder=True)
predict_with_ds = model.predict(predict_dataset)
self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
self.assertAllClose(
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)
@combinations.generate(
    combinations.times(
        strategy_minus_tpu_combinations()))
def test_on_dataset_with_unknown_cardinality_without_steps(
    self, distribution, mode):
  """Unknown-cardinality datasets work without steps (non-TPU strategies)."""
  with self.cached_session():
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.001)
      model = get_model()
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      model.compile(
          optimizer,
          loss,
          metrics=metrics)

    inputs = np.zeros((1000, 3), dtype=np.float32)
    targets = np.zeros((1000, 4), dtype=np.float32)
    # steps/steps_per_epoch are calculated when using numpy arrays as
    # input data.
    fit_with_numpy = model.fit(
        inputs, targets, epochs=1, batch_size=10).history
    fit_with_numpy_multiple_epochs = model.fit(
        inputs, targets, epochs=2, batch_size=10).history
    eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
    predict_with_numpy = model.predict(inputs, batch_size=10)

    # Helper wraps the arrays in a dataset whose cardinality reports
    # UNKNOWN (verified by the assertions below).
    dataset = convert_numpy_to_dataset_with_unknown_cardinality(
        inputs, targets)
    predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
        inputs)

    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(predict_dataset)),
        cardinality.UNKNOWN)

    eval_with_ds = model.evaluate(dataset)
    predict_with_ds = model.predict(predict_dataset)
    self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
    self.assertAllClose(
        predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

    # fit must infer the epoch length by iterating to exhaustion, for both
    # single- and multi-epoch training.
    fit_with_ds = model.fit(dataset, epochs=1).history
    fit_with_ds_multiple_epochs = model.fit(dataset, epochs=2).history
    self.assertAllClose(fit_with_numpy, fit_with_ds, atol=1e-4, rtol=1e-4)
    self.assertAllClose(
        fit_with_numpy_multiple_epochs,
        fit_with_ds_multiple_epochs,
        atol=1e-4,
        rtol=1e-4)
@combinations.generate(
    combinations.times(
        tpu_strategy_combinations()))
def test_on_dataset_with_unknown_cardinality(self, distribution):
  """On TPU, unknown-cardinality datasets need explicit steps; fit must raise."""
  with self.cached_session():
    with distribution.scope():
      model = get_model()
      loss = 'mse'
      metrics = ['mae', keras.metrics.CategoricalAccuracy()]
      model.compile(
          gradient_descent.GradientDescentOptimizer(0.001),
          loss,
          metrics=metrics)

    inputs = np.zeros((1000, 3), dtype=np.float32)
    targets = np.zeros((1000, 4), dtype=np.float32)
    # steps/steps_per_epoch are calculated when using numpy arrays as
    # input data.
    eval_with_numpy = model.evaluate(inputs, targets, batch_size=10)
    predict_with_numpy = model.predict(inputs, batch_size=10)

    # Helper produces datasets whose cardinality reports UNKNOWN
    # (verified by the assertions below).
    dataset = convert_numpy_to_dataset_with_unknown_cardinality(
        inputs, targets)
    predict_dataset = convert_numpy_to_dataset_with_unknown_cardinality(
        inputs)

    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(dataset)),
        cardinality.UNKNOWN)
    self.assertEqual(
        keras.backend.get_value(cardinality.cardinality(predict_dataset)),
        cardinality.UNKNOWN)

    # evaluate/predict still work when steps are given explicitly.
    eval_with_ds = model.evaluate(dataset, steps=100)
    predict_with_ds = model.predict(predict_dataset, steps=100)
    self.assertAllClose(eval_with_numpy, eval_with_ds, atol=1e-4, rtol=1e-4)
    self.assertAllClose(
        predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)

    # fit without steps cannot infer the epoch length on this path.
    with self.assertRaisesRegexp(ValueError,
                                 'Number of steps could not be inferred'):
      model.fit(dataset, epochs=1)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_eval_and_predict_methods_on_dataset(
    self, distribution):
  """Smoke test: fit/evaluate/predict on a dataset with explicit steps."""
  with self.cached_session():
    with distribution.scope():
      model = get_model()
      model.compile(
          gradient_descent_keras.SGD(0.001),
          'mse',
          metrics=['mae', keras.metrics.CategoricalAccuracy()])

    dataset = get_dataset(distribution)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer):
  """Smoke test across every (strategy, optimizer) combination."""
  with self.cached_session():
    with distribution.scope():
      model = get_model()
      # `optimizer` is a factory supplied by the combination; instantiate it.
      model.compile(optimizer(), 'mse')

    dataset = get_dataset(distribution)
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.one_device_strategy
        ],
        mode=['graph', 'eager']))
def test_dataset_wrong_input_shape(self, distribution, mode):
  """fit must reject a dataset whose feature shape mismatches the model input."""
  if mode == 'graph':
    self.skipTest(
        'TODO(b/120943676, b/120957836): Re-enable for graph once the '
        'validation code is restored.')
  with self.cached_session():
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = get_model()
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

    # Wrong input shape
    # NOTE(review): 5 features here vs. the width get_model() expects
    # (presumably 3, matching sibling tests) -- confirm against get_model().
    inputs = np.zeros((10, 5), dtype=np.float32)
    targets = np.zeros((10, 4), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)

    with self.assertRaisesRegexp(ValueError, 'incompatible with the layer'):
      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu
        ],
        mode=['graph', 'eager']))
def test_dataset_external_batch_input_validation(
    self, distribution):
  """Pre-batched dataset elements (batch dim baked in) must be accepted."""
  with self.cached_session():
    with distribution.scope():
      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(learning_rate=0.001)
      model = get_model()
      loss = 'mse'
      model.compile(
          optimizer,
          loss)

    # Batching is done outside tf.data's `batch`
    # (each element already carries a leading batch dimension of 10).
    inputs = np.zeros((100, 10, 3), dtype=np.float32)
    targets = np.zeros((100, 10, 4), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus
        ],
        mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
  """Dropout must be active in fit and disabled in predict under a strategy."""
  # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
  # meaningful values. Currently we don't pass the learning phase if the
  # Lambda layer uses the learning phase.
  with self.cached_session():
    with distribution.scope():
      x = keras.layers.Input(shape=(1,), name='input')
      y = keras.layers.Dense(1, kernel_initializer='ones')(x)
      # Near-total dropout: training accuracy collapses, inference does not.
      z = keras.layers.Dropout(0.9999)(y)
      model = keras.Model(x, z)
      initial_weights = model.get_weights()

      optimizer_fn = gradient_descent_keras.SGD
      optimizer = optimizer_fn(0.005)
      loss = 'mse'
      metrics = ['acc']
      model.compile(
          optimizer,
          loss,
          metrics=metrics)

    batch_size = 8
    if isinstance(distribution, mirrored_strategy.MirroredStrategy):
      # MirroredStrategy uses global batch size.
      batch_size = 8 * distribution.num_replicas_in_sync

    inputs = np.ones((10, 1), dtype=np.float32)
    targets = np.ones((10, 1), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat().batch(batch_size)
    hist = model.fit(dataset, epochs=1, steps_per_epoch=20, verbose=1)
    # With dropout active almost everything is zeroed, so accuracy ~ 0.
    self.assertAlmostEqual(hist.history['acc'][0], 0, 0)

    with distribution.scope():
      model.set_weights(initial_weights)
    # TODO(psv/anjalisridhar): Enable these lines after we fix b/117431185.
    # evaluate_output = model.evaluate(dataset, steps=20)
    # self.assertAlmostEqual(evaluate_output[1], 1, 0)

    inputs = np.ones((10, 1), dtype=np.float32)
    predict_dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
    predict_dataset = predict_dataset.repeat().batch(batch_size)
    output = model.predict(predict_dataset, steps=10)
    # `predict` runs for 10 steps
    # Dropout is inactive at inference, so outputs stay near 1.
    ref_output = np.ones((160, 1), dtype=np.float32)
    self.assertArrayNear(output, ref_output, 1e-1)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def testOptimizerWithCallbacks(self, distribution):
  """LearningRateScheduler must update the optimizer lr under a strategy."""
  with self.cached_session():
    with distribution.scope():
      model = get_model()
      model.compile(gradient_descent_keras.SGD(0.01), 'mse')

    dataset = get_dataset(distribution)

    def schedule(_):
      # Constant schedule, different from the compile-time lr of 0.01.
      return 0.001

    model.fit(
        dataset,
        epochs=1,
        steps_per_epoch=2,
        verbose=0,
        callbacks=[keras.callbacks.LearningRateScheduler(schedule)])
    self.assertAllClose(0.001, keras.backend.get_value(model.optimizer.lr))
@combinations.generate(
    combinations.times(tpu_strategy_combinations_graph_only(),
                       combinations.combine(batch_size=[4, 6])))
def test_evaluate_with_dataset_with_partial_batch(self, distribution,
                                                  batch_size):
  """evaluate() over a partial final batch matches a same-weights CPU model.

  Only the metric values (index 1+) are compared; see the in-line note on
  why the loss is excluded.
  """
  with self.cached_session():
    optimizer = gradient_descent.GradientDescentOptimizer(0.001)
    loss = 'mse'
    metrics = ['mae', keras.metrics.CategoricalAccuracy()]

    with distribution.scope():
      model_with_ds_strategy = get_model()
      model_with_ds_strategy.compile(optimizer, loss, metrics=metrics)

    cpu_model = get_model()
    cpu_model.compile(optimizer, loss, metrics=metrics)

    x = np.random.random((10, 3)).astype('float32')
    y = np.random.random((10, 4)).astype('float32')
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))

    # As sample size is 10, we make the last batch a partial batch.
    cpu_model.set_weights(model_with_ds_strategy.get_weights())
    dataset_with_partial_batch = dataset.batch(batch_size)

    # We don't compare the loss as loss is currently not computed as metric
    # in Keras, the loss value is inaccurate for last partial batch due to
    # more weights for the last batch samples.
    # `steps` is a step count and must be an int; np.ceil returns a float.
    steps = int(np.ceil(10.0 / batch_size))

    self.assertAllClose(
        model_with_ds_strategy.evaluate(
            dataset_with_partial_batch, steps=steps)[1:],
        cpu_model.evaluate(dataset_with_partial_batch, steps=steps)[1:],
        atol=1e-5,
        rtol=1e-5)
    # Also exercise the no-steps path (epoch length inferred from the data).
    self.assertAllClose(
        model_with_ds_strategy.evaluate(dataset_with_partial_batch)[1:],
        cpu_model.evaluate(dataset_with_partial_batch)[1:],
        atol=1e-5,
        rtol=1e-5)
@combinations.generate(
    combinations.times(
        tpu_strategy_combinations_graph_only()))
def test_predict_with_dataset_with_partial_batch(
    self, distribution):
  """predict() over a partial final batch matches a same-weights CPU model."""
  with self.cached_session():
    optimizer = gradient_descent.GradientDescentOptimizer(0.001)
    loss = 'mse'

    with distribution.scope():
      model_with_ds_strategy = get_model()
      model_with_ds_strategy.compile(optimizer, loss)

    cpu_model = get_model()
    cpu_model.compile(optimizer, loss)

    inputs = np.random.random((10, 3)).astype(np.float32)
    # As sample size is 10, we batch by 4 so that the last batch is
    # a partial batch.
    dataset_with_partial_batch = dataset_ops.Dataset.from_tensor_slices(
        (inputs)).batch(4)

    # Identical weights, so only partial-batch handling can differ.
    cpu_model.set_weights(model_with_ds_strategy.get_weights())

    self.assertAllClose(
        model_with_ds_strategy.predict(dataset_with_partial_batch, steps=3),
        cpu_model.predict(dataset_with_partial_batch, steps=3),
        atol=1e-5,
        rtol=1e-5)
@combinations.generate(
    combinations.times(
        tpu_strategy_combinations_graph_only()))
def test_predict_multi_output_model_with_dataset_with_partial_batch(
    self, distribution):
  """Multi-input/output predict with a partial final batch matches CPU."""
  with self.cached_session():
    optimizer = gradient_descent.GradientDescentOptimizer(0.001)
    loss = 'mse'

    with distribution.scope():
      model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
      model_with_ds_strategy.compile(
          optimizer,
          loss)

    cpu_model = simple_multi_inputs_multi_outputs_model()
    cpu_model.compile(optimizer, loss)

    input_data, _ = get_multi_inputs_multi_outputs_data()
    input_dict = {
        'input_a': input_data['input_a'],
        'input_b': input_data['input_b'],
    }

    dataset = dataset_ops.Dataset.from_tensor_slices(input_dict)
    # As sample size is 200, we batch by 18 using 12 steps per epoch so
    # that the last batch is a partial batch.
    dataset_with_partial_batch = dataset.batch(18)

    # Identical weights, so only partial-batch handling can differ.
    cpu_model.set_weights(model_with_ds_strategy.get_weights())

    self.assertAllClose(
        model_with_ds_strategy.predict(dataset_with_partial_batch, steps=12),
        cpu_model.predict(dataset_with_partial_batch, steps=12),
        atol=1e-4,
        rtol=1e-4)
@combinations.generate(all_strategy_combinations_minus_default())
def test_match_model_input_matches_with_dataset_tensors(self, distribution):
  """Dict dataset inputs must be matched to model inputs by name, not order."""

  def _create_model_input_output_tensors():
    # Input names are chosen so alphabetical order differs from the order
    # in which the inputs are passed to the model.
    input_a = keras.layers.Input(shape=(16,), name='z_input_sorted_last')
    input_b = keras.layers.Input(shape=(32,), name='a_input_sorted_first')
    intermediate_a = keras.layers.Dense(10)(input_a)
    intermediate_b = keras.layers.Dense(10)(input_b)
    merged = keras.layers.Add()([intermediate_a, intermediate_b])
    output = keras.layers.Dense(2)(merged)
    return input_a, input_b, output

  input_dict = {
      'z_input_sorted_last': np.random.rand(32, 16).astype(np.float32),
      'a_input_sorted_first': np.random.rand(32, 32).astype(np.float32)
  }
  target = np.ones((32, 2), dtype=np.float32)
  dataset = dataset_ops.Dataset.from_tensor_slices((input_dict, target))
  dataset = dataset.batch(4, drop_remainder=True)

  with self.cached_session():
    with distribution.scope():
      input_a, input_b, output = _create_model_input_output_tensors()
      # `input_a`, which has input name that comes last in alphanumeric
      # order, is the first input of the model input layers. If tensors
      # from `input_dict` is blindly flattened and passed to model
      # inputs incorrectly, this would result in `input_a` input layer
      # matching with tensor `a_input_sorted_first` and would result in
      # shape mismatch.
      model_with_array_input = keras.models.Model(
          inputs=[input_a, input_b], outputs=output)
      model_with_array_input.compile('sgd', 'mse')
      model_weights = model_with_array_input.get_weights()
      model_with_array_input_fit = model_with_array_input.fit(
          dataset, steps_per_epoch=1, epochs=1).history

      # Fresh graph, same architecture, inputs given as a dict this time.
      input_a, input_b, output = _create_model_input_output_tensors()
      model_with_dict_input = keras.models.Model(
          inputs={
              'z_input_sorted_last': input_a,
              'a_input_sorted_first': input_b,
          },
          outputs=output)
      model_with_dict_input.compile('sgd', 'mse')
      # Same starting weights, so both training runs must be identical.
      model_with_dict_input.set_weights(model_weights)
      model_with_dict_input_fit = model_with_dict_input.fit(
          dataset, steps_per_epoch=1, epochs=1).history

    self.assertAllClose(
        model_with_dict_input_fit,
        model_with_array_input_fit,
        atol=1e-4,
        rtol=1e-4)
@combinations.generate(
    combinations.combine(
        distribution=strategies_minus_tpu,
        mode=['graph', 'eager']))
def test_dataset_with_sample_weights(self, distribution):
  """evaluate() applies per-sample weights from an (x, y, w) dataset.

  NOTE(review): the expected values below assume get_sample_weights_model()
  predicts 0/1/2/3 for inputs 0/1/2/3 -- confirm against that helper.
  """
  with self.cached_session(), distribution.scope():
    model = get_sample_weights_model()
    optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    model.compile(
        optimizer,
        loss)

    inputs = np.array([[0], [1], [2], [3]], np.float32)
    targets = np.array([[2], [4], [6], [8]], np.float32)
    sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)
    ds = dataset_ops.Dataset.from_tensor_slices(
        (inputs, targets, sample_weights)).batch(2)
    result = model.evaluate(ds, verbose=1)

    # The per sample loss is multipled by the corresponding sample weight. The
    # average of these weighted losses is the return value of the `evaluate`
    # call. For example, in the test above the average weighted loss is
    # calculated in the following manner:
    # batch_1 = (((2-0)^2) * 0.25 + ((4-1)^2) * 0.5) / 2 = 5.5 / 2 = 2.75
    # batch_2 = (((6-2)^2 * 0.75) + ((8-3)^2 * 1)) / 2 = 37 / 2 = 18.5
    # final result = (batch_1 + batch_2) / 2 = 10.625.
    # The first time we divide by number of input samples and the second time
    # we divide by number of steps/batches that the loss is aggregated over.
    self.assertAllClose(result, 10.625)

    # We now test without passing sample_weights:
    # batch_1 = ((2-0)^2) + ((4-1)^2) / 2 = 13 / 2 = 6.5
    # batch_2 = ((6-2)^2) + ((8-3)^2) / 2 = 41 / 2 = 20.5
    # final result = (batch_1 + batch_2) / 2 = 27 / 2 = 13.5
    ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets)).batch(2)
    result = model.evaluate(ds, verbose=1)
    self.assertAllClose(result, 13.5)
class TestRegularizerLoss(test.TestCase, parameterized.TestCase):
  """Verifies regularizer losses are scaled correctly across replicas."""

  class IdentityRegularizer(keras.regularizers.Regularizer):
    """Regularizer whose penalty is the raw weight value itself."""

    def __call__(self, x):
      return array_ops.identity(x)

  class AddLayer(keras.layers.Layer):
    """Adds a scalar weight `v` (init 1) that carries the identity regularizer."""

    def build(self, _):
      self.v = self.add_weight(
          'v', (),
          initializer='ones',
          regularizer=TestRegularizerLoss.IdentityRegularizer())

    def call(self, inputs):
      return inputs + self.v

  @staticmethod
  def loss_fn(_, y_pred):
    # Loss is just the mean prediction; the labels are ignored.
    return math_ops.reduce_mean(y_pred)

  @combinations.generate(
      combinations.times(
          strategy_combinations.all_strategy_combinations_minus_default()))
  def test_regularizer_loss(self, distribution):
    batch_size = 2
    if not distributed_training_utils.global_batch_size_supported(distribution):
      batch_size //= distribution.num_replicas_in_sync

    # Given an input x, which is always 1, and variable v, this model computes
    # Loss=x+v+regularizer_loss, where regularizer_loss=v and the variable is
    # initialized to 1. Therefore, this model computes Loss=1+2v, and so the
    # gradient dLoss/dv = 2. This gradient of 2 is averaged over all examples
    # in a batch and then multiplied by the learning rate of 1. As a result,
    # the model update for one batch should subtract 2 from v, resulting in v
    # being -1. If the regularizer loss is not scaled correctly by number of
    # replicas, the variable value will be incorrect when number of replicas
    # >1. For e.g. it will be -2 if num replicas = 2.
    with distribution.scope():
      x = keras.layers.Input(shape=(1,), batch_size=batch_size)
      y = TestRegularizerLoss.AddLayer()(x)
      model = keras.models.Model(inputs=x, outputs=y)
      opt = gradient_descent_keras.SGD(1.)
      model.compile(
          opt,
          loss=TestRegularizerLoss.loss_fn)
      model.fit(
          x=np.array([[1.], [1.]], dtype=np.float32),
          y=np.array([[1.], [1.]], dtype=np.float32),
          batch_size=batch_size)
      v = model.get_weights()[0]
      self.assertEqual(-1.0, v)
class TestDistributionStrategyWithKerasModels(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_distribution_strategy_on_sequential_model(
    self, distribution):
  """Sequential models train/predict/evaluate from numpy under a strategy."""
  with distribution.scope():
    model = simple_sequential_model()
    model.compile(
        gradient_descent_keras.SGD(learning_rate=0.001),
        'mse')

    inputs = np.zeros((20, 10), np.float32)
    targets = np.zeros((20, 2), np.float32)
    model.fit(inputs, targets, epochs=1, batch_size=10)
    model.predict(inputs, batch_size=10)
    model.evaluate(inputs, targets, batch_size=10)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_distribution_strategy_on_functional_model(
    self, distribution):
  """Functional models train/predict/evaluate from numpy under a strategy."""
  with distribution.scope():
    model = get_model()
    model.compile(
        gradient_descent_keras.SGD(learning_rate=0.001),
        'mse')

    inputs = np.zeros((64, 3), dtype=np.float32)
    targets = np.zeros((64, 4), dtype=np.float32)
    model.fit(inputs, targets, epochs=1)
    model.predict(inputs)
    model.evaluate(inputs, targets)
@combinations.generate(
    combinations.combine(distribution=all_strategies, mode=['eager']))
def test_distributed_dataset(self, distribution):
  """fit/evaluate accept a pre-distributed dataset when steps are given."""
  with distribution.scope():

    class CBCounter(keras.callbacks.Callback):
      """Counts epoch ends and train/test batch ends."""

      def __init__(self):
        self.epochs = 0
        self.train_batches = 0
        self.test_batches = 0

      def on_epoch_end(self, batch, logs=None):
        self.epochs += 1

      def on_train_batch_end(self, batch, logs=None):
        self.train_batches += 1

      def on_test_batch_end(self, batch, logs=None):
        self.test_batches += 1

    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')
    cb_counter = CBCounter()

    x, y = np.ones((100, 10)), np.ones((100, 1))
    ds = dataset_ops.DatasetV2.from_tensor_slices((x, y))
    ds = ds.batch(10).repeat(2)
    # Distribute the dataset ourselves instead of letting fit() do it.
    ds = distribution.experimental_distribute_dataset(ds)

    val_ds = dataset_ops.DatasetV2.from_tensor_slices((x, y))
    val_ds = val_ds.batch(20)
    val_ds = distribution.experimental_distribute_dataset(val_ds)

    model.fit(
        ds,
        steps_per_epoch=10,
        validation_data=val_ds,
        validation_steps=5,
        epochs=2,
        callbacks=[cb_counter])
    # 2 epochs x 10 train steps and 2 x 5 validation steps.
    self.assertEqual(cb_counter.train_batches, 20)
    self.assertEqual(cb_counter.test_batches, 10)
    self.assertEqual(cb_counter.epochs, 2)

    # Check for `steps_per_epoch`.
    # A distributed dataset has no inferable length, so fit without
    # steps_per_epoch must raise (multi-replica only).
    if distribution.num_replicas_in_sync > 1:
      with self.assertRaisesRegexp(ValueError,
                                   'distributed dataset, you must specify'):
        model.fit(ds, epochs=2)
@combinations.generate(
    combinations.combine(distribution=all_strategies, mode=['eager']))
def test_distributed_datasets_from_function(self, distribution):
  """fit/evaluate accept datasets built per-replica via a dataset_fn."""
  with distribution.scope():

    class CBCounter(keras.callbacks.Callback):
      """Counts epoch ends and train/test batch ends."""

      def __init__(self):
        self.epochs = 0
        self.train_batches = 0
        self.test_batches = 0

      def on_epoch_end(self, batch, logs=None):
        self.epochs += 1

      def on_train_batch_end(self, batch, logs=None):
        self.train_batches += 1

      def on_test_batch_end(self, batch, logs=None):
        self.test_batches += 1

    model = keras.Sequential([keras.layers.Dense(1)])
    model.compile('sgd', 'mse')
    cb_counter = CBCounter()

    def make_dataset(_):
      # Per-replica dataset; argument is the input context (unused here).
      x, y = np.ones((100, 10)), np.ones((100, 1))
      ds = dataset_ops.DatasetV2.from_tensor_slices((x, y))
      ds = ds.batch(5).repeat()
      return ds

    ds = distribution.experimental_distribute_datasets_from_function(
        make_dataset)
    val_ds = distribution.experimental_distribute_datasets_from_function(
        make_dataset)

    model.fit(
        ds,
        steps_per_epoch=10,
        validation_data=val_ds,
        validation_steps=5,
        epochs=2,
        callbacks=[cb_counter])
    # 2 epochs x 10 train steps and 2 x 5 validation steps.
    self.assertEqual(cb_counter.train_batches, 20)
    self.assertEqual(cb_counter.test_batches, 10)
    self.assertEqual(cb_counter.epochs, 2)

    # Check for `steps_per_epoch`.
    # Function-built distributed datasets have no inferable length either.
    if distribution.num_replicas_in_sync > 1:
      with self.assertRaisesRegexp(ValueError,
                                   'distributed dataset, you must specify'):
        model.fit(ds, epochs=2)
@combinations.generate(
    combinations.combine(distribution=all_strategies, mode=['eager']))
def test_host_training_loop(self, distribution):
  """With steps_per_execution=10, callbacks fire once per 10-step chunk."""
  with distribution.scope():
    inputs = keras.Input((10, 10, 3))
    x = keras.layers.Conv2D(3, kernel_size=3)(inputs)
    x = keras.layers.Flatten()(x)
    outputs = keras.layers.Dense(1)(x)
    model = keras.Model(inputs, outputs)
    model.compile('sgd', 'mse', experimental_steps_per_execution=10)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10, 10, 3)), np.ones((100, 1))
    # 100 samples / batch_size 2 = 50 steps => five 10-step executions.
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.train_end_batches, [9, 19, 29, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.test_end_batches, [9, 19, 29, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 10, 20, 30, 40])
    self.assertEqual(bc.predict_end_batches, [9, 19, 29, 39, 49])
@combinations.generate(
    combinations.combine(distribution=all_strategies, mode=['eager']))
def test_host_training_loop_last_partial_execution(self, distribution):
  """The final execution runs the leftover steps when 50 % 20 != 0."""
  with distribution.scope():
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    model.compile('sgd', 'mse', experimental_steps_per_execution=20)

    bc = BatchCountingCB()
    x, y = np.ones((100, 10)), np.ones((100, 1))
    # 50 steps total => executions of 20, 20, then a partial 10.
    model.fit(x, y, batch_size=2, epochs=1, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49])

    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])
@combinations.generate(
    combinations.combine(distribution=all_strategies, mode=['eager']))
def test_host_training_loop_dataset_unknown_size(self, distribution):
  """Unknown-size datasets need explicit steps when steps_per_execution > 1."""
  with distribution.scope():
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    model.compile('sgd', 'mse', experimental_steps_per_execution=20)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    ds = dataset_ops.DatasetV2.from_tensor_slices((x, y)).batch(2)
    ds = ds.filter(lambda *args, **kwargs: True)  # Makes the size UNKNOWN.
    bc = BatchCountingCB()

    # Without a known size the runner cannot chunk executions: must raise.
    with self.assertRaisesRegexp(ValueError, 'steps_per_execution'):
      model.fit(ds, epochs=2, callbacks=[bc])

    train_ds = ds.repeat(2)
    model.fit(train_ds, steps_per_epoch=50, epochs=2, callbacks=[bc])
    # Per epoch: executions of 20, 20 and a partial 10 (50 steps), twice.
    self.assertEqual(bc.train_begin_batches, [0, 20, 40, 0, 20, 40])
    self.assertEqual(bc.train_end_batches, [19, 39, 49, 19, 39, 49])

    with self.assertRaisesRegexp(ValueError, 'steps_per_execution'):
      model.evaluate(ds, callbacks=[bc])

    test_ds = ds.repeat(2)
    model.evaluate(test_ds, steps=50, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0, 20, 40])
    self.assertEqual(bc.test_end_batches, [19, 39, 49])

    predict_ds = ds.repeat(2)
    model.predict(predict_ds, steps=50, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0, 20, 40])
    self.assertEqual(bc.predict_end_batches, [19, 39, 49])
@combinations.generate(
    combinations.combine(distribution=all_strategies, mode=['eager']))
def test_host_training_loop_truncate_to_epoch(self, distribution):
  """steps_per_execution larger than an epoch is truncated to epoch length."""
  with distribution.scope():
    inputs = keras.Input(10)
    outputs = keras.layers.Dense(1)(inputs)
    model = keras.Model(inputs, outputs)
    model.compile('sgd', 'mse', experimental_steps_per_execution=500)

    x, y = np.ones((100, 10)), np.ones((100, 1))
    bc = BatchCountingCB()
    # 50 steps/epoch < 500, so each epoch is one single execution.
    model.fit(x, y, batch_size=2, epochs=2, callbacks=[bc])
    self.assertEqual(bc.train_begin_batches, [0, 0])
    self.assertEqual(bc.train_end_batches, [49, 49])

    x, y = np.ones((50, 10)), np.ones((50, 1))
    # 25 eval steps in one execution.
    model.evaluate(x, y, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.test_begin_batches, [0])
    self.assertEqual(bc.test_end_batches, [24])

    x = np.ones((50, 10))
    model.predict(x, batch_size=2, callbacks=[bc])
    self.assertEqual(bc.predict_begin_batches, [0])
    self.assertEqual(bc.predict_end_batches, [24])
@combinations.generate(
    combinations.times(
        all_strategy_combinations_minus_default()))
def test_distribution_strategy_one_dimensional(self, distribution):
  """1-D integer targets (sparse categorical) train under a strategy."""
  with distribution.scope():
    features_in = keras.layers.Input(shape=(10,))
    probs_out = keras.layers.Dense(3, activation='softmax')(features_in)
    model = keras.Model(inputs=[features_in], outputs=[probs_out])
    model.compile(
        optimizer='rmsprop',
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'])

    x = np.random.random((64, 10)).astype('float32')
    # Targets are a rank-1 array of class ids in [0, 3).
    y = np.random.randint(3, size=64)
    model.fit(x, y, epochs=1, steps_per_epoch=2)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus
        ],
        mode=['graph', 'eager'],
        reduction=[
            loss_reduction.ReductionV2.AUTO,
            loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,
            loss_reduction.ReductionV2.SUM
        ]))
def test_distribution_strategy_with_loss_reduction_types(
    self, distribution, reduction):
  """Each loss reduction type yields the same loss with and without a strategy."""
  np.random.seed(_RANDOM_SEED)

  def _get_model():
    # Zero-initialized stack so both runs start from identical weights.
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
    x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
    outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
    model = keras.Model(inputs, outputs)
    return model

  x = np.random.random((64, 10))
  y = np.random.random((64, 1))
  dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
  dataset = dataset.batch(32)

  # Baseline: no distribution strategy.
  model = _get_model()
  model.compile(
      'sgd', loss=keras.losses.MeanSquaredError(reduction=reduction))
  history = model.fit(dataset, steps_per_epoch=2, epochs=1, shuffle=False)

  with distribution.scope():
    ds_model = _get_model()
    ds_model.compile(
        'sgd',
        loss=keras.losses.MeanSquaredError(reduction=reduction))
    ds_history = ds_model.fit(
        dataset, steps_per_epoch=2, epochs=1, shuffle=False)

  self.assertArrayNear(history.history['loss'], ds_history.history['loss'],
                       1e-5)
@combinations.generate(
    combinations.times(
        all_strategy_combinations_minus_default()))
def test_distribution_strategy_with_symbolic_add_loss(
    self, mode, distribution):
  """Symbolic add_loss histories must match with and without a strategy."""

  def _make_model_with_add_loss():
    # Zero-initialized so both runs start from identical weights.
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
    x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
    outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
    model = keras.Model(inputs, outputs)
    # Two symbolic losses; no compile-time loss is supplied.
    model.add_loss(math_ops.reduce_mean(x1))
    model.add_loss(math_ops.reduce_mean(outputs))
    return model

  x = np.ones((64, 10)).astype('float32')

  baseline_model = _make_model_with_add_loss()
  baseline_model.compile('sgd')
  baseline_history = baseline_model.fit(x, epochs=1)

  with distribution.scope():
    ds_model = _make_model_with_add_loss()
    ds_model.compile('sgd')
    ds_history = ds_model.fit(x, epochs=1)

  self.assertAllClose(baseline_history.history, ds_history.history)
# TODO(omalleyt): Investigate flakiness and re-enable.
@combinations.generate(all_strategy_minus_default_and_tpu_combinations())
def DISABLED_test_distribution_strategy_with_callable_add_loss(
    self, distribution):
  """Disabled: callable add_loss histories should match with/without a strategy."""

  def _make_model():
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
    x2 = keras.layers.Dense(10, kernel_initializer='zeros')(x1)
    d = keras.layers.Dense(1, kernel_initializer='zeros')
    outputs = d(x2)
    model = keras.Model(inputs, outputs)
    # Deferred (callable) loss that reads the layer kernel at each step.
    model.add_loss(lambda: 100. * math_ops.reduce_mean(d.kernel))
    return model

  x = np.ones((64, 10)).astype('float32')
  y = np.ones((64, 1)).astype('float32')

  # Baseline: no distribution strategy.
  model = _make_model()
  self.assertLen(model.losses, 1)
  model.compile('sgd', 'mse')
  history = model.fit(x, y, steps_per_epoch=2, epochs=1)

  with distribution.scope():
    ds_model = _make_model()
    self.assertLen(ds_model.losses, 1)
    ds_model.compile('sgd', 'mse')
    ds_history = ds_model.fit(x, y, steps_per_epoch=2, epochs=1)

  self.assertAllClose(history.history, ds_history.history)
@combinations.generate(
    combinations.times(
        all_strategy_minus_default_and_tpu_combinations()))
def test_distribution_strategy_with_add_metric_in_call(
    self, distribution):
  """add_metric inside layer call() matches histories with/without a strategy."""

  class Bias(keras.layers.Layer):
    """Adds a trainable bias and records the input mean as a metric."""

    def build(self, input_shape):
      self.bias = self.add_weight(name='bias', initializer='zeros', shape=())

    def call(self, inputs):
      self.add_metric(
          math_ops.reduce_mean(inputs), name='bias', aggregation='mean')
      return inputs + self.bias

  def _make_model_with_add_metric():
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
    x2 = Bias()(x1)
    outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
    model = keras.Model(inputs, outputs)
    return model

  x = np.ones((64, 10)).astype('float32')
  y = np.ones((64, 1)).astype('float32')

  # Baseline: no distribution strategy.
  model = _make_model_with_add_metric()
  self.assertLen(model.metrics, 1)
  model.compile('sgd', 'mse')
  history = model.fit(
      x, y, validation_data=(x, y), validation_steps=2, epochs=2)

  with distribution.scope():
    ds_model = _make_model_with_add_metric()
    self.assertLen(ds_model.metrics, 1)
    ds_model.compile(
        'sgd',
        'mse')
    ds_history = ds_model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)
    # includes stateful loss metric in eager.
    metrics_len = 2 if context.executing_eagerly() else 1
    self.assertLen(ds_model.metrics, metrics_len)

  self.assertAllClose(history.history, ds_history.history)
@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.one_device_strategy,
            strategy_combinations.one_device_strategy_gpu,
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            strategy_combinations.mirrored_strategy_with_two_gpus,
        ],
        mode=['eager']))
def test_distribution_strategy_with_add_metric_object(
    self, distribution):
  """add_metric with a Metric object matches histories with/without a strategy."""

  class Bias(keras.layers.Layer):
    """Adds a trainable bias and tracks the input mean via a Mean metric."""

    def build(self, input_shape):
      self.bias = self.add_weight(name='bias', initializer='zeros', shape=())
      # Metric object owned by the layer (vs. a raw tensor metric).
      self.mean = keras.metrics.Mean(name='mean')

    def call(self, inputs):
      self.add_metric(self.mean(inputs))
      return inputs + self.bias

  def _make_model_with_add_metric_object():
    inputs = keras.Input((10,))
    x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
    x2 = Bias()(x1)
    outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x2)
    model = keras.Model(inputs, outputs)
    return model

  x = np.ones((64, 10)).astype('float32')
  y = np.ones((64, 1)).astype('float32')

  # Baseline: no distribution strategy.
  model = _make_model_with_add_metric_object()
  self.assertLen(model.metrics, 1)
  model.compile('sgd', 'mse')
  history = model.fit(
      x, y, validation_data=(x, y), validation_steps=2, epochs=2)

  with distribution.scope():
    ds_model = _make_model_with_add_metric_object()
    self.assertLen(ds_model.metrics, 1)
    ds_model.compile(
        'sgd',
        'mse')
    ds_history = ds_model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)
    # includes stateful loss metric in eager.
    metrics_len = 2 if context.executing_eagerly() else 1
    self.assertLen(ds_model.metrics, metrics_len)

  self.assertAllClose(history.history, ds_history.history)
  @combinations.generate(
      # TODO(phillypham): Why does validation_steps > 1 not work on TPUs?
      combinations.times(
          all_strategy_minus_default_and_tpu_combinations()))
  def test_distribution_strategy_with_add_metric_outside_call(
      self, distribution):
    """add_metric called on the model (outside any layer) matches a non-distributed run."""

    def _make_model_with_add_metric():
      inputs = keras.Input((10,))
      x1 = keras.layers.Dense(10, kernel_initializer='zeros')(inputs)
      outputs = keras.layers.Dense(1, kernel_initializer='zeros')(x1)
      model = keras.Model(inputs, outputs)
      # Metric tensor is attached to the functional model itself.
      model.add_metric(
          math_ops.reduce_mean(x1), name='mid_mean', aggregation='mean')
      return model

    x = np.ones((64, 10)).astype('float32')
    y = np.ones((64, 1)).astype('float32')
    # Baseline run without a distribution strategy.
    model = _make_model_with_add_metric()
    self.assertLen(model.metrics, 1)
    model.compile('sgd', 'mse')
    history = model.fit(
        x, y, validation_data=(x, y), validation_steps=2, epochs=2)
    with distribution.scope():
      ds_model = _make_model_with_add_metric()
      self.assertLen(ds_model.metrics, 1)
      ds_model.compile('sgd', 'mse')
      ds_history = ds_model.fit(
          x, y, validation_data=(x, y), validation_steps=2, epochs=2)
      # includes stateful loss metric in eager.
      metrics_len = 2 if context.executing_eagerly() else 1
      self.assertLen(ds_model.metrics, metrics_len)
    self.assertAllClose(history.history, ds_history.history)
  @combinations.generate(
      combinations.combine(
          distribution=strategies_minus_tpu,
          mode=['eager']))
  def test_sparse_tensor_outputs(self, distribution):
    """predict() supports layers whose outputs are SparseTensors.

    NOTE(review): the `distribution` parameter is injected by the combination
    but not used in the body — presumably only parameterizes the environment.
    """

    class ToSparse(keras.layers.Layer):
      """Create a sparse tensor based on a given dense tensor."""

      def call(self, inputs):
        # Keep only the non-zero entries of the dense input.
        indices = array_ops.where_v2(math_ops.not_equal(inputs, 0))
        values = array_ops.gather_nd(inputs, indices)
        shape = array_ops.shape(inputs, out_type='int64')
        return sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

    model = keras.Sequential([ToSparse()])

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data, batch_size=2)

    expected_indices = np.array([[0, 0], [1, 0], [1, 1]])
    expected_values = np.array([1, 2, 3])
    expected_dense_shape = np.array([2, 3])

    self.assertAllEqual(output.indices, expected_indices)
    self.assertAllEqual(output.values, expected_values)
    self.assertAllEqual(output.dense_shape, expected_dense_shape)
  @combinations.generate(
      combinations.combine(
          distribution=strategies_minus_tpu,
          mode=['eager']))
  def test_ragged_tensor_outputs(self, distribution):
    """predict() supports layers whose outputs are RaggedTensors.

    NOTE(review): `distribution` is injected by the combination but unused
    in the body — presumably only parameterizes the environment.
    """

    class ToRagged(keras.layers.Layer):
      """Create a ragged tensor based on a given dense tensor."""

      def __init__(self, padding, ragged_rank=1, **kwargs):
        super(ToRagged, self).__init__(**kwargs)
        self._padding = padding        # value treated as padding to strip
        self._ragged_rank = ragged_rank

      def call(self, inputs):
        return ragged_tensor.RaggedTensor.from_tensor(
            inputs, padding=self._padding, ragged_rank=self._ragged_rank)

    model = keras.Sequential([ToRagged(padding=0)])

    # Define some input data with additional padding.
    input_data = np.array([[1, 0, 0], [2, 3, 0]])
    output = model.predict(input_data, batch_size=2)

    # Trailing zeros are stripped, leaving ragged rows.
    expected_values = [[1], [2, 3]]
    self.assertAllEqual(expected_values, output)
  @combinations.generate(
      combinations.combine(
          distribution=strategies_minus_default_minus_tpu + tpu_strategies,
          mode=['eager']))
  def test_correctness_of_add_loss_with_merge_call(self, distribution):
    """Loss added via add_loss equals the manually computed per-replica loss."""
    batch_size = 32

    def _get_model():
      # Labels are a second model input so the MSE loss can be attached with
      # add_loss() instead of compile(loss=...).
      inputs = keras.layers.Input(shape=(1,))
      labels = keras.layers.Input(shape=(1,))
      x = keras.layers.Dense(10, activation='relu')(inputs)
      y = keras.layers.Dense(1)(x)
      model = keras.models.Model([inputs, labels], y)
      model.add_loss(keras.losses.mean_squared_error(labels, y))
      return model

    def _get_data():
      x_train = np.random.rand(64, 1)
      y_train = 3 * x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')
      dataset = dataset_ops.DatasetV2.from_tensor_slices((x_train, y_train))
      dataset = dataset.batch(batch_size)
      return dataset

    with distribution.scope():
      model = _get_model()
      optimizer = gradient_descent_keras.SGD(0.2)

      @def_function.function
      def train_step(dist_inputs):

        def step_fn(inputs):
          with backprop.GradientTape() as tape:
            logits = model(inputs)
            # Invoke a merge_call()
            distribution_strategy_context.get_replica_context().merge_call(
                lambda d: None)

            # Verify that there is only one loss on the model.
            assert len(model.losses) == 1
            loss_from_model = math_ops.reduce_sum(
                model.losses) * 1.0 / batch_size

            # Compute loss in this loop.
            loss = keras.losses.mean_squared_error(inputs[1], logits)
            loss = nn.compute_average_loss(loss, global_batch_size=batch_size)

            # Verify that the loss computed in this loop is equivalent to the
            # loss from the model that was added via add_loss.
            check_ops.assert_equal(loss, loss_from_model)

          grads = tape.gradient(loss, model.trainable_variables)
          optimizer.apply_gradients(zip(grads, model.trainable_variables))
          return loss

        per_replica_losses = distribution.run(step_fn, args=(dist_inputs,))
        return distribution.reduce(
            reduce_util.ReduceOp.SUM, per_replica_losses, axis=None)

      dataset = distribution.experimental_distribute_dataset(_get_data())
      for _ in range(2):
        for x in dataset:
          train_step(x)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_unimplemented_parameter_server_strategy(self):
cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2)
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type='worker',
task_id=1,
num_accelerators={'GPU': 0})
distribution = parameter_server_strategy.ParameterServerStrategy(
cluster_resolver)
self.assertIsInstance(distribution,
(parameter_server_strategy.ParameterServerStrategyV1,
parameter_server_strategy.ParameterServerStrategy))
with self.assertRaisesRegexp(NotImplementedError,
'ParameterServerStrategy*'):
with distribution.scope():
model = simple_sequential_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss)
# Models to exercise inserting ancillary layers with add_loss and add_metric.
def _functional_with_add_loss_and_metric(input_shape, num_classes, l1, l2):
  """Functional CNN with L1/L2 activity regularizers added via add_loss.

  Args:
    input_shape: shape of the image input (excluding batch).
    num_classes: size of the final logits layer.
    l1: weight of the L1 penalty on the 'sparse_embedding' activations.
    l2: weight of the L2 penalty on the 'embedding' activations.

  Returns:
    An uncompiled `keras.Model` with extra losses/metrics attached.
  """
  inputs = keras.Input(input_shape, name='images')
  x = keras.layers.Conv2D(32, kernel_size=5, activation='relu')(inputs)
  x = keras.layers.MaxPooling2D(pool_size=2)(x)
  x = keras.layers.Conv2D(64, kernel_size=5, activation='relu')(x)
  x = keras.layers.MaxPooling2D(pool_size=2)(x)
  # Apply L2 regularization to embedding. Use a mix of TensorFlow ops and
  # layers to exercise all code paths.
  x = keras.layers.Flatten(name='embedding')(x)
  l2_loss = math_ops.reduce_mean(math_ops.reduce_sum(math_ops.square(x), -1))
  # Apply L1 regularization to next layer.
  x = keras.layers.Dense(1024, activation='relu', name='sparse_embedding')(x)
  l1_loss = keras.layers.Lambda(
      lambda x: math_ops.reduce_mean(math_ops.reduce_sum(x, -1)),
      name='l1_loss')(
          x)
  outputs = keras.layers.Dense(num_classes, name='logits')(x)
  model = keras.Model(inputs=inputs, outputs=outputs)
  # Weight regularization terms: the metric reports the raw (unweighted)
  # loss value while the added loss carries the l1/l2 factor.
  model.add_loss(keras.layers.Lambda(lambda x: x * l2)(l2_loss))
  model.add_metric(l2_loss, aggregation='mean', name='l2_loss')
  model.add_loss(l1_loss * l1)
  model.add_metric(l1_loss, aggregation='mean', name='l1_loss')
  return model
def _sequential_with_add_loss_and_metric(input_shape, num_classes, l1, l2):
  """Sequential CNN with L1/L2 regularizers attached to intermediate outputs.

  Same contract as `_functional_with_add_loss_and_metric`, but the model is
  built with the Sequential API and the intermediate tensors are recovered
  with `get_layer(...).get_output_at(-1)`.
  """
  model = keras.Sequential([
      keras.layers.Conv2D(
          32, kernel_size=5, activation='relu', input_shape=input_shape),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Conv2D(64, kernel_size=5, activation='relu'),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Flatten(name='embedding'),
      keras.layers.Dense(1024, activation='relu', name='sparse_embedding'),
      keras.layers.Dense(num_classes, name='logits'),
  ])
  # Extract layer outputs, add regularization terms, and rescale the metric.
  # Use a mix of TensorFlow ops and layers to exercise all code paths.
  x = model.get_layer('sparse_embedding').get_output_at(-1)
  l1_loss = l1 * math_ops.reduce_mean(math_ops.reduce_sum(x, -1))
  model.add_loss(l1_loss)
  # Metric reports the unweighted value, so divide the weighted loss by l1.
  model.add_metric(
      keras.layers.Lambda(lambda x: math_ops.divide(x, l1))(l1_loss),
      aggregation='mean',
      name='l1_loss')
  x = model.get_layer('embedding').get_output_at(-1)
  l2_loss = keras.layers.Lambda(
      lambda x: l2 * math_ops.reduce_mean(math_ops.reduce_sum(x * x, -1)),
      name='l2_loss')(
          x)
  model.add_loss(l2_loss)
  model.add_metric(l2_loss / l2, aggregation='mean', name='l2_loss')
  return model
def _functional_with_layer_reuse(input_shape, num_classes, l1, l2):
  """Functional model that reuses an inner Sequential model three times.

  The base CNN is applied to the real input and to all-zeros / all-ones
  tensors, creating multiple nodes on the same layers; L1/L2 penalties are
  built from the differences between those outputs.
  """
  base_model = keras.Sequential([
      keras.layers.Conv2D(
          32, kernel_size=5, activation='relu', input_shape=input_shape),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Conv2D(64, kernel_size=5, activation='relu'),
      keras.layers.MaxPooling2D(pool_size=2),
      keras.layers.Flatten(),
      keras.layers.Dense(1024, activation='relu'),
      keras.layers.Dense(num_classes, name='logits'),
  ])
  inputs = keras.Input(input_shape, name='images')
  logits = base_model(inputs)
  model = keras.Model(inputs=inputs, outputs=logits)
  # Reuse sequential layer and create new nodes.
  zero_logits = base_model(array_ops.zeros_like(inputs))
  one_logits = base_model(array_ops.ones_like(inputs))
  # L2 loss.
  l2_loss = math_ops.reduce_mean(
      math_ops.reduce_sum(math_ops.square(logits - zero_logits), -1))
  model.add_loss(l2_loss * l2)
  model.add_metric(l2_loss, aggregation='mean', name='l2_loss')
  # L1 loss.
  l1_loss = math_ops.reduce_mean(
      math_ops.reduce_sum(math_ops.abs(logits - one_logits), -1))
  model.add_loss(l1_loss * l1)
  model.add_metric(l1_loss, aggregation='mean', name='l1_loss')
  return model
class TestDistributionStrategyWithMultipleAddLossAndMetricCalls(
    test.TestCase, parameterized.TestCase):
  """Tests complex models with multiple add loss and metric calls."""

  @combinations.generate(
      combinations.times(
          all_strategy_combinations_minus_default(),
          combinations.combine(
              model_fn=[
                  _functional_with_add_loss_and_metric,
                  _sequential_with_add_loss_and_metric,
                  _functional_with_layer_reuse,
              ],
              l1=[0.01],
              l2=[0.1])))
  def test_fit_and_evaluate(self, distribution, model_fn, l1, l2):
    """Fit/evaluate each ancillary-loss model and check loss bookkeeping."""
    # Make fake MNIST-like image data.
    np.random.seed(_RANDOM_SEED)
    dataset = dataset_ops.DatasetV2.from_tensor_slices(
        (np.random.uniform(size=(64, 28, 28, 1)).astype(np.float32),
         np.random.randint(0, 10, size=(64,))))
    dataset = dataset.shuffle(64).batch(
        8 * distribution.num_replicas_in_sync, drop_remainder=True)
    # Make model with distribution strategy and initialize with dataset shape.
    input_shape = dataset_ops.get_structure(dataset)[0].shape[1:]
    with distribution.scope():
      model = model_fn(input_shape, 10, l1, l2)
      model.compile(
          optimizer=keras.optimizers.adam_v2.Adam(1e-4),
          loss=keras.losses.SparseCategoricalCrossentropy(
              from_logits=True,
              reduction=loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE),
          metrics=[
              keras.metrics.SparseCategoricalAccuracy(),
              keras.metrics.SparseCategoricalCrossentropy(from_logits=True),
          ])
    # Non-eager training doesn't support steps_per_epoch=None.
    for unused_epoch in range(2):
      model.fit(dataset)
    results = dict(zip(model.metrics_names, model.evaluate(dataset)))
    # Sanity checks.
    self.assertBetween(results['sparse_categorical_accuracy'], 0.02, 1.)
    self.assertGreater(results['l2_loss'], 0.)
    self.assertGreater(results['l1_loss'], 0.)
    # Assert correctness of the loss calculation and updating of metrics:
    # total loss = weighted l1 + weighted l2 + cross-entropy.
    self.assertNear(
        results['l1_loss'] * l1 + results['l2_loss'] * l2 +
        results['sparse_categorical_crossentropy'], results['loss'], 1e-6)
class DeterministicModel(keras.Model):
  """Deterministic Model that always outputs the same initial result.

  It verifies the `call` method is run inside the same distribution
  strategy that the model was initially passed.
  """

  def __init__(self, strategy):
    super(DeterministicModel, self).__init__()
    self.x = None             # weight, created lazily in build()
    self.strategy = strategy  # strategy this model must execute under

  def build(self, input_shape):
    # Single scalar weight initialised to one, so call() returns its input.
    self.x = variables.Variable(array_ops.ones(shape=()))

  def call(self, inputs, training=None, mask=None):
    # Guard: call() must run under the strategy captured at construction.
    active_strategy = distribution_strategy_context.get_strategy()
    if active_strategy is not self.strategy:
      raise ValueError('Model must execute call w/ the original strategy')
    return self.x * inputs
class TestModelCapturesStrategy(test.TestCase, parameterized.TestCase):
  """Tests that model creation captures the strategy."""

  @combinations.generate(
      combinations.combine(
          distribution=strategy_combinations.all_strategies,
          mode=['eager']))
  def test_fit_and_evaluate(self, distribution):
    """A model built in a scope trains correctly when compiled outside it."""
    dataset = dataset_ops.DatasetV2.from_tensor_slices(
        (array_ops.ones(shape=(64,)), array_ops.ones(shape=(64,))))
    dataset = dataset.batch(8 * distribution.num_replicas_in_sync)
    # Make model with distribution strategy
    with distribution.scope():
      model = DeterministicModel(distribution)
      optimizer = keras.optimizers.adam_v2.Adam(1e-4)

    # Compile & evaluate the model outside of the distribution strategy scope
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=['binary_accuracy'])

    # Call `optimizer.iterations` out of strategy scope.
    self.assertEqual(model.optimizer.iterations.numpy(), 0)

    # Non-eager training doesn't support steps_per_epoch=None.
    for unused_epoch in range(2):
      model.fit(dataset)

    results = model.evaluate(dataset)
    results = dict(zip(model.metrics_names, results))

    # Check that the metrics have a result we expect
    self.assertEqual(results['binary_accuracy'], 1.0)
    self.assertAllClose(results['loss'], 0.0)

    # Assert that all metric/optimizer/model variables were made in the
    # distribution strategy (Test that compile uses the captured
    # distribution strategy)
    metric_vars = nest.flatten(
        [metric.variables for metric in model.metrics])
    for var in metric_vars:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))
    for var in model.optimizer._weights:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))
    for var in model.variables:
      self.assertTrue(distribution.extended.variable_created_in_scope(var))

    # Make sure the metric must be created in the same scope as the model:
    # This shouldn't raise any validation errors
    with distribution.scope():
      metric = keras.metrics.BinaryAccuracy()
    model.compile(
        optimizer=optimizer,
        loss=keras.losses.MeanSquaredError(),
        metrics=[metric])

    # This should raise an error because the metric is constructed
    # outside of the scope, and not by compile.
    if distribution_strategy_context.has_strategy():
      # Fixed: assertRaisesRegexp is the deprecated Python 2 alias of
      # assertRaisesRegex.
      with self.assertRaisesRegex(
          ValueError, 'All metrics must be created in'):
        model.compile(
            optimizer=keras.optimizers.adam_v2.Adam(1e-4),
            loss=keras.losses.MeanSquaredError(),
            metrics=[keras.metrics.BinaryAccuracy()])
if __name__ == '__main__':
base_layer_utils.enable_v2_dtype_behavior()
test.main()
|
import subprocess
import sys
from pygame import mixer
import os
import time
from Brain.brain import Brain
from MySqlite import mysqlite as sq
from Voice import speakmodule
from actions import check_audio
sq.create_table()
def main():
    """Dispatch to a run mode based on the first command line argument.

    Expects sys.argv[1] to be one of -text, -voice or -remote (the leading
    character is stripped before comparison, matching the original
    ``mode[1][1:]`` behaviour). Prints usage and exits when no argument or
    an unknown flag is given.
    """
    try:
        flag = sys.argv[1][1:]
    except IndexError:
        # No mode supplied on the command line.
        usage()
        return
    if flag == "text":
        check_audio.check("Initializing Text Mode")
        start_text_prompt()
    elif flag == "voice":
        check_audio.check("Initializing Voice Mode")
        start_listening()
    elif flag == "remote":
        check_audio.check("Initializing Remote Mode")
        start_remote_prompt()
    else:
        # Fixed: an unrecognised flag previously did nothing silently.
        usage()
def start_listening():
    """Announce and start the assistant's voice-input loop."""
    print("Voice Mode Activated")
    Brain().voice_mode()
def start_text_prompt():
    """Announce and start the assistant's text-input loop."""
    print("Text Input Mode Is Activated")
    Brain().text_mode()
def start_remote_prompt():
    """Announce and start the assistant's remote-control loop."""
    print("Remote Mode Activated")
    Brain().remote_mode()
def usage():
    """Print command-line usage for jarvis and exit with status 1."""
    # Fixed: local renamed from `usage` so it no longer shadows the function.
    banner = """
    +-------------------------------------------------------+
    |     Usage: python jarvis.py [options]                 |
    |                                                       |
    |-------------------------------------------------------|
    |    -text      initialize jarvis with text mode.       |
    |    -voice     initialize jarvis with voice mode.      |
    |    -remote    initialize jarvis with remote mode.     |
    +-------------------------------------------------------+
    """
    print(banner)
    sys.exit(1)
if __name__ == '__main__':
main()
|
"""Instruction for coffee machine
1. make and serve me, you and Gibbs a cup of coffee(add coffee, and hot water, stir)
2. change how the mix is stirred
3. A better way to make cofee with less repetition
4. Make you coffe with milk and suger (add suger, and milk)
5. Make Gibbs coffe with milk, sugar and something else (add sugar, milk'...)
6. Refactor
"""
# make my coffee
ingredients = ['coffee', 'hot water']
print ('started making coffee...')
print('Getting cup')
print('Adding {}'.format(', '.join(ingredients)))
print('Stir the mix 6 sec')
print('Finished making coffee...')
my_coffee = 'Tasty coffee'
print("--Here' your {} {}. Enjoy !!!! --\n".format(my_coffee, 'silas'))
#make you coffee
print ('started making coffee...')
print('Getting cup')
print('Adding {}'.format(', '.join(ingredients)))
print('Stir the mix for 6 sec')
print('Finished making coffee...')
your_coffee = 'Tasty coffee'
print("--Here' your {} {}. Enjoy !!!! --\n".format(your_coffee, 'You'))
#make Gibbs coffee
print ('started making coffee...')
print('Getting cup')
print('Adding {}'.format(', '.join(ingredients)))
print('Stir the mix for 6 sec')
print('Finished making coffee...')
gibbs_coffee = 'Tasty coffee'
print("--Here' your {} {}. Enjoy !!!! --\n".format(gibbs_coffee, 'Gibbs'))
|
import autofit as af
def test_constructor():
    """A PriorModel over MockOverload's default constructor has one prior."""
    prior_model = af.PriorModel(af.m.MockOverload)
    assert prior_model.prior_count == 1
    # Instantiating at the prior medians yields one == 1.0 and two == 2.
    instance = prior_model.instance_from_prior_medians()
    assert instance.one == 1.0
    assert instance.two == 2
def test_alternative():
    """A PriorModel over the alternative `with_two` constructor still has one prior."""
    prior_model = af.PriorModel(af.m.MockOverload.with_two)
    assert prior_model.prior_count == 1
    # With the alternative constructor, `two` takes the median and `one`
    # is derived as half of it.
    instance = prior_model.instance_from_prior_medians()
    assert instance.two == 1.0
    assert instance.one == 1.0 / 2
|
import logging
import click
LOG = logging.getLogger(__name__)
@click.command('index', short_help='Display all indexes')
@click.option('-n', '--collection-name')
@click.pass_context
def index(context, collection_name):
    """Show all indexes in the database.

    Fixed: the `-n/--collection-name` option was previously shadowed by the
    loop variable and therefore silently ignored. When given, only indexes
    for that collection are displayed.
    """
    LOG.info("Running scout view index")
    adapter = context.obj['adapter']
    # Restrict to the requested collection, or list every collection.
    if collection_name:
        collections = [collection_name]
    else:
        collections = adapter.collections()
    i = 0
    click.echo("collection\tindex")
    for coll_name in collections:
        for coll_index in adapter.indexes(coll_name):
            click.echo("{0}\t{1}".format(coll_name, coll_index))
            i += 1
    if i == 0:
        LOG.info("No indexes found")
|
from .utils import *
class BlockType:
    """Block type identifiers found in the .zon file's block table."""
    Info = 0      # zone dimensions, grid and start position
    Spawns = 1    # named spawn points
    Textures = 2  # texture path list
    Tiles = 3     # tile definitions
    Economy = 4   # zone name, ambience and economy parameters
class ZoneType:
    """Zone type identifiers stored in a .zon Info block."""
    Grass = 0
    Mountain = 1
    MountainVillage = 2
    BoatVillage = 3
    Login = 4
    MountainGorge = 5
    Beach = 6
    JunonDungeon = 7
    LunaSnow = 8
    Birth = 9
    JunonField = 10
    LunaDungeon = 11
    EldeonField = 12
    EldeonField2 = 13
    JunonPyramids = 14
class Position:
    """A grid cell entry: 2D coordinates plus a usage flag."""

    def __init__(self):
        self.is_used = False       # whether this grid cell is marked used
        self.position = Vector2()  # cell coordinates

    def __repr__(self):
        # Fixed: the attribute is `is_used`; the original read `self.used`,
        # which raised AttributeError whenever a Position was repr()'d.
        return "Position ({},{})[{}]".format(
            self.position.x,
            self.position.y,
            "Used" if self.is_used else "Not Used")
class Spawn:
    """A named spawn point located in 3D world space."""

    def __init__(self):
        self.position = Vector3()  # world coordinates of the spawn
        self.name = ""             # spawn identifier read from the file

    def __repr__(self):
        return "Spawn '%s'" % self.name
class Tile:
    """A single tile definition from the .zon Tiles block."""

    def __init__(self):
        self.layer1 = 0        # base texture layer index
        self.layer2 = 0        # overlay texture layer index
        self.offset1 = 0       # offset applied to layer1
        self.offset2 = 0       # offset applied to layer2
        self.blending = False  # whether the two layers are blended
        self.rotation = 0      # tile rotation value
        self.tile_type = 0     # tile type identifier
class Zon:
    """In-memory representation of a .zon zone file.

    The file consists of a block table (type, offset pairs) followed by the
    block payloads; `load` parses the Info, Spawns, Textures, Tiles and
    Economy blocks. Reading helpers (read_i32 etc.) come from .utils.
    """

    def __init__(self, filepath=None):
        # --- Info block ---
        self.zone_type = None           # a ZoneType value once loaded
        self.width = 0                  # zone width in grid cells
        self.length = 0                 # zone length in grid cells
        self.grid_count = 0
        self.grid_size = 0.0
        self.start_position = Vector2()
        self.positions = []             # 2D list [y][x] of Position
        # --- Spawns / Textures / Tiles blocks ---
        self.spawns = []                # list of Spawn
        self.textures = []              # list of texture path strings
        self.tiles = []                 # list of Tile
        # --- Economy block (defaults used when no file is loaded) ---
        self.name = ""
        self.is_underground = False
        self.background_music_path = ""
        self.sky_path = ""
        self.economy_check_rate = 50
        self.population_base = 100
        self.population_growth_rate = 10
        self.metal_consumption = 50
        self.stone_consumption = 50
        self.wood_consumption = 50
        self.leather_consumption = 50
        self.cloth_consumption = 50
        self.alchemy_consumption = 50
        self.chemical_consumption = 50
        self.industrial_consumption = 50
        self.medicine_consumption = 50
        self.food_consumption = 50
        if filepath:
            self.load(filepath)

    def __repr__(self):
        return "{} zone".format(self.zone_type)

    def load(self, filepath):
        """Populate this Zon from the binary file at `filepath`."""
        with open(filepath, "rb") as f:
            block_count = read_i32(f)
            for i in range(block_count):
                # Each table entry is (block_type, payload offset).
                block_type = read_i32(f)
                block_offset = read_i32(f)
                next_block = f.tell()  # table position to resume from
                f.seek(block_offset)
                if block_type == BlockType.Info:
                    self.zone_type = read_i32(f)
                    self.width = read_i32(f)
                    self.length = read_i32(f)
                    self.grid_count = read_i32(f)
                    self.grid_size = read_f32(f)
                    self.start_position.x = read_i32(f)
                    self.start_position.y = read_i32(f)
                    # NOTE(review): the loops iterate y over `width` and x
                    # over `length` — assumed intentional, matching the
                    # list_2d(width, length) layout; confirm against writer.
                    self.positions = list_2d(self.width, self.length)
                    for y in range(self.width):
                        for x in range(self.length):
                            p = Position()
                            p.is_used = read_bool(f)
                            p.position.x = read_f32(f)
                            p.position.y = read_f32(f)
                            self.positions[y][x] = p
                elif block_type == BlockType.Spawns:
                    spawn_count = read_i32(f)
                    for j in range(spawn_count):
                        s = Spawn()
                        s.position.x = read_f32(f)
                        s.position.y = read_f32(f)
                        s.position.z = read_f32(f)
                        s.name = read_bstr(f)
                        self.spawns.append(s)
                elif block_type == BlockType.Textures:
                    texture_count = read_i32(f)
                    for j in range(texture_count):
                        self.textures.append(read_bstr(f))
                elif block_type == BlockType.Tiles:
                    tile_count = read_i32(f)
                    for j in range(tile_count):
                        t = Tile()
                        t.layer1 = read_i32(f)
                        t.layer2 = read_i32(f)
                        t.offset1 = read_i32(f)
                        t.offset2 = read_i32(f)
                        t.blending = (read_i32(f) != 0)
                        t.rotation = read_i32(f)
                        t.tile_type = read_i32(f)
                        self.tiles.append(t)
                elif block_type == BlockType.Economy:
                    self.name = read_bstr(f)
                    self.is_underground = (read_i32(f) != 0)
                    self.background_music_path = read_bstr(f)
                    self.sky_path = read_bstr(f)
                    self.economy_check_rate = read_i32(f)
                    self.population_base = read_i32(f)
                    self.population_growth_rate = read_i32(f)
                    self.metal_consumption = read_i32(f)
                    self.stone_consumption = read_i32(f)
                    self.wood_consumption = read_i32(f)
                    self.leather_consumption = read_i32(f)
                    self.cloth_consumption = read_i32(f)
                    self.alchemy_consumption = read_i32(f)
                    self.chemical_consumption = read_i32(f)
                    self.industrial_consumption = read_i32(f)
                    self.medicine_consumption = read_i32(f)
                    self.food_consumption = read_i32(f)
                # Return to the block table for the next entry (not needed
                # after the final block).
                if i < (block_count - 1):
                    f.seek(next_block)
|
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from Crypto.Random import random
g_backend = default_backend()
# Module-level hard-coded IVs, referenced by the *_g_variable_access*
# examples below.
g_iv1 = b"1234567812345678"
g_iv2 = bytes("1234567812345678", "utf8")
def p_example1_hard_coded1(key, data):
    """AES-CBC encryption with the IV hard-coded inline as a bytes literal."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(b"1234567812345678"), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example2_hard_coded2(key, data):
    """AES-CBC encryption with the IV hard-coded inline via bytes(str, 'utf8')."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(bytes("1234567812345678", "utf8")), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example3_local_variable1(key, data):
    """AES-CBC with the hard-coded IV held in a local variable (bytes literal)."""
    iv = b"1234567812345678"
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example4_local_variable2(key, data):
    """AES-CBC with the hard-coded IV held in a local built with bytes(str, 'utf8')."""
    iv = bytes("1234567812345678", "utf8")
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example5_nested_local_variable1(key, data):
    """AES-CBC where the hard-coded IV literal flows through a chain of local aliases."""
    iv1 = b"1234567812345678"
    iv2 = iv1
    iv3 = iv2
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv3), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example6_nested_local_variable2(key, data):
    """AES-CBC where a bytes(str, 'utf8') IV flows through a chain of local aliases."""
    iv1 = bytes("1234567812345678", "utf8")
    iv2 = iv1
    iv3 = iv2
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv3), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example_method_call(key, iv, data):
    """Helper: AES-CBC encrypt `data` with the caller-supplied key and IV."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example_nested_method_call(key, iv, data):
    """Helper that simply forwards to p_example_method_call (extra call depth)."""
    return p_example_method_call(key, iv, data)
def p_example7_direct_method_call1(key, data):
    """Hard-coded IV (bytes literal) passed directly into the encryption helper."""
    iv = b"1234567812345678"
    return p_example_method_call(key, iv, data)
def p_example8_direct_method_call2(key, data):
    """Hard-coded IV (bytes(str, 'utf8')) passed directly into the encryption helper."""
    iv = bytes("1234567812345678", "utf8")
    return p_example_method_call(key, iv, data)
def p_example9_nested_method_call1(key, data):
    """Hard-coded IV (bytes literal) passed through the nested helper."""
    iv = b"1234567812345678"
    return p_example_nested_method_call(key, iv, data)
def p_example10_nested_method_call2(key, data):
    """Hard-coded IV (bytes(str, 'utf8')) passed through the nested helper."""
    iv = bytes("1234567812345678", "utf8")
    return p_example_nested_method_call(key, iv, data)
def p_example11_direct_g_variable_access1(key, data):
    """AES-CBC reading the hard-coded IV directly from module-level g_iv1."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(g_iv1), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example12_direct_g_variable_access2(key, data):
    """AES-CBC reading the hard-coded IV directly from module-level g_iv2."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(g_iv2), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example13_indirect_g_variable_access1(key, data):
    """AES-CBC copying module-level g_iv1 into a local before use."""
    iv = g_iv1
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example14_indirect_g_variable_access2(key, data):
    """AES-CBC copying module-level g_iv2 into a local before use."""
    iv = g_iv2
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def p_example15_warning_parameter_not_resolvable(key, iv, data):
    """AES-CBC where the IV arrives as a parameter whose origin is not resolvable here."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
def n_example1_cbc(key, data):
    """AES-CBC with a freshly generated random IV.

    Fixed: the original requested random.getrandbits(16) — only 16 *bits*
    (2 bytes) of entropy zero-padded out to a 16-*byte* IV. A CBC IV must be
    a full 128 bits of unpredictable data, so request 128 bits.
    """
    iv = random.getrandbits(128).to_bytes(16, 'big')
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=g_backend)
    encryptor = cipher.encryptor()
    cipher_text = encryptor.update(data) + encryptor.finalize()
    return cipher_text
|
#!/usr/bin/env python3
import os
import subprocess as sp
import numpy as np
from configobj import ConfigObj
import flannel.io as fio
from .image_routines import load_image
class ResultObject:
    """
    Small object to hold registration result info
    """

    def __init__(self, registered_path, map_path, log_path, fixed_path=None,
                 moved_path=None):
        # Store each provided path verbatim; the optional inputs stay None
        # when a run does not produce/record them.
        for attr_name, attr_value in (("registered_path", registered_path),
                                      ("map_path", map_path),
                                      ("log_path", log_path),
                                      ("fixed_path", fixed_path),
                                      ("moved_path", moved_path)):
            setattr(self, attr_name, attr_value)
class pFIRERunnerMixin:
    """ Mixin class to provide a pFIRE runner interface
    """

    def __init__(self, *args, **kwargs):
        super(pFIRERunnerMixin, self).__init__(*args, **kwargs)
        # Paths populated by run_pfire(); None until a run completes.
        self.pfire_fixed_path = None
        self.pfire_moved_path = None
        self.pfire_mask_path = None
        self.pfire_reg_path = None
        self.pfire_map_path = None

    def run_pfire(self, config_path, comm_size=1):
        """ Run pFIRE using provided config file

        Launches the `pfire` executable in the config's directory, captures
        its output to a log file, and scrapes the log for the registered
        image and map output paths.

        Raises:
            RuntimeError: on MPI request (unsupported), a non-zero exit
                code, or if the output paths cannot be found in the log.
        """
        if comm_size != 1:
            raise RuntimeError("MPI pFIRE runs not yet supported")
        pfire_workdir, pfire_config = [os.path.normpath(x) for x in
                                       os.path.split(config_path)]
        # opening explicitly causes failure on file nonexistence
        with open(config_path, 'r') as fh:
            config = ConfigObj(fh)
        print("Running pFIRE on {}".format(pfire_config))
        self.pfire_fixed_path = os.path.join(pfire_workdir, config['fixed'])
        self.pfire_moved_path = os.path.join(pfire_workdir, config['moved'])
        try:
            self.pfire_mask_path = os.path.join(pfire_workdir, config['mask'])
        except KeyError:
            # Mask is optional in the config.
            pass
        self.pfire_logfile = os.path.join(
            pfire_workdir,
            "{}_pfire.log".format(os.path.splitext(pfire_config)[0]))
        with open(self.pfire_logfile, 'w') as logfile:
            pfire_args = ['pfire', pfire_config]
            res = sp.run(pfire_args, cwd=pfire_workdir, stdout=logfile,
                         stderr=logfile)
        if res.returncode != 0:
            raise RuntimeError("Failed to run pFIRE, check log for details: {}"
                               "".format(self.pfire_logfile))
        # Scrape the log for the lines announcing the output file locations.
        with open(self.pfire_logfile, 'r') as logfile:
            for line in logfile:
                if line.startswith("Saved registered image to "):
                    reg_path = line.replace("Saved registered image to ",
                                            "").strip()
                    self.pfire_reg_path = os.path.join(pfire_workdir, reg_path)
                elif line.startswith("Saved map to "):
                    map_path = line.replace("Saved map to ", "").strip()
                    self.pfire_map_path = os.path.join(pfire_workdir, map_path)
        if not (self.pfire_reg_path or self.pfire_map_path):
            raise RuntimeError("Failed to extract result path(s) from log")
class ShIRTRunnerMixin:
    """ Mixin class to provide a ShIRT runner interface accepted a pFIRE config
    file
    """

    # Name used when the config provides no mask and an all-ones mask must
    # be generated on the fly.
    default_mask_name = "default_mask.mask"
    # Defaults merged under the user's pFIRE config (mask is optional).
    config_defaults = {"mask": None}

    def __init__(self, *args, **kwargs):
        super(ShIRTRunnerMixin, self).__init__(*args, **kwargs)
        # Paths populated by run_shirt(); None until a run completes.
        self.shirt_fixed_path = None
        self.shirt_moved_path = None
        self.shirt_mask_path = None
        self.shirt_reg_path = None
        self.shirt_map_path = None

    def _build_shirt_config(self, config_file):
        """
        Build ShIRT config object given pfire config file
        """
        # read pfire config and merge with default options to get full
        # option list
        defaults = ConfigObj(self.config_defaults)
        config = ConfigObj(config_file)
        defaults.merge(config)
        return defaults

    @staticmethod
    def _strip_imgname(name):
        # ShIRT wants bare image names: no directory, no extension.
        shirtified = os.path.basename(name)
        return os.path.splitext(shirtified)[0]

    def run_shirt(self, config_path):
        """
        Run ShIRT using a pfire config file for input

        Converts non-.image inputs to .image files, generates an all-ones
        mask when none is configured, then invokes ShIRT in the config's
        directory, logging to <config>_shirt.log.

        Raises:
            RuntimeError: if the ShIRT process exits non-zero.
        """
        config = self._build_shirt_config(config_path)
        work_dir, config_file = os.path.split(config_path)
        print("Running ShIRT on {}".format(config_file))
        self.shirt_reg_path = 'shirt_{}_registered.image'.format(self.name)
        self.shirt_map_path = 'shirt_{}_map.map'.format(self.name)
        for fom in ['fixed', 'moved', 'mask']:
            if fom == 'mask' and config[fom] is None:
                continue
            if config[fom].endswith(".image"):
                setattr(self, "shirt_{}_path".format(fom),
                        os.path.join(work_dir, config[fom]))
            else:
                # Convert the input to ShIRT's .image format alongside it.
                data = load_image(os.path.join(work_dir, config[fom]))
                newname = os.path.join(
                    work_dir, os.path.splitext(config[fom])[0] + '.image')
                setattr(self, "shirt_{}_path".format(fom), newname)
                fio.save_image(data, newname)
        if config['mask'] is None:
            # No mask configured: build an all-ones mask matching the fixed
            # image's shape.
            self.shirt_mask_path = os.path.join(work_dir,
                                                self.default_mask_name)
            data = load_image(os.path.join(work_dir, config['fixed']))
            data = np.full(data.shape, 1.0)
            fio.save_image(data, self.shirt_mask_path)
        shirt_args = ['ShIRT', 'Register', 'verbose',
                      'Fixed', self._strip_imgname(self.shirt_fixed_path),
                      'Moved', self._strip_imgname(self.shirt_moved_path),
                      'Mask', self._strip_imgname(self.shirt_mask_path),
                      'NodeSpacing', config['nodespacing'],
                      'Registered', self._strip_imgname(self.shirt_reg_path),
                      'Map', self._strip_imgname(self.shirt_map_path)]
        self.shirt_logfile = os.path.join(
            work_dir, "{}_shirt.log".format(os.path.splitext(config_file)[0]))
        with open(self.shirt_logfile, 'w') as logfile:
            sp.run(['ShIRT', 'setpath', 'DataPath', '.'], cwd=work_dir)
            res = sp.run(shirt_args, stdout=logfile, stderr=logfile,
                         cwd=work_dir)
        if res.returncode != 0:
            raise RuntimeError("Failed to run ShIRT, check log for details: {}"
                               "".format(self.shirt_logfile))
        # NOTE(review): fixed/moved/mask paths were already joined with
        # work_dir above, so this second join appears to prepend work_dir
        # twice for those attributes — verify against callers before
        # changing.
        self.shirt_fixed_path = os.path.join(work_dir, self.shirt_fixed_path)
        self.shirt_moved_path = os.path.join(work_dir, self.shirt_moved_path)
        self.shirt_mask_path = os.path.join(work_dir, self.shirt_mask_path)
        self.shirt_reg_path = os.path.join(work_dir, self.shirt_reg_path)
        self.shirt_map_path = os.path.join(work_dir, self.shirt_map_path)
|
"""
Feature Infection
Control release of changes to groups of users
This module provides the entities for the feature infection
module. Infections are tags that can be applied to entities
while respecting clustering relationships between users.
Exports:
Infector: class that represents a named feature
InfectionControl: scope for registering infections on entities
CDC: module scoped InfectionControl instance (a pun on
the center for disease control)
Usage:
Generally, users will use the module scoped CDC to do an infection
>>> User = collections.namedtuple("User", "name coaches")
>>> user1 = User("one", ())
>>> user2 = User("two", ("one"))
>>> user3 = User("three", ())
>>> users = [user1, user2, user3]
>>> coaches = operator.itemgetter("coaches")
>>> super_learning = feature_infection.CDC.get_infector("super-learning")
>>> super_learning.total_infection(users, user1, connections=coaches)
set([User(name='one', coaches=()), User(name='two', coaches=User(name=\
'one', coaches=()))])
Glossary:
infection - Assignment of an infector to an entity
infector - Label that can be used to infect an entity
infectable - Entity that can be infected
"""
from operator import itemgetter
from collections import defaultdict
import logging
import networkx as nx
from . import subset_sum as ss
_LOG = logging.getLogger(__name__)
class Infector(object):
    """A named feature (tag) that can be applied to -- "infect" -- entities.

    Instances are normally obtained via ``InfectionControl.get_infector``;
    the infections they perform are recorded in that owning control scope.
    """

    def __init__(self, control, name):
        """Create a named feature tied to an InfectionControl scope.

        Args:
            control: InfectionControl instance that stores the infections.
            name: string label identifying this feature.
        """
        self.control = control
        self.name = name

    @staticmethod
    def _generate_graph(infectables, connections=None):
        """Convert a sequence of infectables into an undirected graph.

        Args:
            infectables: iterable of infectables, or an existing nx.Graph
                (returned unchanged).
            connections: optional function mapping an infectable to its
                adjacent infectables; defaults to ``iter``.

        Returns:
            nx.Graph with a node per infectable and an edge per connection.

        Raises:
            ValueError: connections is neither None nor callable.
        """
        # A pre-built graph needs no connections function at all; check this
        # before validating `connections` so graph callers may omit it.
        if isinstance(infectables, nx.Graph):
            return infectables
        # total_infection/limited_infection forward connections=None when the
        # caller did not supply one; previously that None reached the
        # callable() check and always raised ValueError, so the documented
        # "optional connections" path never worked. Fall back to iter().
        if connections is None:
            connections = iter
        if not callable(connections):
            raise ValueError("connections is not a function")
        infectables_graph = nx.Graph()
        for infectable in infectables:
            infectables_graph.add_node(infectable)
            for connected_infectable in connections(infectable):
                infectables_graph.add_edge(infectable, connected_infectable)
        return infectables_graph

    @staticmethod
    def _get_total_infection_plan(infectables, initial_infected):
        """Plan for infecting everything connected to an initial infectable."""
        # The plan is simply the connected component containing the root.
        return nx.node_connected_component(infectables, initial_infected)

    @staticmethod
    def _get_limited_infection_plan(infectables, target_size):
        """Plan for infecting a group of infectables no larger than target_size."""
        groups = [(len(group), group)
                  for group in nx.connected_components(infectables)]
        get_count = itemgetter(0)
        get_users = itemgetter(1)
        _, infection_groups = ss.optimize(groups, target_size, key=get_count)
        # Materialize before testing emptiness: the original guarded on a
        # `map` object, which is always truthy in Python 3, so an empty
        # selection fell through to `set.union()` and raised TypeError.
        infected = [get_users(group) for group in infection_groups]
        return set.union(*infected) if infected else set()

    def total_infection(self, infectables_seq, initial_infected,
                        connections=None):
        """Create an infection of all users connected to the target user.

        Starting at the root provided by initial_infected, infect all the
        connected infectable entities in the infectables graph.

        Args:
            infectables_seq: list or graph of infectables. If a list is
                provided it will be converted to a graph with edges
                defined by using the connections parameter.
            initial_infected: infectable to form the root of the infection.
            connections: (optional) function that produces adjacent
                infectables for a given infectable. The produced graph is
                undirected, but connections is only required to produce
                adjacency in one direction.

        Returns:
            set of the infectables that were infected.

        Raises:
            ValueError: connections is not a function.
        """
        if not infectables_seq:
            return set()
        infectables = self._generate_graph(infectables_seq, connections)
        plan = self._get_total_infection_plan(infectables, initial_infected)
        self.control.infect(self, *plan)
        return plan

    def limited_infection(self, infectables_seq, target_size,
                          connections=None):
        """Create an infection that is bounded by the target size.

        Finds a subset of infectables that approximates as well as possible
        an infection of the target_size. This function will not break apart
        groups to get closer to the target_size; callers who need a closer
        approximation in the presence of a large connected cluster will need
        to provide a pruned connections function.

        Args:
            infectables_seq: list or graph of infectables. If a list is
                provided it will be converted to a graph with edges
                defined by using the connections parameter.
            target_size: limit to the number of infected produced.
            connections: (optional) function that produces adjacent
                infectables for a given infectable. The produced graph is
                undirected, but connections is only required to produce
                adjacency in one direction.

        Returns:
            set of the infectables that were infected.

        Raises:
            ValueError: connections is not a function.
        """
        if not infectables_seq:
            return set()
        infectables = self._generate_graph(infectables_seq, connections)
        plan = self._get_limited_infection_plan(infectables, target_size)
        self.control.infect(self, *plan)
        return plan

    def is_infected(self, infectable):
        """Check whether `infectable` carries this infection."""
        return self.control.has_infection(infectable, self)
class InfectionControl(object):
    """Scope that tracks which infectors have been applied to which entities."""

    def __init__(self):
        """Create an empty infection registry."""
        # name -> Infector, so repeated get_infector calls share an instance.
        self.infectors = {}
        # infectable -> list of infection tags (feature names).
        self.infections = defaultdict(list)

    @staticmethod
    def _get_tag(infector):
        """Normalize an Infector instance or a plain string to its tag name."""
        # `basestring` is Python 2 only and raises NameError on Python 3;
        # plain strings must be matched with `str` instead.
        return infector if isinstance(infector, str) else infector.name

    def get_infector(self, name):
        """Get or create the Infector registered under `name`."""
        return self.infectors.setdefault(name, Infector(self, name))

    def infect(self, infector, *infectables):
        """Infect all provided infectables with the given feature."""
        infection_tag = self._get_tag(infector)
        for infectable in infectables:
            self.infections[infectable].append(infection_tag)
            _LOG.info("User %s infected with feature %s.", infectable,
                      infection_tag)

    def has_infection(self, infectable, infector):
        """Check if an infectable entity has an infection."""
        return self._get_tag(infector) in self.infections.get(infectable, [])


# Module-scoped InfectionControl instance (a pun on the center for
# disease control); the usual entry point for users of this module.
CDC = InfectionControl()
|
# Prompt the user for their name (Portuguese: "hello, what's your name?")
# and echo it straight back to stdout.
print(input('ola seu nome e?'))
|
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5x.models."""
import functools
from unittest import mock
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import flax
from flax import traverse_util
import jax
import jax.numpy as jnp
import numpy as np
import t5.data.tasks # pylint:disable=unused-import
from t5x import decoding
from t5x import models
from t5x import partitioning
from t5x import test_utils
from t5x import trainer as trainer_lib
from t5x import utils
import tensorflow as tf
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()

# Short alias used when specifying partitioning in these tests.
PartitionSpec = partitioning.PartitionSpec
class ModelsTest(parameterized.TestCase):
  """Unit tests for module-level helpers in t5x.models."""

  def test_remove_prefix(self):
    """remove_prefix drops per-row prefixes and left-shifts the remainder."""
    seqs = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]])
    prefixes = np.array([2, 4])
    jitted_remove_prefix = jax.jit(models.remove_prefix)
    shifted = jitted_remove_prefix(seqs, prefixes)
    np.testing.assert_array_equal(
        shifted,
        [[3, 4, 5, 6, 7, 0, 0, 0], [10, 11, 0, 0, 0, 0, 0, 0]])

  def test_remove_prefix_zero_len_prefix(self):
    """A zero-length prefix leaves the sequences untouched."""
    seqs = np.array([[1, 2, 3, 4, 5, 6, 7, 0], [6, 7, 8, 9, 10, 11, 0, 0]])
    jitted_remove_prefix = jax.jit(models.remove_prefix)
    # With nothing to strip, the output is the original sequences.
    np.testing.assert_array_equal(
        jitted_remove_prefix(seqs, np.array([0, 0])), seqs)
BATCH_SIZE, ENCODER_LEN, MAX_DECODE_LEN, EMBED_DIM = 2, 3, 4, 5
class EncoderDecoderModelTest(parameterized.TestCase):
  """Tests for models.EncoderDecoderModel using mocked flax modules."""

  @parameterized.named_parameters(
      dict(
          testcase_name='no_types',
          shapes={
              'encoder_input_tokens': [1, 512],
              'decoder_input_tokens': [1, 62]
          },
          types=None),
      dict(
          testcase_name='int32',
          shapes={
              'encoder_input_tokens': [1, 512],
              'decoder_input_tokens': [1, 62]
          },
          types={
              'encoder_input_tokens': jnp.int32,
              'decoder_input_tokens': jnp.int32
          }),
      dict(
          testcase_name='float32',
          shapes={
              'encoder_input_tokens': [1, 512],
              'decoder_input_tokens': [1, 62],
              'encoder_positions': [1, 512],
              'decoder_positions': [1, 62],
          },
          types={
              'encoder_input_tokens': jnp.int32,
              'decoder_input_tokens': jnp.int32,
              'encoder_positions': jnp.int32,
              'decoder_positions': jnp.int32
          }),
      dict(
          testcase_name='float32_segment_ids',
          shapes={
              'encoder_input_tokens': [1, 512],
              'decoder_input_tokens': [1, 62],
              'encoder_segment_ids': [1, 512],
              'decoder_segment_ids': [1, 62],
          },
          types={
              'encoder_input_tokens': jnp.int32,
              'decoder_input_tokens': jnp.int32,
              'encoder_segment_ids': jnp.int32,
              'decoder_segment_ids': jnp.int32
          }),
  )
  def test_get_initial_variables_shapes_and_types(self, shapes, types):
    """get_initial_variables forwards dummies of the right shape/dtype to init."""
    mock_transformer = mock.Mock()
    mock_transformer.init.return_value = {'params': {}}
    mock_optimizer_def = mock.Mock()
    rng = mock.Mock()

    def mock_init(self):
      self.module = mock_transformer
      self.optimizer_def = mock_optimizer_def

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()
      model.get_initial_variables(rng, shapes, types)

    # When no types are given the dummies are expected to default to float32.
    if types is None:
      encoder_input = jnp.ones(
          shapes['encoder_input_tokens'], dtype=jnp.float32)
      decoder_input = jnp.ones(
          shapes['decoder_input_tokens'], dtype=jnp.float32)
    else:
      encoder_input = jnp.ones(
          shapes['encoder_input_tokens'], dtype=types['encoder_input_tokens'])
      decoder_input = jnp.ones(
          shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens'])

    # Using `.assert_called_once_with` doesn't work because the simple
    # comparison it does for the array arguments fail (truth value of an array
    # is ambiguous).
    called_with = mock_transformer.init.call_args
    self.assertEqual(called_with[0][0], rng)
    np.testing.assert_allclose(called_with[0][1], encoder_input)
    np.testing.assert_allclose(called_with[0][2], decoder_input)
    np.testing.assert_allclose(called_with[0][3], decoder_input)

    if 'encoder_positions' in shapes:
      encoder_positions = jnp.ones(
          shapes['encoder_positions'], dtype=types['encoder_positions'])
      np.testing.assert_allclose(called_with[1]['encoder_positions'],
                                 encoder_positions)
    else:
      self.assertIsNone(called_with[1]['encoder_positions'])
    if 'decoder_positions' in shapes:
      decoder_positions = jnp.ones(
          shapes['decoder_positions'], dtype=types['decoder_positions'])
      np.testing.assert_allclose(called_with[1]['decoder_positions'],
                                 decoder_positions)
    else:
      self.assertIsNone(called_with[1]['decoder_positions'])
    if 'encoder_segment_ids' in shapes:
      encoder_positions = jnp.ones(
          shapes['encoder_segment_ids'], dtype=types['encoder_segment_ids'])
      np.testing.assert_allclose(called_with[1]['encoder_segment_ids'],
                                 encoder_positions)
    else:
      self.assertIsNone(called_with[1]['encoder_segment_ids'])
    if 'decoder_segment_ids' in shapes:
      decoder_segment_ids = jnp.ones(
          shapes['decoder_segment_ids'], dtype=types['decoder_segment_ids'])
      np.testing.assert_allclose(called_with[1]['decoder_segment_ids'],
                                 decoder_segment_ids)
    else:
      self.assertIsNone(called_with[1]['decoder_segment_ids'])
    self.assertFalse(called_with[1]['decode'])
    self.assertFalse(called_with[1]['enable_dropout'])

  @parameterized.named_parameters(
      dict(testcase_name='no_force_decoding', prompt_with_targets=False),
      dict(testcase_name='force_decoding', prompt_with_targets=True),
  )
  def test_prompt_with_targets(self, prompt_with_targets):
    """prompt_with_targets controls whether decode_fn sees the target prompt."""
    batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5
    batch = {
        'encoder_input_tokens':
            np.zeros((batch_size, encoder_len), dtype=np.int32),
        'decoder_input_tokens':
            np.full([batch_size, max_decode_len], 2, dtype=np.int32)
    }

    # These dummy logits represent the probability distribution where all the
    # probability mass is in one item (i.e., degenerate distribution). For
    # batch element 0, it is vocabulary index 3.
    # We test `_predict_step` to avoid having to define a task and its
    # vocabulary.
    dummy_logits = jnp.expand_dims(
        jnp.array([[-1e7, -1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, -1e7, 0]]),
        axis=1)

    mock_decode_fn = mock.Mock()
    mock_decode_fn.return_value = (np.full([batch_size, max_decode_len, 1],
                                           3,
                                           dtype=np.int32),
                                   np.full([batch_size, 1],
                                           1.0,
                                           dtype=np.float32))

    class MockModule:
      # Minimal stand-in for the flax module: fixed logits, zero encodings.

      def __init__(self):
        self.dtype = jnp.float32

      def apply(self, *args, method=None, **kwargs):
        del args, kwargs
        if method is None:  # use for module.`__call__`
          return (dummy_logits, {'cache': {}})
        else:
          return method()

      def encode(self):
        return jnp.zeros((batch_size, encoder_len, emb_dim))

      def decode(self):
        return (dummy_logits, {'cache': {}})

    def mock_init(self):
      self.module = MockModule()
      self.module.scan_layers = False
      self._input_vocabulary = mock.Mock(eos_id=1)
      self._output_vocabulary = mock.Mock(eos_id=1)
      self._decode_fn = mock_decode_fn

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()
      model.predict_batch_with_aux({},
                                   batch,
                                   prompt_with_targets=prompt_with_targets)

    # When prompting with targets the decoder inputs should be passed through;
    # otherwise the decode function gets an all-zeros prompt.
    if prompt_with_targets:
      expected_inputs = batch['decoder_input_tokens']
    else:
      expected_inputs = np.zeros([batch_size, max_decode_len], dtype=np.int32)

    assert mock_decode_fn.call_count == 1
    # Look at the kwargs call list for inputs, assert_called_with doesn't
    # work well with np.array comparison.
    np.testing.assert_array_equal(mock_decode_fn.mock_calls[0][2]['inputs'],
                                  expected_inputs)

  def test_predict_batch_loop_and_caches_are_equal(self):
    """While forcing the prompt, all beams of a batch element share one cache."""
    vocab_size = 50
    lengths = np.array([[2], [3]])
    batch_size, beam_size, encoder_len, max_decode_len = 2, 2, 3, 7
    batch = {
        'encoder_input_tokens':
            np.zeros((batch_size, encoder_len), dtype=np.int32),
        'decoder_target_tokens':
            np.zeros((batch_size, encoder_len), dtype=np.int32),
        'decoder_input_tokens':
            np.concatenate(
                [
                    np.expand_dims(
                        np.concatenate(
                            [[0],
                             np.arange(9, 9 + lengths[0][0], dtype=np.int32),
                             np.zeros((max_decode_len - lengths[0][0] - 1),
                                      dtype=np.int32)]),
                        axis=0),  # First element
                    np.expand_dims(
                        np.concatenate(
                            [[0],
                             np.arange(3, 3 + lengths[1][0], dtype=np.int32),
                             np.zeros((max_decode_len - lengths[1][0] - 1),
                                      dtype=np.int32)]),
                        axis=0)  # Second element
                ],
                axis=0),
    }

    model = test_utils.get_t5_test_model(vocab_size=50)
    module = model.module
    params = module.init(
        jax.random.PRNGKey(0),
        jnp.ones((batch_size, encoder_len)),
        jnp.ones((batch_size, max_decode_len)),
        jnp.ones((batch_size, max_decode_len)),
        enable_dropout=False)['params']

    def mock_init(self):
      self.module = module
      # Set the EOS token to be larger then the vocabulary size. This forces the
      # model to decode all the way to `max_decode_length`, allowing us to test
      # behavior when one element reaches the end before the others.
      self._output_vocabulary = mock.Mock(eos_id=vocab_size + 12)
      self._decode_fn = decoding.beam_search

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()

    with mock.patch.object(
        model, '_compute_logits_from_slice',
        autospec=True) as tokens_to_logits_mock:
      # Make the side effect of the mock, call the method on the class, with the
      # instance partialed in as `self`. This lets us call the actual code,
      # while recording the inputs, without an infinite loop you would get
      # calling `instance.method`
      tokens_to_logits_mock.side_effect = functools.partial(
          models.EncoderDecoderModel._compute_logits_from_slice, model)
      # Disable jit, so that the `lax.while_loop` isn't traced, as the
      # collection of tracers in the mock call_args would generally trigger a
      # tracer leak error.
      with jax.disable_jit():
        _ = model.predict_batch_with_aux(
            params, batch, prompt_with_targets=True, num_decodes=2)

    # Collect all the input tokens to our tokens_to_logits function
    all_inputs = []
    all_cache_keys = []  # Collect all the cache keys
    all_cache_values = []  # Collect all the cache values
    # Currently force decoding generates logits at every step. We should have
    # `max_decode_length` calls to our tokens -> logits func.
    self.assertLen(tokens_to_logits_mock.call_args_list, max_decode_len)
    for tokens_call in tokens_to_logits_mock.call_args_list:
      # Inputs: [B * Be, 1]
      inputs, cache = tokens_call[0]
      cache = flax.core.unfreeze(cache)
      # Cache: [B * Be, 1] * #Layers
      cache_keys = [
          v for k, v in traverse_util.flatten_dict(cache).items()
          if k[-1] == 'cached_key'
      ]
      cache_values = [
          v for k, v in traverse_util.flatten_dict(cache).items()
          if k[-1] == 'cached_value'
      ]
      all_inputs.append(inputs)
      all_cache_keys.append(cache_keys)
      all_cache_values.append(cache_values)
    # Convert inputs to a single block [B, DL, Be]
    all_inputs = np.concatenate(all_inputs, axis=1)
    # Convert caches into a single block per layer [B * Be, DL] * L
    all_cache_keys = [np.stack(c, axis=1) for c in zip(*all_cache_keys)]
    all_cache_values = [np.stack(c, axis=1) for c in zip(*all_cache_values)]

    # Make sure that for each batch, the cache for each beam is identical when
    # prompt is being forced.
    for b in range(batch_size):
      for i, input_token in enumerate(all_inputs[b * beam_size]):
        if i < lengths[b]:
          self.assertEqual(input_token, batch['decoder_input_tokens'][b][i])
          # For all layers.
          for cache_keys in all_cache_keys:
            np.testing.assert_array_equal(cache_keys[b * beam_size][i],
                                          cache_keys[b * beam_size + 1][i])
          for cache_values in all_cache_values:
            np.testing.assert_array_equal(cache_values[b * beam_size][i],
                                          cache_values[b * beam_size + 1][i])

  def test_score_batch(self):
    """score_batch forwards the batch to module.apply with the expected kwargs."""
    encoder_input_tokens = jnp.ones((2, 3))
    # For this test, decoder input and target tokens are dummy values.
    decoder_input_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_target_tokens = jnp.array([[1, 2, 1, 0], [0, 1, 0, 2]])
    decoder_loss_weights = jnp.array([[1, 1, 1, 0], [0, 1, 0, 1]])
    logits = jnp.arange(0, 24).reshape((2, 4, 3))
    params = {'foo': jnp.zeros(3)}
    mock_transformer = mock.Mock()
    mock_transformer.apply.return_value = logits
    mock_transformer.dtype = jnp.float32
    batch = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens,
        'decoder_loss_weights': decoder_loss_weights
    }

    def mock_init(self):
      self.module = mock_transformer

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()
      res = model.score_batch(params, batch)

    mock_transformer.apply.assert_called_with({'params': params},
                                              encoder_input_tokens,
                                              decoder_input_tokens,
                                              decoder_target_tokens,
                                              encoder_segment_ids=None,
                                              decoder_segment_ids=None,
                                              encoder_positions=None,
                                              decoder_positions=None,
                                              decode=False,
                                              enable_dropout=False,
                                              rngs=None,
                                              mutable=False)
    np.testing.assert_allclose(res, [-3.222973, -1.815315], rtol=1e-4)

  def test_train_transformer_wmt(self):
    """Smoke test: a single train step of the test T5 model runs end to end."""
    # Dummy input data
    input_shape = (16, 8)
    encoder_input_tokens = np.ones(shape=input_shape, dtype=np.float32)
    decoder_input_tokens = 5 * np.ones(shape=input_shape, dtype=np.float32)
    decoder_target_tokens = 5 * np.ones(input_shape, dtype=np.float32)
    # input_data = {'inputs': inputs, 'targets': targets}
    input_data = {
        'encoder_input_tokens': encoder_input_tokens,
        'decoder_input_tokens': decoder_input_tokens,
        'decoder_target_tokens': decoder_target_tokens
    }

    partitioner = partitioning.PjitPartitioner(num_partitions=1)
    model = test_utils.get_t5_test_model()
    ds_iter = tf.data.Dataset.from_tensors(input_data).as_numpy_iterator()
    input_shapes = {k: input_shape for k in input_data}

    train_state_initializer = utils.TrainStateInitializer(
        optimizer_def=model.optimizer_def,
        init_fn=model.get_initial_variables,
        input_shapes=input_shapes,
        partitioner=partitioner)
    train_state_axes = train_state_initializer.train_state_axes
    train_state = train_state_initializer.from_scratch(jax.random.PRNGKey(0))

    trainer = trainer_lib.Trainer(
        model,
        train_state=train_state,
        partitioner=partitioner,
        eval_names=[],
        summary_dir=None,
        train_state_axes=train_state_axes,
        rng=jax.random.PRNGKey(0),
        learning_rate_fn=lambda x: 0.001,
        num_microbatches=1)

    trainer.train(ds_iter, 1)
    logging.info('optimizer after first step %s', train_state.params)

  @parameterized.parameters(
      {'decode_fn': decoding.beam_search},
      {'decode_fn': functools.partial(decoding.temperature_sample, topk=4)})
  def test_predict_batch(self, decode_fn):
    """predict_batch decodes the argmax tokens of a degenerate distribution."""
    batch_size, encoder_len, max_decode_len, emb_dim = 2, 3, 4, 5
    batch = {
        'encoder_input_tokens':
            np.zeros((batch_size, encoder_len), dtype=np.int32),
        'decoder_input_tokens':
            np.zeros((batch_size, max_decode_len), dtype=np.int32)
    }

    # These dummy logits represent the probability distribution where all the
    # probability mass is in one item (i.e., degenerate distribution). For
    # batch element 0, it is vocabulary index 2.
    # We test `_predict_step` to avoid having to define a task and its
    # vocabulary.
    dummy_logits = jnp.expand_dims(
        jnp.array([[-1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, 0]]), axis=1)

    class MockModule:
      # Minimal stand-in for the flax module: fixed logits, zero encodings.

      def __init__(self):
        self.dtype = jnp.float32

      def apply(self, *args, method=None, **kwargs):
        del args, kwargs
        if method is None:  # use for module.`__call__`
          return (dummy_logits, {'cache': {}})
        else:
          return method()

      def encode(self):
        return jnp.zeros((batch_size, encoder_len, emb_dim))

      def decode(self):
        return (dummy_logits, {'cache': {}})

    def mock_init(self):
      self.module = MockModule()
      self.module.scan_layers = False
      self._input_vocabulary = mock.Mock(eos_id=1)
      self._output_vocabulary = mock.Mock(eos_id=1)
      self._decode_fn = decode_fn

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()
      actual = model.predict_batch({}, batch)

    # The predicted token for the first batch element is always 2 and it is 3
    # for the second batch element.
    expected = [[2] * max_decode_len, [3] * max_decode_len]
    np.testing.assert_array_equal(actual, expected)

  def test_predict_batch_rng(self):
    """decode_rng may come from `rng` or decoder_params, but never from both."""
    batch = {
        'encoder_input_tokens': np.zeros((2, 1), dtype=np.int32),
        'decoder_input_tokens': np.zeros((2, 2), dtype=np.int32)
    }
    decode_fn_mock = mock.Mock(
        return_value=(np.zeros((2, 2, 3)), np.zeros((2, 2))))

    def mock_init(self):
      self.module = mock.Mock(
          apply=mock.Mock(side_effect=lambda *_, **kwargs: (  # pylint:disable=g-long-lambda,g-long-ternary
              np.zeros((2, 2)), {
                  'cache': None
              }) if 'mutable' in kwargs else np.zeros((2, 2))))
      self._output_vocabulary = mock.Mock(eos_id=1)
      self._decode_fn = decode_fn_mock

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()

    # No RNG
    model.predict_batch({}, batch)
    _, decode_fn_kwargs = decode_fn_mock.call_args
    self.assertNotIn('decode_rng', decode_fn_kwargs)

    # No RNG (w/ aux)
    model.predict_batch_with_aux({}, batch)
    _, decode_fn_kwargs = decode_fn_mock.call_args
    self.assertNotIn('decode_rng', decode_fn_kwargs)

    # decoder_params RNG
    model.predict_batch_with_aux({}, batch, decoder_params={'decode_rng': 3})
    _, decode_fn_kwargs = decode_fn_mock.call_args
    self.assertEqual(decode_fn_kwargs['decode_rng'], 3)

    # rng RNG
    model.predict_batch({}, batch, rng=4)
    _, decode_fn_kwargs = decode_fn_mock.call_args
    self.assertEqual(decode_fn_kwargs['decode_rng'], 4)

    # rng RNG (w/ aux)
    model.predict_batch_with_aux({}, batch, rng=4)
    _, decode_fn_kwargs = decode_fn_mock.call_args
    self.assertEqual(decode_fn_kwargs['decode_rng'], 4)

    # Both
    with self.assertRaisesWithLiteralMatch(
        ValueError, 'Got RNG both from the `rng` argument (4) and '
        "`decoder_params['decode_rng']` (3). Please specify one or the other."):
      model.predict_batch_with_aux({},
                                   batch,
                                   rng=4,
                                   decoder_params={'decode_rng': 3})

  @parameterized.named_parameters(
      dict(
          testcase_name='int32',
          batch={
              'encoder_input_tokens':
                  np.zeros((BATCH_SIZE, ENCODER_LEN), dtype=np.int32),
              'decoder_input_tokens':
                  np.zeros((BATCH_SIZE, MAX_DECODE_LEN), dtype=np.int32)
          }),
      dict(
          testcase_name='float32',
          batch={
              'encoder_input_tokens':
                  np.zeros((BATCH_SIZE, ENCODER_LEN), dtype=np.float32),
              'decoder_input_tokens':
                  np.zeros((BATCH_SIZE, MAX_DECODE_LEN), dtype=np.float32)
          }))
  def test_predict_batch_fake_input_shapes_and_types(self, batch):
    """The cache-init apply call receives all-ones fakes shaped like the batch."""
    # These dummy logits represent the probability distribution where all the
    # probability mass is in one item (i.e., degenerate distribution). For
    # batch element 0, it is vocabulary index 2.
    # We test `_predict_step` to avoid having to define a task and its
    # vocabulary.
    dummy_logits = jnp.ones((2, 1, 4), jnp.float32)

    class MockModule:
      # Records apply() calls so the cache-init arguments can be inspected.

      def __init__(self):
        self.dtype = jnp.float32
        self.call_args_list = []

      def apply(self, *args, method=None, **kwargs):
        # Not sure why this isn't a real Mock so just record the args/kwargs
        self.call_args_list.append({'args': args, 'kwargs': kwargs})
        del args, kwargs
        if method is None:  # use for module.`__call__`
          return (dummy_logits, {'cache': {}})
        else:
          return method()

      def encode(self):
        return jnp.zeros((BATCH_SIZE, ENCODER_LEN, EMBED_DIM))

      def decode(self):
        return (dummy_logits, {'cache': {}})

    def mock_init(self):
      self.module = MockModule()
      self.module.scan_layers = False
      self._input_vocabulary = mock.Mock(eos_id=1)
      self._output_vocabulary = mock.Mock(eos_id=1)
      self._decode_fn = decoding.beam_search
      self._inputs_bidirectional_attention = False

    with mock.patch.object(
        models.EncoderDecoderModel, '__init__', new=mock_init):
      model = models.EncoderDecoderModel()
      model.predict_batch({}, batch)

    fake_inputs = jnp.ones_like(batch['encoder_input_tokens'])
    fake_target = jnp.ones_like(batch['decoder_input_tokens'])
    cache_init_call = model.module.call_args_list[0]
    self.assertEqual(cache_init_call['args'][0], {'params': {}})
    np.testing.assert_allclose(cache_init_call['args'][1], fake_inputs)
    np.testing.assert_allclose(cache_init_call['args'][2], fake_target)
    np.testing.assert_allclose(cache_init_call['args'][3], fake_target)
    self.assertEqual(cache_init_call['kwargs'], {
        'decode': True,
        'enable_dropout': False,
        'mutable': ['cache']
    })
class DecoderOnlyModelTest(parameterized.TestCase):
def test_predict_batch_visible_in_prefill(self):
batch_size = 2
seq_len = 10
lengths = np.array([[6], [3]])
batch = {
'decoder_input_tokens':
np.tile(
np.expand_dims(np.arange(seq_len, dtype=np.int32), axis=0),
(batch_size, 1)),
'decoder_causal_attention':
(lengths > np.arange(seq_len)).astype(np.int32)
}
dummy_logits = jnp.expand_dims(
jnp.array([[-1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, 0]]), axis=1)
mock_module = mock.Mock()
mock_module.apply.return_value = (dummy_logits, {'cache': {}})
mock_module.dtype = jnp.float32
def mock_init(self):
self.module = mock_module
self._output_vocabulary = mock.Mock(eos_id=1)
self._decode_fn = functools.partial(decoding.temperature_sample, topk=4)
self._inputs_bidirectional_attention = False
with mock.patch.object(models.DecoderOnlyModel, '__init__', new=mock_init):
model = models.DecoderOnlyModel()
model.predict_batch({}, batch)
prefill_call = mock_module.apply.call_args_list[1]
kwargs = prefill_call[1]
inputs = prefill_call[1]['decoder_input_tokens']
# Note that, for the prefill call, we use 'decoder_causal_attention' as
# 'decoder_target_tokens'.
targets = prefill_call[1]['decoder_target_tokens']
self.assertTrue(kwargs['prefill'])
np.testing.assert_array_equal(kwargs['prefill_lengths'],
np.squeeze(lengths - 1, axis=-1))
# Test that the non padding values of the "targets" cover all of the input,
# you it will all be considered in the attention mask.
np.testing.assert_array_equal(inputs * targets, inputs)
# Check that the first value of the target is 1, the first value of the
# inputs is always 0 so the masking check wouldn't catch it if the target
# had a 0 in the first location.
np.testing.assert_array_equal(targets[:, 0], np.ones_like(targets[:, 0]))
# Test that the targets are properly removed. Our input is a sequence from 0
# onward, so our largest value (the last input) should be equal by it's
# position (which is 1 - length). If we didn't mask the target correctly,
# we would expect a larger value in the max.
np.testing.assert_array_equal(
np.max(inputs, axis=1), np.squeeze(lengths - 1, axis=-1))
def test_predict_batch(self):
batch = {
'decoder_input_tokens':
np.array([[0, 3, 4, 5, 6, 0, 0], [0, 7, 8, 9, 0, 0, 0]]),
'decoder_causal_attention':
np.array([[1, 1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0]])
}
# These dummy logits represent the probability distribution where all the
# probability mass is in one item (i.e., degenerate distribution). For
# batch element 0, it is vocabulary index 2.
# We test `_predict_step` to avoid having to define a task and its
# vocabulary.
dummy_logits = jnp.expand_dims(
jnp.array([[-1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, 0]]), axis=1)
mock_module = mock.Mock()
mock_module.apply.return_value = (dummy_logits, {'cache': {}})
mock_module.dtype = jnp.float32
def mock_init(self):
self.module = mock_module
self._output_vocabulary = mock.Mock(eos_id=1)
self._decode_fn = functools.partial(decoding.temperature_sample, topk=4)
self._inputs_bidirectional_attention = False
with mock.patch.object(models.DecoderOnlyModel, '__init__', new=mock_init):
model = models.DecoderOnlyModel()
actual = model.predict_batch({}, batch)
expected = [[2, 2, 2, 2, 2, 0, 0], [3, 3, 3, 3, 3, 3, 0]]
# The expected progression of the first element of 'decoder_input_tokens':
# [0, 3, 4, 5, 6, 0, 0] -> [0, 3, 4, 0, 0, 0, 0] ->
# [3, 4, 2, 2, 2, 2, 2] -> [2, 2, 2, 2, 2, 0, 0]
# The expected progression of the second element of 'decoder_input_tokens':
# [0, 7, 8, 9, 0, 0, 0] -> [0, 7, 0, 0, 0, 0, 0] ->
# [7, 3, 3, 3, 3, 3, 3] -> [3, 3, 3, 3, 3, 3, 0]
np.testing.assert_array_equal(actual, expected)
def test_predict_batch_rng(self):
batch = {
'decoder_input_tokens': np.zeros((2, 2), dtype=np.int32),
'decoder_causal_attention': np.zeros((2, 2), dtype=np.int32)
}
decode_fn_mock = mock.Mock(
return_value=(np.zeros((2, 2, 3)), np.zeros((2, 2))))
def mock_init(self):
self.module = mock.Mock(
apply=mock.Mock(side_effect=lambda *_, **kwargs: ( # pylint:disable=g-long-lambda,g-long-ternary
np.zeros((2, 2)), {
'cache': None
}) if 'mutable' in kwargs else np.zeros((2, 2))))
self._output_vocabulary = mock.Mock(eos_id=1)
self._decode_fn = decode_fn_mock
self._inputs_bidirectional_attention = False
with mock.patch.object(models.DecoderOnlyModel, '__init__', new=mock_init):
model = models.DecoderOnlyModel()
# No RNG
model.predict_batch({}, batch)
_, decode_fn_kwargs = decode_fn_mock.call_args
self.assertNotIn('decode_rng', decode_fn_kwargs)
# No RNG (w/ aux)
model.predict_batch_with_aux({}, batch)
_, decode_fn_kwargs = decode_fn_mock.call_args
self.assertNotIn('decode_rng', decode_fn_kwargs)
# decoder_params RNG
model.predict_batch_with_aux({}, batch, decoder_params={'decode_rng': 3})
_, decode_fn_kwargs = decode_fn_mock.call_args
self.assertEqual(decode_fn_kwargs['decode_rng'], 3)
# rng RNG
model.predict_batch({}, batch, rng=4)
_, decode_fn_kwargs = decode_fn_mock.call_args
self.assertEqual(decode_fn_kwargs['decode_rng'], 4)
# rng RNG (w/ aux)
model.predict_batch_with_aux({}, batch, rng=4)
_, decode_fn_kwargs = decode_fn_mock.call_args
self.assertEqual(decode_fn_kwargs['decode_rng'], 4)
# Both
with self.assertRaisesWithLiteralMatch(
ValueError, 'Got RNG both from the `rng` argument (4) and '
"`decoder_params['decode_rng']` (3). Please specify one or the other."):
model.predict_batch_with_aux({},
batch,
rng=4,
decoder_params={'decode_rng': 3})
def test_predict_batch_num_decodes_temperature_sample(self):
batch = {
'decoder_input_tokens': np.array([
[0, 3, 4, 5, 6, 0, 0],
]),
'decoder_causal_attention': np.array([
[1, 1, 1, 0, 0, 0, 0],
])
}
# These dummy logits represent the probability distribution where all the
# probability mass is in one item (i.e., degenerate distribution). For
# batch element 0, it is vocabulary index 2. We have two samples.
# Technically these should be identical since the prompts are the same, but
# this makes testing easier.
dummy_logits = jnp.expand_dims(
jnp.array([[-1e7, -1e7, 0, -1e7], [-1e7, -1e7, -1e7, 0]]), axis=1)
mock_module = mock.Mock()
mock_module.apply.return_value = (dummy_logits, {'cache': {}})
mock_module.dtype = jnp.float32
def mock_init(self):
self.module = mock_module
self._output_vocabulary = mock.Mock(eos_id=1)
self._decode_fn = functools.partial(decoding.temperature_sample, topk=4)
self._inputs_bidirectional_attention = False
with mock.patch.object(models.DecoderOnlyModel, '__init__', new=mock_init):
model = models.DecoderOnlyModel()
actual_output, aux = model.predict_batch_with_aux({},
batch,
num_decodes=2,
return_all_decodes=True)
expected_output = [[[2, 2, 2, 2, 2, 0, 0], [3, 3, 3, 3, 3, 0, 0]]]
expected_scores = [[0., 0.]]
# The expected progression of the first element of 'decoder_input_tokens':
# [0, 3, 4, 5, 6, 0, 0] -> [0, 3, 4, 0, 0, 0, 0] ->
# [3, 4, 2, 2, 2, 2, 2] -> [2, 2, 2, 2, 2, 0, 0]
# The expected progression of the second element of 'decoder_input_tokens':
# [0, 7, 8, 9, 0, 0, 0] -> [0, 7, 0, 0, 0, 0, 0] ->
# [7, 3, 3, 3, 3, 3, 3] -> [3, 3, 3, 3, 3, 3, 0]
np.testing.assert_array_equal(actual_output, expected_output)
np.testing.assert_array_equal(aux['scores'], expected_scores)
  def test_predict_batch_fake_input_shapes_and_types(self):
    """Checks the fake inputs used for cache initialization during predict."""
    # The input and causal attention actually have to be int32 for this test;
    # even though the cache init should work with any types, the `inputs` that
    # is created from multiplying the causal attention and the input tokens
    # needs to be an int or the decoding will fail.
    batch = {
        'decoder_input_tokens':
            np.array([[0, 3, 4, 5, 6, 0, 0], [0, 7, 8, 9, 0, 0, 0]],
                     dtype=np.int32),
        'decoder_causal_attention':
            np.array([[1, 1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0]],
                     dtype=np.int32)
    }
    dummy_logits = jnp.ones((2, 1, 5), jnp.float32)

    mock_module = mock.Mock()
    mock_module.apply.return_value = (dummy_logits, {'cache': {}})
    mock_module.dtype = jnp.float32

    # Patch __init__ so the model can be constructed without a real module.
    def mock_init(self):
      self.module = mock_module
      self._output_vocabulary = mock.Mock(eos_id=1)
      self._decode_fn = functools.partial(decoding.temperature_sample, topk=4)
      self._inputs_bidirectional_attention = False

    with mock.patch.object(models.DecoderOnlyModel, '__init__', new=mock_init):
      model = models.DecoderOnlyModel()
      model.predict_batch({}, batch)

    fake_target = jnp.ones_like(batch['decoder_input_tokens'])
    # The first apply() call is the cache-initialization pass; positional args
    # are (variables, fake_inputs, fake_targets).
    cache_init_call = mock_module.apply.call_args_list[0]
    self.assertEqual(cache_init_call[0][0], {'params': {}})
    np.testing.assert_allclose(cache_init_call[0][1], fake_target)
    np.testing.assert_allclose(cache_init_call[0][2], fake_target)
    self.assertEqual(cache_init_call[1], {
        'decode': True,
        'enable_dropout': False,
        'mutable': ['cache']
    })
@parameterized.named_parameters(
dict(
testcase_name='no_types',
shapes={'decoder_input_tokens': [1, 62]},
types=None),
dict(
testcase_name='int32',
shapes={'decoder_input_tokens': [1, 62]},
types={'decoder_input_tokens': jnp.int32}),
dict(
testcase_name='float32',
shapes={'decoder_input_tokens': [1, 62]},
types={'decoder_input_tokens': jnp.int32}),
)
def test_get_initial_variables_shapes_and_types(self, shapes, types):
mock_lm = mock.Mock()
mock_lm.init.return_value = {'params': {}}
mock_optimizer_def = mock.Mock()
rng = mock.Mock()
def mock_init(self):
self.module = mock_lm
self.optimizer_def = mock_optimizer_def
with mock.patch.object(models.DecoderOnlyModel, '__init__', new=mock_init):
model = models.DecoderOnlyModel()
model.get_initial_variables(rng, shapes, types)
if types is None:
decoder_input = jnp.ones(
shapes['decoder_input_tokens'], dtype=jnp.float32)
else:
decoder_input = jnp.ones(
shapes['decoder_input_tokens'], dtype=types['decoder_input_tokens'])
# Using `.assert_called_once_with` doesn't work because the simple
# comparison it does for the array arguments fail (truth value of an array
# is ambiguous).
called_with = mock_lm.init.call_args
self.assertEqual(called_with[0][0], rng)
np.testing.assert_allclose(called_with[0][1], decoder_input)
np.testing.assert_allclose(called_with[0][2], decoder_input)
self.assertEqual(mock_lm.init.call_args[1], {'enable_dropout': False})
# Standard absl test entry point.
if __name__ == '__main__':
  absltest.main()
|
import graphene
from django.contrib.auth import get_user_model
from graphene import ObjectType, Float, InputObjectType, Field, Mutation, List
from graphene_django import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_python_helpers import ramda as R
from rescape_python_helpers.geospatial.geometry_helpers import ewkt_from_feature_collection
from rescape_graphene import increment_prop_until_unique, enforce_unique_props
from rescape_graphene.graphql_helpers.json_field_helpers import model_resolver_for_dict_field, \
resolver_for_feature_collection, resolver_for_dict_field
from rescape_graphene.graphql_helpers.schema_helpers import REQUIRE, \
merge_with_django_properties, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, graphql_update_or_create, graphql_query, \
input_type_fields, DENY, IGNORE, top_level_allowed_filter_arguments, allowed_filter_arguments, \
update_or_create_with_revision, process_filter_kwargs, process_filter_kwargs_with_to_manys, query_sequentially, \
type_modify_fields
from rescape_graphene.schema_models.geojson.types.feature_collection import FeatureCollectionDataType, \
feature_collection_data_type_fields
from rescape_graphene.schema_models.user_schema import UserType, user_fields
from sample_webapp.models import Foo, Bar
class BarType(DjangoObjectType):
    """
    This is the Graphene Type for Bar.
    """
    # Expose the Django primary key as `id`.
    id = graphene.Int(source='pk')

    class Meta:
        model = Bar
# Field metadata for Bar, merged with the Django model's own properties.
# `key` is made unique by appending an incrementing suffix when needed.
bar_fields = merge_with_django_properties(BarType, dict(
    id=dict(create=DENY, update=REQUIRE),
    key=dict(create=REQUIRE, unique_with=increment_prop_until_unique(Bar, None, 'key', {})),
))

# Maps CRUD operations to the generated GraphQL mutation names for Bar.
bar_mutation_config = dict(
    class_name='Bar',
    crud={
        CREATE: 'createBar',
        UPDATE: 'updateBar'
    },
    resolve=guess_update_or_create
)
# Field metadata for the JSON blob stored in Foo.data.
foo_data_fields = dict(
    example=dict(type=Float),
    # References a User stored in a blob. This tests our ability to reference Django model instance ids in json blobs
    # and resolve them correctly.
    # For simplicity we limit fields to id. Mutations can only use id, and a query doesn't need other
    # details of the user--it can query separately for that. We could offer all fields in a query only
    # version of these fields
    friend=dict(
        type=UserType,
        graphene_type=UserType,
        fields=user_fields,
        type_modifier=lambda *type_and_args: Field(
            *type_and_args,
            resolver=model_resolver_for_dict_field(get_user_model())
        )
    )
)

# This is the Graphene type for the Foo.data field. Note that we use foo_data_fields for the Field
# and pass them through type_modify_fields to handle the type_modifier lambda of Foo.data['friend']
FooDataType = type(
    'FooDataType',
    (ObjectType,),
    type_modify_fields(foo_data_fields)
)
class FooType(DjangoObjectType):
    """
    This is the Graphene Type for Foo.
    """
    # Expose the Django primary key as `id`.
    id = graphene.Int(source='pk')

    class Meta:
        model = Foo
# Modify data field to use the resolver.
# There's no way to specify a resolver and queryable fields upon field creation,
# since graphene just reads the underlying Django model to generate the fields
FooType._meta.fields['data'] = Field(
    FooDataType,
    resolver=resolver_for_dict_field
)
# Full GeoJSON FeatureCollection stored as a JSON dict.
FooType._meta.fields['geojson'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_dict_field
)
# Geometry-only companion of geojson, resolved from a GEOS geometry collection.
FooType._meta.fields['geo_collection'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_feature_collection
)
# Field metadata for Foo, merged with the Django model's own properties.
# Controls which fields may appear in create/update mutations and queries.
foo_fields = merge_with_django_properties(FooType, dict(
    id=dict(create=DENY, update=REQUIRE),
    key=dict(create=REQUIRE, unique_with=increment_prop_until_unique(Foo, None, 'key', {})),
    name=dict(create=REQUIRE),
    # To-many relationship to Bar, exposed as a GraphQL List.
    bars=dict(
        type=BarType,
        graphene_type=BarType,
        fields=bar_fields,
        type_modifier=lambda *type_and_args: List(*type_and_args)
    ),
    created_at=dict(),
    updated_at=dict(),
    # This refers to the FooDataType, which is a representation of all the json fields of Foo.data
    data=dict(graphene_type=FooDataType, fields=foo_data_fields, default=lambda: dict()),
    # This is a reference to a Django model instance.
    user=dict(graphene_type=UserType, fields=user_fields),
    geojson=dict(
        create=REQUIRE,
        graphene_type=FeatureCollectionDataType,
        fields=feature_collection_data_type_fields
    ),
    # This is just geojson as GeosGeometryCollection, so it maintains the geometry but loses other geojson properties
    # It's kept synced to the geojson in the UpsertFoo mutate function. In practice this probably isn't needed
    # since in PostGIS we could just extract the geometry from geojson
    geo_collection=dict(
        create=DENY,
        update=DENY,
        read=IGNORE,
        graphene_type=FeatureCollectionDataType,
        fields=feature_collection_data_type_fields,
    )
))
class FooQuery(ObjectType):
    """Top-level query type exposing filterable Foo lookups."""
    id = graphene.Int(source='pk')
    foos = graphene.List(
        FooType,
        **top_level_allowed_filter_arguments(foo_fields, FooType)
    )

    @login_required
    def resolve_foos(self, info, **kwargs):
        # To-many filters can produce several Q-expression sets that must be
        # applied as sequential .filter() calls rather than a single one.
        q_expressions_sets = process_filter_kwargs_with_to_manys(Foo, **kwargs)
        return query_sequentially(Foo.objects, 'filter', q_expressions_sets)
# Maps CRUD operations to the generated GraphQL mutation names for Foo.
foo_mutation_config = dict(
    class_name='Foo',
    crud={
        CREATE: 'createFoo',
        UPDATE: 'updateFoo'
    },
    resolve=guess_update_or_create
)
class UpsertFoo(Mutation):
    """
    Abstract base class for the Foo create/update mutations.
    Subclasses only differ in the InputObjectType of their Arguments.
    """

    foo = Field(FooType)

    def mutate(self, info, foo_data=None):
        modified_foo_data = R.merge(
            # Make sure unique fields are enforced, here by incrementing foo.key
            enforce_unique_props(foo_fields, foo_data),
            dict(
                # Force the FeatureCollection geojson into the GEOSGeometryCollection. This is just Geometry
                geo_collection=ewkt_from_feature_collection(foo_data['geojson']) if R.prop('geojson', foo_data) else {},
                # Put the full FeatureCollection geojson into the geojson field.
                geojson=foo_data['geojson'] if R.prop('geojson', foo_data) else {}
            )
        )
        update_or_create_values = input_type_parameters_for_update_or_create(foo_fields, modified_foo_data)
        # `created` is intentionally unused; the mutation returns the instance either way.
        foo, created = update_or_create_with_revision(Foo, update_or_create_values)
        return UpsertFoo(foo=foo)
class CreateFoo(UpsertFoo):
    """
    Create Foo mutation class
    """

    class Arguments:
        # Input type limited to fields allowed at CREATE time.
        foo_data = type('CreateFooInputType', (InputObjectType,),
                        input_type_fields(foo_fields, CREATE, FooType))(required=True)
class UpdateFoo(UpsertFoo):
    """
    Update Foo mutation class
    """

    class Arguments:
        # Input type limited to fields allowed at UPDATE time.
        foo_data = type('UpdateFooInputType', (InputObjectType,),
                        input_type_fields(foo_fields, UPDATE, FooType))(required=True)
# Pre-built client-side mutation/query helpers derived from the field metadata.
graphql_update_or_create_bar = graphql_update_or_create(bar_mutation_config, bar_fields)
graphql_query_bars = graphql_query(BarType, bar_fields, 'bars')
graphql_update_or_create_foo = graphql_update_or_create(foo_mutation_config, foo_fields)
graphql_query_foos = graphql_query(FooType, foo_fields, 'foos')
class FooMutation(graphene.ObjectType):
    """Top-level mutation type registering the Foo mutations."""
    create_foo = CreateFoo.Field()
    update_foo = UpdateFoo.Field()
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2017-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import collections.abc
import enum
import io
import itertools
import json
import multiprocessing
import multiprocessing.reduction
import multiprocessing.util
import os
import random
import re
import sys
import threading
import time
import types
import unittest.case
import unittest.result
import unittest.runner
import unittest.signals
import warnings
import click
import edgedb
from edb.common import devmode
from edb.testbase import server as tb
from . import mproc_fixes
from . import styles
# Worker-process-global TestResult: set by init_worker() in parallel runs and
# by SequentialTestSuite.run() in sequential runs.
result: Optional[unittest.result.TestResult] = None
# Per-worker coverage tracker, if coverage collection was requested.
coverage_run = None
def teardown_suite() -> None:
    """Run class/module teardown for the last test executed in this worker."""
    # The TestSuite methods are mutating the *result* object,
    # and the suite itself does not hold any state whatsoever,
    # and, in our case specifically, it doesn't even hold
    # references to tests being run, so we can think of
    # its methods as static.
    suite = StreamingTestSuite()
    suite._tearDownPreviousClass(None, result)
    suite._handleModuleTearDown(result)
def init_worker(status_queue: multiprocessing.SimpleQueue,
                param_queue: multiprocessing.SimpleQueue,
                result_queue: multiprocessing.SimpleQueue) -> None:
    """Initialize a test worker process.

    Re-seeds the RNG, wires the worker-global `result` to *result_queue*,
    optionally exports the shared cluster address from *param_queue*, starts
    coverage if requested, and signals readiness on *status_queue*.
    """
    global result
    global coverage_run

    # Make sure the generator is re-seeded, as we have inherited
    # the seed from the parent process.
    random.seed()

    result = ChannelingTestResult(result_queue)
    if not param_queue.empty():
        server_addr = param_queue.get()

        if server_addr is not None:
            os.environ['EDGEDB_TEST_CLUSTER_ADDR'] = json.dumps(server_addr)

    coverage_run = devmode.CoverageConfig.start_coverage_if_requested()

    status_queue.put(True)
def shutdown_worker() -> None:
    """Finish suite teardown in this worker and flush coverage data."""
    global coverage_run

    teardown_suite()
    if coverage_run is not None:
        coverage_run.stop()
        coverage_run.save()
class StreamingTestSuite(unittest.TestSuite):
    """A suite that runs one test at a time, streaming results as it goes.

    Unlike the stock TestSuite, it records per-test wall time and forwards
    warnings captured during the run to the result object.
    """
    _cleanup = False

    def run(self, test, result):
        with warnings.catch_warnings(record=True) as ww:
            warnings.resetwarnings()
            warnings.simplefilter('default')

            self._run(test, result)

            if ww:
                for wmsg in ww:
                    if wmsg.source is not None:
                        # Source objects may not be picklable; stringify them
                        # before they cross the result queue.
                        wmsg.source = str(wmsg.source)
                    result.addWarning(test, wmsg)

    def _run(self, test, result):
        result._testRunEntered = True
        self._tearDownPreviousClass(test, result)
        self._handleModuleFixture(test, result)
        self._handleClassSetUp(test, result)
        result._previousTestClass = test.__class__

        if (getattr(test.__class__, '_classSetupFailed', False) or
                getattr(result, '_moduleSetUpFailed', False)):
            return

        start = time.monotonic()
        test.run(result)
        elapsed = time.monotonic() - start

        result.record_test_stats(test, {'running-time': elapsed})

        result._testRunEntered = False
        return result
def _run_test(workload):
    """Run *workload* (a single test or an iterable of tests) against the
    worker-global `result` using a StreamingTestSuite."""
    suite = StreamingTestSuite()

    # Normalize: a non-iterable workload is a single test case.
    if not isinstance(workload, collections.abc.Iterable):
        workload = (workload,)

    for case in workload:
        suite.run(case, result)
def _is_exc_info(args):
return (
isinstance(args, tuple) and
len(args) == 3 and
issubclass(args[0], BaseException)
)
class ChannelingTestResultMeta(type):
    """Metaclass that rewrites every result-reporting method so that, instead
    of mutating local state, the call is serialized onto a queue consumed by
    the parent process's monitor thread."""

    @staticmethod
    def get_wrapper(meth):
        def _wrapper(self, *args, **kwargs):
            args = list(args)

            if args and _is_exc_info(args[-1]):
                # exc_info triple: tracebacks don't pickle, so serialize to
                # text here in the worker before queueing.
                error_text = self._exc_info_to_string(args[-1], args[0])
                args[-1] = error_text

            self._queue.put((meth, args, kwargs))
        return _wrapper

    def __new__(mcls, name, bases, dct):
        for meth in {'startTest', 'addSuccess', 'addError', 'addFailure',
                     'addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
                     'addSubTest', 'addWarning', 'record_test_stats'}:
            dct[meth] = mcls.get_wrapper(meth)

        return super().__new__(mcls, name, bases, dct)
class ChannelingTestResult(unittest.result.TestResult,
                           metaclass=ChannelingTestResultMeta):
    """Worker-side TestResult whose reporting methods forward to a queue.

    All output/print hooks are no-ops; the parent process owns rendering.
    """

    def __init__(self, queue):
        super().__init__(io.StringIO(), False, 1)
        self._queue = queue

    def _setupStdout(self):
        pass

    def _restoreStdout(self):
        pass

    def printErrors(self):
        pass

    def printErrorList(self, flavour, errors):
        pass

    def __getstate__(self):
        # Drop unpicklable members before the result crosses processes.
        state = self.__dict__.copy()
        state.pop('_queue')
        state.pop('_original_stdout')
        state.pop('_original_stderr')
        return state
def monitor_thread(queue, result):
    """Drain *queue* of (method-name, args, kwargs) messages and replay each
    call on *result*, until the (None, None, None) terminal message arrives.

    The terminal message is injected once all tests have completed and the
    worker pool is about to be closed.
    """
    for methname, args, kwargs in iter(queue.get, (None, None, None)):
        # Resolve a possibly dotted method path on the result object.
        target = result
        for attr in methname.split('.'):
            target = getattr(target, attr)
        target(*args, **kwargs)
class ParallelTestSuite(unittest.TestSuite):
    """Runs tests across a multiprocessing pool, funneling results back to the
    parent process through a queue drained by a monitor thread."""

    def __init__(self, tests, server_conn, num_workers):
        self.tests = tests
        self.server_conn = server_conn
        self.num_workers = num_workers
        self.stop_requested = False

    def run(self, result):
        # We use SimpleQueues because they are more predictable.
        # They do the necessary IO directly, without using a
        # helper thread.
        result_queue = multiprocessing.SimpleQueue()
        status_queue = multiprocessing.SimpleQueue()
        worker_param_queue = multiprocessing.SimpleQueue()

        # Prepopulate the worker param queue with server connection
        # information.
        for _ in range(self.num_workers):
            worker_param_queue.put(self.server_conn)

        result_thread = threading.Thread(
            name='test-monitor', target=monitor_thread,
            args=(result_queue, result), daemon=True)
        result_thread.start()

        initargs = (status_queue, worker_param_queue, result_queue)

        pool = multiprocessing.Pool(
            self.num_workers,
            initializer=mproc_fixes.WorkerScope(init_worker, shutdown_worker),
            initargs=initargs)

        # Wait for all workers to initialize.
        for _ in range(self.num_workers):
            status_queue.get()

        with pool:
            ar = pool.map_async(_run_test, iter(self.tests), chunksize=1)

            # Poll with a short timeout so a stop request (e.g. failfast)
            # can interrupt the run between tests.
            while True:
                try:
                    ar.get(timeout=0.1)
                except multiprocessing.TimeoutError:
                    if self.stop_requested:
                        break
                    else:
                        continue
                else:
                    break

            # Post the terminal message to the queue so that
            # test-monitor can stop.
            result_queue.put((None, None, None))

            # Give the test-monitor thread some time to
            # process the queue messages. If something
            # goes wrong, the thread will be forcibly
            # joined by a timeout.
            result_thread.join(timeout=3)

        # Wait for pool to shutdown, this includes test teardowns.
        pool.join()

        return result
class SequentialTestSuite(unittest.TestSuite):
    """In-process, one-at-a-time test suite used when num_workers == 1."""

    def __init__(self, tests, server_conn):
        self.tests = tests
        self.server_conn = server_conn
        self.stop_requested = False

    def run(self, result_):
        # _run_test() reads the module-global `result`, so bind it here.
        global result
        result = result_

        if self.server_conn:
            os.environ['EDGEDB_TEST_CLUSTER_ADDR'] = \
                json.dumps(self.server_conn)

        for test in self.tests:
            _run_test(test)
            if self.stop_requested:
                break

        # Make sure the class and the module teardown methods are
        # executed for the trailing test, _run_test() does not do
        # this for us.
        teardown_suite()

        return result
class Markers(enum.Enum):
    """Single-character progress markers used by the renderers."""
    passed = '.'
    errored = 'E'
    skipped = 's'
    failed = 'F'
    xfailed = 'x'  # expected fail
    not_implemented = '-'
    upassed = 'U'  # unexpected success
class OutputFormat(enum.Enum):
    """Progress rendering modes; `auto` picks based on TTY and verbosity."""
    auto = 'auto'
    simple = 'simple'
    stacked = 'stacked'
    verbose = 'verbose'
class BaseRenderer:
    """Common base for progress renderers: maps markers to styles and
    formats test identifiers (including subtests and fail notes)."""

    def __init__(self, *, tests, stream):
        # NOTE(review): `tests` is unused here; presumably accepted so all
        # renderers share one constructor signature — confirm.
        self.stream = stream
        self.styles_map = {
            marker.value: getattr(styles, f'marker_{marker.name}')
            for marker in Markers}

    def format_test(self, test):
        if isinstance(test, unittest.case._SubTest):
            if test.params:
                params = ', '.join(
                    f'{k}={v!r}' for k, v in test.params.items())
            else:
                params = '<subtest>'
            return f'{test.test_case} {{{params}}}'
        else:
            if hasattr(test, 'fail_notes') and test.fail_notes:
                fail_notes = ', '.join(
                    f'{k}={v!r}' for k, v in test.fail_notes.items())
                return f'{test} {{{fail_notes}}}'
            else:
                return str(test)

    def report(self, test, marker, description=None):
        raise NotImplementedError
class SimpleRenderer(BaseRenderer):
    """Prints one styled marker character per test, with no newline."""

    def report(self, test, marker, description=None):
        click.echo(self.styles_map[marker.value](marker.value),
                   nl=False, file=self.stream)
class VerboseRenderer(BaseRenderer):
    """Prints one full, styled line per test with a spelled-out outcome."""

    fullnames = {
        Markers.passed: 'OK',
        Markers.errored: 'ERROR',
        Markers.skipped: 'SKIPPED',
        Markers.failed: 'FAILED',
        Markers.xfailed: 'expected failure',
        Markers.not_implemented: 'not implemented',
        Markers.upassed: 'unexpected success',
    }

    def _render_test(self, test, marker, description):
        title = self.format_test(test)
        outcome = self.fullnames[marker]
        suffix = f': {description}' if description else ''
        return f'{title}: {outcome}{suffix}'

    def report(self, test, marker, description=None):
        rendered = self._render_test(test, marker, description)
        click.echo(self.styles_map[marker.value](rendered), file=self.stream)
class MultiLineRenderer(BaseRenderer):
    """Full-screen, stacked progress renderer.

    Draws one line per test module filled with marker characters, a short
    list of the first few failed tests, and a progress counter, redrawing
    in place with ANSI cursor movement.
    """

    FT_LABEL = 'First few failed: '
    FT_LABEL_LEN = len(FT_LABEL)
    FT_MAX_LINES = 3

    def __init__(self, *, tests, stream):
        super().__init__(tests=tests, stream=stream)

        self.total_tests = len(tests)
        self.completed_tests = 0

        test_modules = {test.__class__.__module__ for test in tests}
        max_test_module_len = max((len(self._render_modname(name))
                                   for name in test_modules), default=0)
        self.first_col_width = max_test_module_len + 1  # 1 == len(' ')

        self.failed_tests = set()

        # Per-module string of marker characters accumulated so far.
        self.buffer = collections.defaultdict(str)
        self.last_lines = -1
        self.max_lines = 0

    def report(self, test, marker, description=None):
        if marker in {Markers.failed, Markers.errored}:
            test_name = test.id().rpartition('.')[2]
            if ' ' in test_name:
                test_name = test_name.split(' ')[0]
            self.failed_tests.add(test_name)

        self.buffer[test.__class__.__module__] += marker.value
        self.completed_tests += 1
        self._render()

    def _render_modname(self, name):
        return name.replace('.', '/') + '.py'

    def _color_second_column(self, line, style):
        return line[:self.first_col_width] + style(line[self.first_col_width:])

    def _render(self):
        def print_line(line):
            if len(line) < cols:
                line += ' ' * (cols - len(line))
            lines.append(line)

        def print_empty_line():
            print_line(' ')

        last_render = self.completed_tests == self.total_tests
        cols, rows = click.get_terminal_size()
        second_col_width = cols - self.first_col_width

        clear_cmd = ''
        if self.last_lines > 0:
            # Move cursor up `last_lines` times.
            clear_cmd = f'\r\033[{self.last_lines}A'

        lines = []
        for mod, progress in self.buffer.items():
            line = self._render_modname(mod).ljust(self.first_col_width, ' ')
            while progress:
                second_col = progress[:second_col_width]
                second_col = second_col.ljust(second_col_width, ' ')

                progress = progress[second_col_width:]

                # Apply styles *after* slicing and padding the string
                # (otherwise ANSI codes could be sliced in half).
                second_col = re.sub(
                    r'\S',
                    lambda x: self.styles_map[x[0]](x[0]),
                    second_col)

                lines.append(f'{line}{second_col}')

                # Only the first wrapped row carries the module name.
                if line[0] != ' ':
                    line = ' ' * self.first_col_width

        if (not last_render and
                self.failed_tests and
                self.FT_LABEL_LEN <= self.first_col_width and
                cols - self.first_col_width > 40):

            print_empty_line()

            line = (
                self.FT_LABEL +
                ' ' * (self.first_col_width - self.FT_LABEL_LEN)
            )
            failed_tests_lines = 1

            for testi, test in enumerate(self.failed_tests, 1):
                last = testi == len(self.failed_tests)

                if not last:
                    test += ', '

                test_name_len = len(test)

                if len(line) + test_name_len < cols:
                    line += test

                else:
                    if failed_tests_lines == self.FT_MAX_LINES:
                        # Out of budget: truncate with an ellipsis.
                        if len(line) + 3 < cols:
                            line += '...'
                        break

                    else:
                        line += (cols - len(line)) * ' '
                        line = self._color_second_column(
                            line, styles.marker_errored)
                        lines.append(line)

                        failed_tests_lines += 1
                        line = self.first_col_width * ' '

                        if len(line) + test_name_len > cols:
                            continue

                        line += test

            line += (cols - len(line)) * ' '
            line = self._color_second_column(line, styles.marker_errored)
            lines.append(line)

        print_empty_line()
        print_line(
            f'Progress: {self.completed_tests}/{self.total_tests} tests.')

        if last_render:
            # Pad so the final frame overwrites the tallest previous frame.
            if self.max_lines > len(lines):
                for _ in range(self.max_lines - len(lines)):
                    lines.insert(0, ' ' * cols)
        else:
            # If it's not the last test, check if our render buffer
            # requires more rows than currently visible.
            if len(lines) + 1 > rows:
                # Scroll the render buffer to the bottom and
                # cut the lines from the beginning, so that it
                # will fit the screen.
                #
                # We need to do this because we can't move the
                # cursor past the visible screen area, so if we
                # render more data than the screen can fit, we
                # will have lots of garbage output.
                lines = lines[len(lines) + 1 - rows:]
                lines[0] = '^' * cols

        # Hide cursor.
        print('\033[?25l', end='', flush=True, file=self.stream)
        try:
            # Use `print` (not `click.echo`) because we want to
            # precisely control when the output is flushed.
            print(clear_cmd + '\n'.join(lines), flush=False, file=self.stream)
        finally:
            # Show cursor.
            print('\033[?25h', end='', flush=True, file=self.stream)

        self.last_lines = len(lines)
        self.max_lines = max(self.last_lines, self.max_lines)
class ParallelTextTestResult(unittest.result.TestResult):
    """Parent-process TestResult: aggregates outcomes replayed from the
    workers and delegates display to a renderer chosen by output format."""

    def __init__(self, *, stream, verbosity, warnings, tests,
                 output_format=OutputFormat.auto, failfast=False, suite):
        super().__init__(stream, False, verbosity)
        self.verbosity = verbosity
        self.catch_warnings = warnings
        self.failfast = failfast
        self.test_stats = []
        self.warnings = []
        self.notImplemented = []
        # An index of all seen warnings to keep track
        # of repeated warnings.
        self._warnings = {}
        self.suite = suite

        if (output_format is OutputFormat.verbose or
                (output_format is OutputFormat.auto and self.verbosity > 1)):
            self.ren = VerboseRenderer(tests=tests, stream=stream)
        elif (output_format is OutputFormat.stacked or
                (output_format is OutputFormat.auto and stream.isatty() and
                 click.get_terminal_size()[0] > 60 and
                 os.name != 'nt')):
            self.ren = MultiLineRenderer(tests=tests, stream=stream)
        else:
            self.ren = SimpleRenderer(tests=tests, stream=stream)

    def record_test_stats(self, test, stats):
        self.test_stats.append((test, stats))

    def _exc_info_to_string(self, err, test):
        # Errors are serialized in the worker.
        return err

    def getDescription(self, test):
        return self.ren.format_test(test)

    def addSuccess(self, test):
        super().addSuccess(test)
        self.ren.report(test, Markers.passed)

    def addError(self, test, err):
        super().addError(test, err)
        self.ren.report(test, Markers.errored)
        if self.failfast:
            self.suite.stop_requested = True

    def addFailure(self, test, err):
        super().addFailure(test, err)
        self.ren.report(test, Markers.failed)
        if self.failfast:
            self.suite.stop_requested = True

    def addSubTest(self, test, subtest, err):
        # Only failing subtests are recorded; passing ones are silent.
        if err is not None:
            self.errors.append((subtest, self._exc_info_to_string(err, test)))
            self._mirrorOutput = True
            self.ren.report(subtest, Markers.errored)
            if self.failfast:
                self.suite.stop_requested = True

    def addSkip(self, test, reason):
        super().addSkip(test, reason)
        self.ren.report(test, Markers.skipped)

    def addExpectedFailure(self, test, err):
        method = getattr(test, test._testMethodName)
        try:
            reason = method.__et_xfail_reason__
            not_impl = getattr(method, '__et_xfail_not_implemented__', False)
        except AttributeError:
            # Maybe the whole test case class is decorated?
            reason = getattr(test, '__et_xfail_reason__', None)
            not_impl = getattr(test, '__et_xfail_not_implemented__', False)

        marker = Markers.not_implemented if not_impl else Markers.xfailed
        if not_impl:
            self.notImplemented.append(
                (test, self._exc_info_to_string(err, test)))
        else:
            super().addExpectedFailure(test, err)

        self.ren.report(test, marker, reason)

    def addUnexpectedSuccess(self, test):
        super().addUnexpectedSuccess(test)
        self.ren.report(test, Markers.upassed)

    def addWarning(self, test, wmsg):
        if not self.catch_warnings:
            return

        # Deduplicate warnings by message/location.
        key = str(wmsg.message), wmsg.filename, wmsg.lineno
        if key not in self._warnings:
            self._warnings[key] = wmsg
            self.warnings.append((test, warnings.formatwarning(
                wmsg.message, wmsg.category, wmsg.filename, wmsg.lineno,
                wmsg.line
            )))

    def wasSuccessful(self):
        # Overload TestResult.wasSuccessful to ignore unexpected successes
        return (len(self.failures) == len(self.errors) == 0)
class ParallelTextTestRunner:
    """Orchestrates a test session: bootstraps the test cluster if needed,
    picks a sequential or parallel suite, runs it, and renders the summary."""

    def __init__(self, *, stream=None, num_workers=1, verbosity=1,
                 output_format=OutputFormat.auto, warnings=True,
                 failfast=False):
        self.stream = stream if stream is not None else sys.stderr
        self.num_workers = num_workers
        self.verbosity = verbosity
        self.warnings = warnings
        self.failfast = failfast
        self.output_format = output_format

    def run(self, test):
        session_start = time.monotonic()
        cases = tb.get_test_cases([test])
        setup = tb.get_test_cases_setup(cases)
        bootstrap_time_taken = 0
        tests_time_taken = 0
        result = None
        cluster = None
        conn = None

        try:
            if setup:
                self._echo('Populating test databases... ',
                           fg='white', nl=False)

                cluster = tb._init_cluster(cleanup_atexit=False)
                conn = cluster.get_connect_args()
                tb.setup_test_cases(
                    cases,
                    conn,
                    self.num_workers)

                os.environ.update({
                    'EDGEDB_TEST_CASES_SET_UP': "1"
                })

                bootstrap_time_taken = time.monotonic() - session_start
                self._echo('OK.')

            start = time.monotonic()

            all_tests = list(itertools.chain.from_iterable(
                tests for tests in cases.values()))

            if self.num_workers > 1:
                suite = ParallelTestSuite(
                    self._sort_tests(cases),
                    conn,
                    self.num_workers)
            else:
                suite = SequentialTestSuite(
                    self._sort_tests(cases),
                    conn
                )

            result = ParallelTextTestResult(
                stream=self.stream, verbosity=self.verbosity,
                warnings=self.warnings, failfast=self.failfast,
                output_format=self.output_format,
                tests=all_tests, suite=suite)
            unittest.signals.registerResult(result)

            self._echo()
            suite.run(result)

            tests_time_taken = time.monotonic() - start

        except KeyboardInterrupt:
            raise

        finally:
            # Always shut down the cluster we started, even on failure.
            if self.verbosity == 1:
                self._echo()

            if setup:
                self._echo()
                self._echo('Shutting down test cluster... ', nl=False)
                tb._shutdown_cluster(cluster, destroy=True)
                self._echo('OK.')

        if result is not None:
            self._render_result(
                result, bootstrap_time_taken, tests_time_taken)

        return result

    def _get_term_width(self):
        return click.get_terminal_size()[0] or 70

    def _echo(self, s='', **kwargs):
        if self.verbosity > 0:
            click.secho(s, file=self.stream, **kwargs)

    def _fill(self, char, **kwargs):
        self._echo(char * self._get_term_width(), **kwargs)

    def _format_time(self, seconds):
        # Render as HH:MM:SS.s.
        hours = int(seconds // 3600)
        seconds %= 3600
        minutes = int(seconds // 60)
        seconds %= 60

        return f'{hours:02d}:{minutes:02d}:{seconds:04.1f}'

    def _print_errors(self, result):
        uxsuccesses = ((s, '') for s in result.unexpectedSuccesses)
        data = zip(
            ('WARNING', 'ERROR', 'FAIL', 'UNEXPECTED SUCCESS'),
            ('yellow', 'red', 'red', 'red'),
            (result.warnings, result.errors, result.failures, uxsuccesses)
        )

        for kind, fg, errors in data:
            for test, err in errors:
                self._fill('=', fg=fg)
                self._echo(f'{kind}: {result.getDescription(test)}',
                           fg=fg, bold=True)
                self._fill('-', fg=fg)
                # In sequential runs `err` may still be a live exc_info
                # triple (it is pre-serialized only in workers).
                if _is_exc_info(err):
                    if isinstance(err[1], edgedb.EdgeDBError):
                        srv_tb = err[1].get_server_context()
                        if srv_tb:
                            self._echo('Server Traceback:',
                                       fg='red', bold=True)
                            self._echo(srv_tb)
                            self._echo('Test Traceback:',
                                       fg='red', bold=True)
                    err = unittest.result.TestResult._exc_info_to_string(
                        result, err, test)
                self._echo(err)

    def _render_result(self, result, boot_time_taken, tests_time_taken):
        self._echo()

        if self.verbosity > 0:
            self._print_errors(result)

        if result.wasSuccessful():
            fg = 'green'
            outcome = 'SUCCESS'
        else:
            fg = 'red'
            outcome = 'FAILURE'

        if self.verbosity > 1:
            self._fill('=', fg=fg)
        self._echo(outcome, fg=fg, bold=True)

        counts = [('tests ran', result.testsRun)]

        display = {
            'expectedFailures': 'expected failures',
            'notImplemented': 'not implemented',
            'unexpectedSuccesses': 'unexpected successes',
        }

        for bit in ['failures', 'errors', 'expectedFailures',
                    'notImplemented', 'unexpectedSuccesses', 'skipped']:
            count = len(getattr(result, bit))
            if count:
                counts.append((display.get(bit, bit), count))

        for bit, count in counts:
            self._echo(f'  {bit}: ', nl=False)
            self._echo(f'{count}', bold=True)

        self._echo()
        self._echo(f'Running times: ')
        if boot_time_taken:
            self._echo('  bootstrap: ', nl=False)
            self._echo(self._format_time(boot_time_taken), bold=True)

        self._echo('  tests: ', nl=False)
        self._echo(self._format_time(tests_time_taken), bold=True)

        if boot_time_taken:
            self._echo('  total: ', nl=False)
            self._echo(self._format_time(boot_time_taken + tests_time_taken),
                       bold=True)

        self._echo()

        return result

    def _sort_tests(self, cases):
        # Test classes marked SERIALIZED must run inside one worker; wrap
        # each such class's tests in a TestSuite so they stay together.
        serialized_suites = {
            casecls: unittest.TestSuite(tests)
            for casecls, tests in cases.items()
            if getattr(casecls, 'SERIALIZED', False)
        }

        tests = itertools.chain(
            serialized_suites.values(),
            itertools.chain.from_iterable(
                tests for casecls, tests in cases.items()
                if casecls not in serialized_suites
            )
        )

        return list(tests)
# Disable pickling of traceback objects in multiprocessing.
# Test errors' tracebacks are serialized manually by
# `TestResult._exc_info_to_string()`. Therefore we need
# to make sure that some random __traceback__ attribute
# doesn't crash the test results queue.
multiprocessing.reduction.ForkingPickler.register(
    types.TracebackType,
    lambda o: (_restore_Traceback, ()))
def _restore_Traceback():
return None
|
#!/usr/bin/env python3
from flask import Flask
from flask import request
from flask.json import jsonify
from flask_cors import CORS, cross_origin
import subprocess
import os

# Flask application serving an R-based prediction model over HTTP.
app = Flask(__name__)
# Allow cross-origin requests from browser clients on other hosts.
CORS(app)
@app.route("/test")
def test():
    """Simple liveness-check endpoint."""
    greeting = "Hello World"
    return greeting
@app.route("/model", methods=['GET'])
def r_sub():
    """Run the R prediction script for the given coordinates and return its
    stdout as the response body.

    Query params: lat, long (forwarded to the script); lc, weather, day are
    read but not yet forwarded.
    """
    # Bug fix: the defaults used to be float/int, but subprocess argv entries
    # must be strings — the endpoint crashed whenever a param was omitted.
    # Query params always arrive as str, so string defaults keep types uniform.
    lat = request.args.get('lat', default='51.86215')
    long = request.args.get('long', default='-2.049923')
    # NOTE(review): these three are currently unused by the model invocation
    # below — confirm whether they should be passed to predict.r.
    lc = request.args.get('lc', default='1')
    weather = request.args.get('weather', default='1')
    day = request.args.get('day', default='6')

    p = subprocess.Popen(
        ['Rscript', '/Users/krishna/MOOC/hackupc/ml_code/predict.r', lat, long],
        stdout=subprocess.PIPE)
    # communicate() reads stdout while waiting; wait()-then-read can deadlock
    # if the child fills the pipe buffer before exiting.
    data, _ = p.communicate()
    return data
if __name__ == "__main__":
    # NOTE(review): port is passed as a string; an int (8889) is conventional
    # — confirm Flask/werkzeug coerce it on this version.
    app.run(host='0.0.0.0', port='8889')
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides regular expression functions for matching ansi escape sequences."""
import re
# Matches a single SGR-style ansi escape sequence, e.g. '\033[31m'.
_ansi_re = re.compile(r'\033\[\d{1,2}[m]')
# Same pattern, wrapped in a capture group so re.split keeps the matches.
_ansi_re_group = re.compile(r'(\033\[\d{1,2}[m])')


def split_by_ansi_escape_sequence(string, include_delimiters=False):
    """
    Splits a string into a list using any ansi escape sequence as a delimiter.

    :param string: string to be split
    :type string: str
    :param include_delimiters: If True include matched escape sequences in
        the list (default: False)
    :type include_delimiters: bool
    :returns: list of strings, split from original string by escape sequences
    :rtype: list
    """
    # The grouped pattern makes re.split() keep the delimiters themselves.
    pattern = _ansi_re_group if include_delimiters else _ansi_re
    return pattern.split(string)
def remove_ansi_escape_senquences(string):
    """
    Removes any ansi escape sequences found in the given string and returns it.

    Note: the public name retains its historical misspelling
    ("senquences") for backward compatibility with existing callers.
    """
    return _ansi_re.sub('', string)
|
"""
This file offers the methods to automatically retrieve the graph Vavraia culicis subsp. floridensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def VavraiaCulicisSubspFloridensis(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Vavraia culicis subsp. floridensis graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True,
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Vavraia culicis subsp. floridensis graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Build the retriever for this fixed graph name/repository, then call it
    # immediately so the caller receives a ready Graph instance.
    return AutomaticallyRetrievedGraph(
        graph_name="VavraiaCulicisSubspFloridensis",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
|
'''
Support for APT (Advanced Packaging Tool)
'''
# Import python libs
import os
import re
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Confirm this module is on a Debian based system
    '''
    # __grains__ is injected by the salt loader; expose this module under
    # the virtual name 'pkg' only on Debian-family systems.
    return 'pkg' if __grains__['os_family'] == 'Debian' else False
def __init__(opts):
    '''
    For Debian and derivative systems, set up
    a few env variables to keep apt happy and
    non-interactive.
    '''
    if __virtual__():
        env_vars = {
            # Suppress apt-listbugs/apt-listchanges prompts and debconf dialogs.
            'APT_LISTBUGS_FRONTEND': 'none',
            'APT_LISTCHANGES_FRONTEND': 'none',
            'DEBIAN_FRONTEND': 'noninteractive',
            # Keep existing conffiles when packages are upgraded.
            'UCF_FORCE_CONFFOLD': '1',
        }
        # Export these puppies so they persist
        os.environ.update(env_vars)
def _pkgname_without_arch(name):
'''
Check for ':arch' appended to pkg name (i.e. 32 bit installed on 64 bit
machine is ':i386')
'''
if name.find(':') >= 0:
return name.split(':')[0]
return name
def available_version(*names):
    '''
    Return the latest version of the named package available for upgrade or
    installation. If more than one package name is specified, a dict of
    name/version pairs is returned.

    If the latest version of a given package is already installed, an empty
    string will be returned for that package.

    CLI Example::

        salt '*' pkg.available_version <package name>
        salt '*' pkg.available_version <package1> <package2> <package3> ...
    '''
    if not names:
        return ''
    # Start every requested package at the empty string.
    ret = dict.fromkeys(names, '')
    installed_pkgs = list_pkgs()
    for name in names:
        cmd = 'apt-cache -q policy {0} | grep Candidate'.format(name)
        fields = __salt__['cmd.run_stdout'](cmd).split()
        # The candidate version is the last field of "Candidate: <version>".
        candidate = fields[-1] if len(fields) >= 2 else ''
        installed = installed_pkgs.get(name, '')
        # Only report the candidate when it is newer than what is installed
        # (or when nothing is installed at all).
        if candidate and (not installed or compare(installed, candidate) == -1):
            ret[name] = candidate
    # Return a string if only one package name passed
    if len(names) == 1:
        return ret[names[0]]
    return ret
def version(*names):
    '''
    Returns a string representing the package version or an empty string if not
    installed. If more than one package name is specified, a dict of
    name/version pairs is returned.

    CLI Example::

        salt '*' pkg.version <package name>
        salt '*' pkg.version <package1> <package2> <package3> ...
    '''
    # list_pkgs() is invoked unconditionally, matching the original
    # behavior even for an empty argument list.
    pkgs = list_pkgs()
    if not names:
        return ''
    if len(names) == 1:
        return pkgs.get(_pkgname_without_arch(names[0]), '')
    return {name: pkgs.get(_pkgname_without_arch(name), '') for name in names}
def refresh_db():
    '''
    Updates the APT database to latest packages based upon repositories

    Returns a dict::

        {'<database name>': Bool}

    CLI Example::

        salt '*' pkg.refresh_db
    '''
    cmd = 'apt-get -q update'
    out = __salt__['cmd.run_stdout'](cmd)
    servers = {}
    # BUG FIX: cmd.run_stdout returns one string; the original iterated it
    # directly, which walks character-by-character. Iterate lines instead.
    for line in out.splitlines():
        cols = line.split()
        if not cols:
            continue
        # Typical lines: "Get:1 http://... stable InRelease [x kB]" or
        # "Hit:1 http://... stable InRelease".
        ident = ' '.join(cols[1:4])
        # 'Get' means the index was actually fetched; anything else (Hit,
        # Ign, ...) is reported as False.
        servers[ident] = 'Get' in cols[0]
    return servers
def install(name=None, refresh=False, fromrepo=None, skip_verify=False,
            debconf=None, pkgs=None, sources=None, **kwargs):
    '''
    Install the passed package, add refresh=True to update the dpkg database.

    name
        The name of the package to be installed. Note that this parameter is
        ignored if either "pkgs" or "sources" is passed. Additionally, please
        note that this option can only be used to install packages from a
        software repository. To install a package file manually, use the
        "sources" option.

        CLI Example::

            salt '*' pkg.install <package name>

    refresh
        Whether or not to refresh the package database before installing.

    fromrepo
        Specify a package repository to install from
        (e.g., ``apt-get -t unstable install somepackage``)

    skip_verify
        Skip the GPG verification check (e.g., ``--allow-unauthenticated``, or
        ``--force-bad-verify`` for install from package file).

    debconf
        Provide the path to a debconf answers file, processed before
        installation.

    version
        Install a specific version of the package, e.g. 1.0.9~ubuntu. Ignored
        if "pkgs" or "sources" is passed.

    Multiple Package Installation Options:

    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list.

        CLI Example::

            salt '*' pkg.install pkgs='["foo","bar"]'

    sources
        A list of DEB packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source URI
        or local path to the package.

        CLI Example::

            salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'

    Returns a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    '''
    # Note that this function will daemonize the subprocess
    # preventing a restart resulting from a salt-minion upgrade
    # from killing the apt and hence hosing the dpkg database
    salt.utils.daemonize_if(__opts__, **kwargs)
    # Catch both boolean input from state and string input from CLI
    if refresh is True or str(refresh).lower() == 'true':
        refresh_db()
    if debconf:
        # Preseed debconf answers before the package gets configured.
        __salt__['debconf.set_file'](debconf)
    # Normalize the three mutually-exclusive target inputs (name/pkgs/sources)
    # into a flat parameter list plus a type tag ('file' or 'repository').
    pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](name,
                                                                  pkgs,
                                                                  sources)
    # Support old "repo" argument
    repo = kwargs.get('repo', '')
    if not fromrepo and repo:
        fromrepo = repo
    if kwargs.get('env'):
        try:
            # Caller-supplied environment overrides for the apt/dpkg run.
            os.environ.update(kwargs.get('env'))
        except Exception as e:
            log.exception(e)
    if pkg_params is None or len(pkg_params) == 0:
        return {}
    elif pkg_type == 'file':
        # Local .deb file(s): install directly with dpkg.
        cmd = 'dpkg -i {verify} {pkg}'.format(
            verify='--force-bad-verify' if skip_verify else '',
            pkg=' '.join(pkg_params),
        )
    elif pkg_type == 'repository':
        fname = ' '.join(pkg_params)
        if len(pkg_params) == 1:
            # Pin an exact version when 'eq'/'version' was passed (only
            # meaningful for a single-package install).
            for vkey, vsign in (('eq', '='), ('version', '=')):
                if kwargs.get(vkey) is not None:
                    fname = '"{0}{1}{2}"'.format(fname, vsign, kwargs[vkey])
                    break
        if fromrepo:
            log.info('Targeting repo "{0}"'.format(fromrepo))
        cmd = 'apt-get -q -y {confold} {confdef} {verify} {target} install ' \
              '{pkg}'.format(
                  confold='-o DPkg::Options::=--force-confold',
                  confdef='-o DPkg::Options::=--force-confdef',
                  verify='--allow-unauthenticated' if skip_verify else '',
                  target='-t {0}'.format(fromrepo) if fromrepo else '',
                  pkg=fname,
              )
    old = list_pkgs()
    __salt__['cmd.run_all'](cmd)
    new = list_pkgs()
    # Diff the before/after package sets to report what actually changed.
    return __salt__['pkg_resource.find_changes'](old, new)
def remove(pkg, **kwargs):
    '''
    Remove a single package via ``apt-get remove``

    Returns a list containing the names of the removed packages.

    CLI Example::

        salt '*' pkg.remove <package name>
    '''
    old_pkgs = list_pkgs()
    env = kwargs.get('env')
    if env:
        try:
            # Caller-supplied environment overrides for the apt invocation.
            os.environ.update(env)
        except Exception as e:
            log.exception(e)
    __salt__['cmd.run']('apt-get -q -y remove {0}'.format(pkg))
    new_pkgs = list_pkgs()
    # Anything present before but missing afterwards was removed.
    return [name for name in old_pkgs if name not in new_pkgs]
def purge(pkg, **kwargs):
    '''
    Remove a package via ``apt-get purge`` along with all configuration
    files and unused dependencies.

    Returns a list containing the names of the removed packages

    CLI Example::

        salt '*' pkg.purge <package name>
    '''
    old_pkgs = list_pkgs()
    env = kwargs.get('env')
    if env:
        try:
            # Caller-supplied environment overrides for the apt invocation.
            os.environ.update(env)
        except Exception as e:
            log.exception(e)
    # Purge removes the package together with its configuration files.
    __salt__['cmd.run']('apt-get -q -y purge {0}'.format(pkg))
    new_pkgs = list_pkgs()
    # Anything present before but missing afterwards was purged.
    return [name for name in old_pkgs if name not in new_pkgs]
def upgrade(refresh=True, **kwargs):
    '''
    Upgrades all packages via ``apt-get dist-upgrade``

    Returns a list of dicts containing the package names, and the new and old
    versions::

        [
            {'<package>': {'old': '<old-version>',
                           'new': '<new-version>'}
            }',
            ...
        ]

    CLI Example::

        salt '*' pkg.upgrade
    '''
    salt.utils.daemonize_if(__opts__, **kwargs)
    # Catch both boolean input from state and string input from CLI
    if refresh is True or str(refresh).lower() == 'true':
        refresh_db()
    old_pkgs = list_pkgs()
    cmd = ('apt-get -q -y -o DPkg::Options::=--force-confold '
           '-o DPkg::Options::=--force-confdef dist-upgrade')
    __salt__['cmd.run'](cmd)
    new_pkgs = list_pkgs()
    ret_pkgs = {}
    for name, new_version in new_pkgs.items():
        if name in old_pkgs:
            # Unchanged packages are omitted from the report.
            if old_pkgs[name] == new_version:
                continue
            ret_pkgs[name] = {'old': old_pkgs[name],
                              'new': new_version}
        else:
            # Freshly pulled-in package: no previous version.
            ret_pkgs[name] = {'old': '',
                              'new': new_version}
    return ret_pkgs
def list_pkgs(regex_string=''):
    '''
    List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    External dependencies::

        Virtual package resolution requires aptitude.
        Without aptitude virtual packages will be reported as not installed.

    CLI Example::

        salt '*' pkg.list_pkgs
        salt '*' pkg.list_pkgs httpd
    '''
    ret = {}
    # The doubled braces survive str.format, so dpkg-query receives the
    # literal showformat '${Status} ${Package} ${Version}'.
    cmd = (
        'dpkg-query --showformat=\'${{Status}} ${{Package}} ${{Version}}\n\' '
        '-W {0}'.format(
            regex_string
        )
    )
    out = __salt__['cmd.run_stdout'](cmd)
    # Typical line of output:
    # install ok installed zsh 4.3.17-1ubuntu1
    # cols[0] = desired state, cols[2] = current state,
    # cols[3] = package name, cols[4] = version.
    for line in out.splitlines():
        cols = line.split()
        if len(cols) and ('install' in cols[0] or 'hold' in cols[0]) and \
                'installed' in cols[2]:
            __salt__['pkg_resource.add_pkg'](ret, cols[3], cols[4])
    # Check for virtual packages. We need aptitude for this.
    if __salt__['cmd.has_exec']('aptitude'):
        # When the dpkg query matched nothing, re-use the caller's pattern;
        # otherwise scan all virtual packages.
        if not ret:
            search_string = regex_string
        else:
            search_string = '.+'
        cmd = 'aptitude search "?name(^{0}$) ?virtual ' \
              '?reverse-provides(?installed)"'.format(search_string)
        out = __salt__['cmd.run_stdout'](cmd)
        for line in out.splitlines():
            # Setting all matching 'installed' virtual package versions to 1
            try:
                name = line.split()[1]
            except IndexError:
                continue
            __salt__['pkg_resource.add_pkg'](ret, name, '1')
    __salt__['pkg_resource.sort_pkglist'](ret)
    return ret
def _get_upgradable():
    '''
    Utility function to get upgradable packages

    Sample return data:

        { 'pkgname': '1.2.3-45', ... }
    '''
    cmd = 'apt-get --just-print dist-upgrade'
    out = __salt__['cmd.run_stdout'](cmd)
    # rexp parses lines that look like the following:
    # Conf libxfont1 (1:1.4.5-1 Debian:testing [i386])
    #
    # Raw string literals: '\(' and '\]' are invalid escape sequences in a
    # plain string (DeprecationWarning since Python 3.6, SyntaxError
    # eventually).
    rexp = re.compile(r'(?m)^Conf '
                      r'([^ ]+) '               # Package name
                      r'\(([^ ]+) '             # Version
                      r'([^ ]+)'                # Release
                      r'(?: \[([^\]]+)\])?\)$')  # Arch
    ret = {}
    # findall yields (name, version, release, arch) tuples; only name and
    # version are reported.
    for name, version_num, _release, _arch in rexp.findall(out):
        ret[name] = version_num
    return ret
def list_upgrades(refresh=True):
    '''
    List all available package upgrades.

    CLI Example::

        salt '*' pkg.list_upgrades
    '''
    # Catch both boolean input from state and string input from CLI.
    should_refresh = refresh is True or str(refresh).lower() == 'true'
    if should_refresh:
        refresh_db()
    return _get_upgradable()
def upgrade_available(name):
    '''
    Check whether or not an upgrade is available for a given package

    CLI Example::

        salt '*' pkg.upgrade_available <package name>
    '''
    # available_version() returns '' when no newer candidate exists, so the
    # truthiness of its result is the answer.
    return bool(available_version(name))
def compare(version1='', version2=''):
    '''
    Compare two version strings. Return -1 if version1 < version2,
    0 if version1 == version2, and 1 if version1 > version2. Return None if
    there was a problem making the comparison.

    CLI Example::

        salt '*' pkg.compare '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1'
    '''
    # Delegate the Debian version-ordering rules to dpkg itself: probe each
    # relation in turn and report the first one that holds.
    comparisons = (('lt', -1), ('eq', 0), ('gt', 1))
    try:
        for oper, result in comparisons:
            cmd = 'dpkg --compare-versions "{0}" {1} ' \
                  '"{2}"'.format(version1, oper, version2)
            if __salt__['cmd.retcode'](cmd) == 0:
                return result
    except Exception as e:
        log.error(e)
    return None
|
#Stack using python list
import random
class Stack(object):
    """LIFO stack backed by a Python list, with chatty console output.

    The list tail is the top of the stack, so push/pop are O(1).
    """

    def __init__(self):
        # Underlying storage; last element is the top of the stack.
        self._stack = []

    def push(self, value):
        """Place *value* on top of the stack."""
        print("PUSHING {} ON THE STACK".format(value))
        self._stack.append(value)

    def pop(self):
        """Remove and return the top value; print a warning when empty."""
        print("POPPING NUMBER FROM STACK")
        if not self._stack:
            print("STACK IS EMPTY")
            return None
        return self._stack.pop()

    def top(self):
        """Return the top value without removing it; warn when empty."""
        print("TOP NUMBER ON THE STACK")
        if not self._stack:
            print("STACK IS EMPTY")
            return None
        return self._stack[-1]

    def display(self):
        """Print the stack from top to bottom with '|' separators."""
        print("PRINTING THE STACK")
        last_index = len(self._stack) - 1
        for position, value in enumerate(reversed(self._stack)):
            print(value)
            if position != last_index:
                print("|")
# Demo: exercise the Stack with ten random values, then a fixed one.
obj = Stack()
for i in range(10):
    # Values are random, so each run prints different stack contents.
    obj.push(random.randint(1, 100))
obj.display()
obj.pop()
obj.display()
obj.push(112)
obj.display()
obj.pop()
obj.display()
obj.pop()
obj.display()
# top() prints its banner and returns the current top value.
print(obj.top())
print("\n")
|
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from functools import partial
import tensorflow as tf
from lmnet.networks.object_detection.yolo_v2 import YoloV2
class YoloV2Quantize(YoloV2):
    """Quantize YOLOv2 Network.

    It is based on original YOLO v2; weight quantization is applied through
    a custom variable getter, activation quantization by swapping the
    activation function.
    """

    def __init__(
            self,
            quantize_first_convolution=True,
            quantize_last_convolution=True,
            activation_quantizer=None,
            activation_quantizer_kwargs=None,
            weight_quantizer=None,
            weight_quantizer_kwargs=None,
            *args,
            **kwargs
    ):
        """
        Args:
            quantize_first_convolution(bool): use quantization in first conv.
            quantize_last_convolution(bool): use quantization in last conv.
            weight_quantizer (callable): weight quantizer.
            weight_quantizer_kwargs(dict): Initialize kwargs for weight quantizer.
            activation_quantizer (callable): activation quantizer
            activation_quantizer_kwargs(dict): Initialize kwargs for activation quantizer.
        """
        super().__init__(
            *args,
            **kwargs,
        )

        self.quantize_first_convolution = quantize_first_convolution
        self.quantize_last_convolution = quantize_last_convolution

        # BUG FIX: the original tested `x if not None else {}` — `not None`
        # is always True, so a None kwargs dict reached `**`-expansion and
        # raised TypeError. Test the actual value instead.
        activation_quantizer_kwargs = activation_quantizer_kwargs if activation_quantizer_kwargs is not None else {}
        weight_quantizer_kwargs = weight_quantizer_kwargs if weight_quantizer_kwargs is not None else {}

        assert callable(weight_quantizer)
        assert callable(activation_quantizer)

        self.weight_quantization = weight_quantizer(**weight_quantizer_kwargs)
        self.activation = activation_quantizer(**activation_quantizer_kwargs)

        if self.quantize_last_convolution:
            self.before_last_activation = self.activation
        else:
            # Keep the last activation in float when the last conv is not
            # quantized.
            self.before_last_activation = lambda x: tf.nn.leaky_relu(x, alpha=0.1, name="leaky_relu")

    @staticmethod
    def _quantized_variable_getter(
            weight_quantization,
            quantize_first_convolution,
            quantize_last_convolution,
            getter,
            name,
            *args,
            **kwargs):
        """Get the quantized variables.

        Use if to choose or skip the target should be quantized.

        Args:
            weight_quantization: Callable object which quantize variable.
            quantize_first_convolution(bool): quantize the first conv kernel.
            quantize_last_convolution(bool): quantize the last conv kernel.
            getter: Default from tensorflow.
            name: Default from tensorflow.
            args: Args.
            kwargs: Kwargs.
        """
        assert callable(weight_quantization)
        var = getter(name, *args, **kwargs)
        with tf.compat.v1.variable_scope(name):
            if "kernel" == var.op.name.split("/")[-1]:
                # Optionally leave the first/last convolution kernels
                # un-quantized (identified by their scope prefixes).
                if not quantize_first_convolution:
                    if var.op.name.startswith("block_1/"):
                        return var

                if not quantize_last_convolution:
                    if var.op.name.startswith("conv_23/"):
                        return var

                # Apply weight quantize to variable whose last word of name is "kernel".
                quantized_kernel = weight_quantization(var)
                tf.compat.v1.summary.histogram("quantized_kernel", quantized_kernel)
                return quantized_kernel

        return var

    def base(self, images, is_training):
        """Build the network under a variable scope whose custom getter
        applies weight quantization to every kernel variable."""
        custom_getter = partial(
            self._quantized_variable_getter,
            self.weight_quantization,
            self.quantize_first_convolution,
            self.quantize_last_convolution,
        )
        with tf.compat.v1.variable_scope("", custom_getter=custom_getter):
            return super().base(images, is_training)
|
#!/usr/bin/env python3
from kubernetes.shell_utils import simple_run as run
# Submit one dataproc job per genome build to convert the MPC VCF into a
# sites-only Hail table. `run` shells out; this loop is side-effect only.
for genome_version, vcf_path in [
    ("37", "gs://seqr-reference-data/GRCh37/MPC/fordist_constraint_official_mpc_values.vcf.gz"),
    ("38", "gs://seqr-reference-data/GRCh38/MPC/fordist_constraint_official_mpc_values.liftover.GRCh38.vcf.gz"),
]:
    run(("python3 gcloud_dataproc/v02/run_script.py "
         "--cluster create-ht-mpc "
         "hail_scripts/v02/convert_vcf_to_hail.py "
         "--output-sites-only-ht "
         f"--genome-version {genome_version} "
         f"{vcf_path}"))
|
# -*- coding: utf-8 -*-
"""
Plant Classification webpage
Author: Ignacio Heredia
Date: December 2016
Descrition:
This script launches a basic webpage interface to return results on the plant classification.
To launch the webpage, enter in Ubuntu terminal:
export FLASK_APP=serve.py
python -m flask run
Tip:
To host the app in a subpath through a proxy_pass with nginx check Ross's anwer in [1].
Redirections must then be made with either:
* redirect(url_for('intmain', _external=True))
* redirect('./')
References:
[1] https://stackoverflow.com/questions/25962224/running-a-flask-application-at-a-url-that-is-not-the-domain-root
"""
from flask import Flask, render_template, request, send_from_directory, redirect, url_for, json, Response, make_response
import os
from webpage_utils import url_prediction, localfile_prediction, print_error, label_list_to_html
homedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

# Configuration parameters of the web application
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads/'
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg', 'PNG', 'JPG', 'JPEG'])
if os.path.isfile('secret_key.txt'):
    # Use a context manager so the file handle is closed deterministically
    # (the original open(...).read() leaked the handle until GC).
    with open('secret_key.txt', 'r') as key_file:
        app.secret_key = key_file.read()
else:
    app.secret_key = 'devkey, should be in a file'

# Create labels.html from synsets.txt
label_list_to_html(os.path.join(homedir, 'model_files', 'data', 'synsets.txt'))
@app.route('/')
def intmain():
    """Serve the landing page."""
    landing_page = "index.html"
    return render_template(landing_page)
@app.route('/labels')
def label_list():
    """Serve the page listing all recognizable plant labels."""
    labels_page = 'label_list.html'
    return render_template(labels_page)
@app.route('/robots.txt')
def static_from_root():
    """Serve robots.txt straight from the static folder."""
    # request.path starts with '/', so strip it to get the filename.
    filename = request.path[1:]
    return send_from_directory(app.static_folder, filename)
@app.route('/url_upload', methods=['POST'])
def url_post():
    """Handle a prediction request for a whitespace-separated list of image URLs."""
    raw = request.form['url']
    # Drop empty tokens and strip stray spaces from each URL.
    url_list = [token.replace(' ', '') for token in raw.split(' ') if token != '']
    message = url_prediction(url_list)
    status = message['status']
    if status == 'error':
        print_error(app, message)
        return redirect(url_for('intmain', _external=True))
    if status == 'OK':
        return render_template('results.html', predictions=message)
@app.route('/local_upload', methods=['POST'])
def local_post():
    """Handle a prediction request for locally uploaded image files."""
    message = localfile_prediction(app, request.files.getlist("local_files"))
    status = message['status']
    if status == 'error':
        print_error(app, message)
        return redirect(url_for('intmain', _external=True))
    if status == 'OK':
        return render_template('results.html', predictions=message)
if __name__ == '__main__':
    # Run the development server with debug disabled so tracebacks are not
    # exposed to clients.
    app.debug = False
    app.run()
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import abc
from typing import (
Any, Dict, Iterator, List, Optional, Union,
)
from pyhocon import ConfigTree
from databuilder.extractor.base_extractor import Extractor
from databuilder.models.table_metadata import ColumnMetadata
class ElasticsearchBaseExtractor(Extractor):
    """
    Extractor to extract index metadata from Elasticsearch

    By default, the extractor does not add sort_order to columns. Set
    ELASTICSEARCH_CORRECT_SORT_ORDER conf to True for columns to have
    correct sort order.
    """
    ELASTICSEARCH_CLIENT_CONFIG_KEY = 'client'
    ELASTICSEARCH_EXTRACT_TECHNICAL_DETAILS = 'extract_technical_details'

    # For backwards compatibility, the Elasticsearch extractor does not add sort_order to columns by default.
    # Set this to true in the conf for columns to have correct sort order.
    ELASTICSEARCH_CORRECT_SORT_ORDER = 'correct_sort_order'

    CLUSTER = 'cluster'
    SCHEMA = 'schema'

    def __init__(self) -> None:
        super(ElasticsearchBaseExtractor, self).__init__()

    def init(self, conf: ConfigTree) -> None:
        """Capture conf and the ES client, and prime the extraction iterator."""
        self.conf = conf
        self._extract_iter = self._get_extract_iter()
        self.es = self.conf.get(ElasticsearchBaseExtractor.ELASTICSEARCH_CLIENT_CONFIG_KEY)

    def _get_es_version(self) -> str:
        """Return the server version string, e.g. '7.10.2'."""
        return self.es.info().get('version').get('number')

    def _get_indexes(self) -> Dict:
        """Fetch all non-system indexes (names starting with '.' are skipped).

        Best effort: any client error simply yields an empty dict.
        """
        result = {}
        try:
            _indexes = self.es.indices.get('*')
            for index_name, index_body in _indexes.items():
                if not index_name.startswith('.'):
                    result[index_name] = index_body
        except Exception:
            # Deliberate best-effort: connectivity/permission problems
            # produce no indexes rather than aborting extraction.
            pass
        return result

    def _get_index_mapping_properties(self, index: Dict) -> Optional[Dict]:
        """Return the mapping 'properties' dict for *index*, accounting for
        the removal of mapping types in Elasticsearch 7."""
        mappings = index.get('mappings', {})

        # Mapping types were removed in Elasticsearch 7. As a result, index mappings are formatted differently.
        # See https://www.elastic.co/guide/en/elasticsearch/reference/current/removal-of-types.html
        version = self._get_es_version()

        try:
            if int(version.split('.')[0]) >= 7:
                properties = mappings.get('properties', {})
            else:
                # Pre-7: properties live one level down, under the (single)
                # mapping type.
                properties = list(mappings.values())[0].get('properties', {})
        except Exception:
            properties = {}

        return properties

    def _get_attributes(self,
                        input_mapping: Dict,
                        parent_col_name: str = '',
                        separator: str = '.') -> List[ColumnMetadata]:
        """Flatten a (possibly nested) ES mapping into ColumnMetadata entries,
        joining nested field names with *separator*."""
        cols: List[ColumnMetadata] = []

        for col_name, col_mapping in input_mapping.items():
            qualified_col_name = str(parent_col_name) + separator + col_name if parent_col_name else col_name

            if isinstance(col_mapping, dict):
                # Idiom fix: membership via the `in` operator instead of a
                # direct __contains__ dunder call.
                if 'properties' in col_mapping:
                    # Need to recurse
                    inner_mapping: Dict = col_mapping.get('properties', {})
                    cols.extend(self._get_attributes(input_mapping=inner_mapping,
                                                     parent_col_name=qualified_col_name,
                                                     separator=separator))
                else:
                    cols.append(ColumnMetadata(name=qualified_col_name,
                                               description='',
                                               col_type=col_mapping.get('type', ''),
                                               sort_order=0))
        return cols

    def extract(self) -> Any:
        """Return the next record from the extraction iterator, or None when exhausted."""
        try:
            result = next(self._extract_iter)
            return result
        except StopIteration:
            return None

    @property
    def database(self) -> str:
        return 'elasticsearch'

    @property
    def cluster(self) -> str:
        return self.conf.get(ElasticsearchBaseExtractor.CLUSTER)

    @property
    def schema(self) -> str:
        return self.conf.get(ElasticsearchBaseExtractor.SCHEMA)

    @property
    def _extract_technical_details(self) -> bool:
        # Missing conf key raises inside pyhocon; treat it as False.
        try:
            return self.conf.get(ElasticsearchBaseExtractor.ELASTICSEARCH_EXTRACT_TECHNICAL_DETAILS)
        except Exception:
            return False

    @property
    def _correct_sort_order(self) -> bool:
        # Missing conf key raises inside pyhocon; treat it as False.
        try:
            return self.conf.get(ElasticsearchBaseExtractor.ELASTICSEARCH_CORRECT_SORT_ORDER)
        except Exception:
            return False

    @abc.abstractmethod
    def _get_extract_iter(self) -> Iterator[Union[Any, None]]:
        pass
|
from pathlib import Path
from DenoiseSum.utils import JSONIterator
def build_dataset(input_file: Path, output_path: Path, review_key: str):
    """Iterate review objects from *input_file* and inspect their text.

    NOTE(review): the non-alphanumeric character count is computed and
    immediately discarded, and *output_path* is never used — this looks
    like an unfinished stub; confirm the intended behavior before relying
    on it.
    """
    for object in JSONIterator(input_file):
        content = object[review_key]
        sum(not c.isalnum() for c in content)
|
from datetime import datetime
from django.test.testcases import TestCase
from casexml.apps.stock.models import StockReport
from corehq.apps.commtrack.models import StockState
from corehq.apps.products.models import Product
from corehq.apps.sms.mixin import VerifiedNumber
from corehq.apps.sms.models import SMS
from custom.ewsghana.alerts import ONGOING_NON_REPORTING, ONGOING_STOCKOUT_AT_SDP, ONGOING_STOCKOUT_AT_RMS
from custom.ewsghana.alerts.ongoing_non_reporting import OnGoingNonReporting
from custom.ewsghana.alerts.ongoing_stockouts import OnGoingStockouts, OnGoingStockoutsRMS
from custom.ewsghana.tests.test_reminders import create_stock_report
from custom.ewsghana.utils import prepare_domain, bootstrap_web_user, make_loc, assign_products_to_location, \
create_backend, set_sms_notifications
TEST_DOMAIN = 'ewsghana-alerts-test'
class TestAlerts(TestCase):
    """Integration tests for the EWS Ghana ongoing non-reporting and
    stockout SMS alerts.

    A small location tree (national -> region -> two RMS stores, plus a
    district with two facilities) is created once per class, with one
    SMS-subscribed web user at district, regional and national level.
    """

    @classmethod
    def setUpClass(cls):
        # Domain, SMS backend and location hierarchy shared by all tests.
        cls.domain = prepare_domain(TEST_DOMAIN)
        cls.sms_backend_mapping, cls.backend = create_backend()
        cls.national = make_loc(code='national', name='National', type='country', domain=TEST_DOMAIN)
        cls.region = make_loc(code="region", name="Test Region", type="region", domain=TEST_DOMAIN,
                              parent=cls.national)
        cls.rms = make_loc(code="rms", name="Test Medical Store", type="Regional Medical Store",
                           domain=TEST_DOMAIN, parent=cls.region)
        cls.rms2 = make_loc(code="rms2", name="Test Medical Store 2", type="Regional Medical Store",
                            domain=TEST_DOMAIN, parent=cls.region)
        cls.district = make_loc(code="district", name="Test District", type="district", domain=TEST_DOMAIN)
        cls.loc1 = make_loc(code="tf", name="Test Facility", type="Hospital", domain=TEST_DOMAIN,
                            parent=cls.district)
        cls.loc2 = make_loc(code="tf2", name="Test Facility 2", type="Hospital", domain=TEST_DOMAIN,
                            parent=cls.district)
        # One SMS-subscribed web user per level of the hierarchy.
        cls.user1 = bootstrap_web_user(
            username='test1', phone_number='1111', location=cls.district, domain=TEST_DOMAIN,
            first_name='test', last_name='test1',
            user_data={
                'role': []
            }, email='test1@example.com', password='dummy'
        )
        set_sms_notifications(TEST_DOMAIN, cls.user1, True)
        cls.national_user = bootstrap_web_user(
            username='test2', phone_number='2222', location=cls.national, domain=TEST_DOMAIN,
            first_name='test', last_name='test2',
            user_data={
                'role': []
            }, email='test2@example.com', password='dummy'
        )
        set_sms_notifications(TEST_DOMAIN, cls.national_user, True)
        cls.regional_user = bootstrap_web_user(
            username='test4', phone_number='4444', location=cls.region, domain=TEST_DOMAIN,
            first_name='test', last_name='test4',
            user_data={
                'role': []
            }, email='test4@example.com', password='dummy'
        )
        set_sms_notifications(TEST_DOMAIN, cls.regional_user, True)
        cls.product = Product(domain=TEST_DOMAIN, name='Test Product', code_='tp', unit='each')
        cls.product.save()
        cls.product2 = Product(domain=TEST_DOMAIN, name='Test Product2', code_='tp2', unit='each')
        cls.product2.save()
        assign_products_to_location(cls.loc1, [cls.product])
        assign_products_to_location(cls.loc2, [cls.product, cls.product2])
        assign_products_to_location(cls.rms, [cls.product, cls.product2])

    @classmethod
    def tearDownClass(cls):
        # Remove the users, backend and verified numbers created above.
        cls.user1.delete()
        cls.national_user.delete()
        cls.regional_user.delete()
        cls.backend.delete()
        cls.sms_backend_mapping.delete()
        for vn in VerifiedNumber.by_domain(TEST_DOMAIN):
            vn.delete()

    def tearDown(self):
        # Each test starts from a clean SMS/stock slate.
        SMS.objects.all().delete()
        StockReport.objects.all().delete()
        StockState.objects.all().delete()

    def test_ongoing_non_reporting(self):
        """Non-reporting facilities are named in the alert and drop out
        once they submit a stock report."""
        OnGoingNonReporting(TEST_DOMAIN).send()
        self.assertEqual(SMS.objects.count(), 1)
        smses = SMS.objects.all()
        self.assertEqual(smses[0].text, ONGOING_NON_REPORTING % 'Test Facility, Test Facility 2')
        create_stock_report(self.loc1, {'tp': 1})
        now = datetime.utcnow()
        OnGoingNonReporting(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 1)
        self.assertEqual(smses[0].text, ONGOING_NON_REPORTING % 'Test Facility 2')
        create_stock_report(self.loc2, {'tp2': 1})
        now = datetime.utcnow()
        OnGoingNonReporting(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)

    def test_ongoing_stockouts(self):
        """Stocked-out facilities trigger the SDP alert; restocking
        clears it."""
        OnGoingStockouts(TEST_DOMAIN).send()
        self.assertEqual(SMS.objects.count(), 0)
        create_stock_report(self.loc1, {'tp': 0})
        now = datetime.utcnow()
        OnGoingStockouts(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 1)
        self.assertEqual(smses[0].text, ONGOING_STOCKOUT_AT_SDP % 'Test Facility')
        create_stock_report(self.loc2, {'tp': 0})
        now = datetime.utcnow()
        OnGoingStockouts(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 1)
        self.assertEqual(smses[0].text, ONGOING_STOCKOUT_AT_SDP % 'Test Facility, Test Facility 2')
        create_stock_report(self.loc1, {'tp': 10})
        create_stock_report(self.loc2, {'tp': 10})
        now = datetime.utcnow()
        OnGoingStockouts(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)

    def test_ongoing_stockouts_rms(self):
        """Regional Medical Store stockouts alert both subscribed users
        and clear once every store is restocked."""
        OnGoingStockoutsRMS(TEST_DOMAIN).send()
        self.assertEqual(SMS.objects.count(), 0)
        create_stock_report(self.rms, {'tp': 0})
        create_stock_report(self.rms2, {'tp': 0})
        now = datetime.utcnow()
        OnGoingStockoutsRMS(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 2)
        self.assertEqual(smses[0].text, ONGOING_STOCKOUT_AT_RMS % 'Test Medical Store, Test Medical Store 2')
        create_stock_report(self.rms2, {'tp': 15})
        now = datetime.utcnow()
        OnGoingStockoutsRMS(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 2)
        self.assertEqual(smses[0].text, ONGOING_STOCKOUT_AT_RMS % 'Test Medical Store')
        create_stock_report(self.rms, {'tp': 15})
        now = datetime.utcnow()
        OnGoingStockoutsRMS(TEST_DOMAIN).send()
        smses = SMS.objects.filter(date__gte=now)
        self.assertEqual(smses.count(), 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.