hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72b0ab4b78ec9b7eb7deec2b8193a86ca41b48e | 938 | py | Python | year_2020/day13/test_day13.py | mjalkio/advent-of-code | 54dbfcba3850e72d7b736ef1e7d2a3cb91e65d42 | [
"MIT"
] | null | null | null | year_2020/day13/test_day13.py | mjalkio/advent-of-code | 54dbfcba3850e72d7b736ef1e7d2a3cb91e65d42 | [
"MIT"
] | null | null | null | year_2020/day13/test_day13.py | mjalkio/advent-of-code | 54dbfcba3850e72d7b736ef1e7d2a3cb91e65d42 | [
"MIT"
] | null | null | null | import pytest
from year_2020.day13.shuttle_search import (
get_bus_id_times_wait_time,
get_earliest_bus_and_wait_time_for_airport,
get_shuttle_company_solution,
)
# Sample inputs for the shuttle_search module (AoC 2020 day 13 style):
# the first line is an earliest-departure timestamp, the second is the
# comma-separated bus schedule, with 'x' marking an out-of-service slot.
TEST_INPUT = """
939
7,13,x,x,59,x,31,19
"""
# The remaining samples exercise part 2 only, where the timestamp line is
# unused — so it is set to 0.
TEST_INPUT_2 = """
0
17,x,13,19
"""
TEST_INPUT_3 = """
0
67,7,59,61
"""
TEST_INPUT_4 = """
0
67,x,7,59,61
"""
TEST_INPUT_5 = """
0
67,7,x,59,61
"""
TEST_INPUT_6 = """
0
1789,37,47,1889
"""
def test_part_1():
    # Part 1 of the sample: the product answer is 295, which is consistent
    # with the (bus_id, wait_time) pair (59, 5) asserted below.
    assert get_bus_id_times_wait_time(TEST_INPUT) == 295
    assert get_earliest_bus_and_wait_time_for_airport(TEST_INPUT) == (59, 5)
# Part 2: each pair is (sample schedule, known expected answer).
@pytest.mark.parametrize(
    "test_input,expected",
    [
        (TEST_INPUT, 1068781),
        (TEST_INPUT_2, 3417),
        (TEST_INPUT_3, 754018),
        (TEST_INPUT_4, 779210),
        (TEST_INPUT_5, 1261476),
        (TEST_INPUT_6, 1202161486),
    ],
)
def test_part_2(test_input, expected):
    # One assertion per parametrized sample schedule.
    assert get_shuttle_company_solution(test_input) == expected
| 16.172414 | 76 | 0.672708 | import pytest
from year_2020.day13.shuttle_search import (
get_bus_id_times_wait_time,
get_earliest_bus_and_wait_time_for_airport,
get_shuttle_company_solution,
)
TEST_INPUT = """
939
7,13,x,x,59,x,31,19
"""
TEST_INPUT_2 = """
0
17,x,13,19
"""
TEST_INPUT_3 = """
0
67,7,59,61
"""
TEST_INPUT_4 = """
0
67,x,7,59,61
"""
TEST_INPUT_5 = """
0
67,7,x,59,61
"""
TEST_INPUT_6 = """
0
1789,37,47,1889
"""
def test_part_1():
assert get_bus_id_times_wait_time(TEST_INPUT) == 295
assert get_earliest_bus_and_wait_time_for_airport(TEST_INPUT) == (59, 5)
@pytest.mark.parametrize(
"test_input,expected",
[
(TEST_INPUT, 1068781),
(TEST_INPUT_2, 3417),
(TEST_INPUT_3, 754018),
(TEST_INPUT_4, 779210),
(TEST_INPUT_5, 1261476),
(TEST_INPUT_6, 1202161486),
],
)
def test_part_2(test_input, expected):
assert get_shuttle_company_solution(test_input) == expected
| true | true |
f72b0ad54d6dd35fc8e313c9014957d5d7c84c64 | 2,327 | py | Python | TheoryValidation/CirculantGraphs.py | ctralie/GeometricBeatTracking | 2c35183f638c4afb51808c09e46da0f74384cba6 | [
"Apache-2.0"
] | 2 | 2019-11-03T16:59:34.000Z | 2021-04-17T05:41:01.000Z | TheoryValidation/CirculantGraphs.py | ctralie/GeometricBeatTracking | 2c35183f638c4afb51808c09e46da0f74384cba6 | [
"Apache-2.0"
] | null | null | null | TheoryValidation/CirculantGraphs.py | ctralie/GeometricBeatTracking | 2c35183f638c4afb51808c09e46da0f74384cba6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import sys
sys.path.append("..")
from Laplacian import *
def getCirculantAdj(N, lags):
    """
    Build the sparse adjacency matrix of a circulant graph: an N-cycle
    plus, for every lag d in lags, a chord from each vertex i to
    (i + d) mod N.  Chords are added in the +d direction only, so pass
    both d and N - d if a symmetric matrix is required (d == N - d is
    automatically symmetric).

    Parameters
    ----------
    N: int
        Number of vertices
    lags: list of int
        Extra circulant offsets, in addition to the +/-1 cycle edges

    Returns
    -------
    scipy.sparse.csr_matrix of shape (N, N)
        Adjacency matrix.  If a lag duplicates an existing edge, the
        duplicate COO entries are summed, so entries can exceed 1.
    """
    # Row indices: every vertex appears once per edge family
    # (successors, predecessors, and one family per lag).
    # list(...) is required under Python 3, where range() is a lazy
    # object that supports neither * nor + (the original code was
    # Python 2 only).
    I = list(range(N)) * (len(lags) + 2)
    # Column indices for the cycle: successors then predecessors,
    # with the two wrap-around entries patched by hand.
    J = list(range(1, N + 1)) + list(range(-1, N - 1))
    J[N - 1] = 0   # successor of vertex N-1 wraps around to 0
    J[N] = N - 1   # predecessor of vertex 0 wraps around to N-1
    for lag in lags:
        J = J + (np.mod(np.arange(N) + lag, N)).tolist()
    V = np.ones(len(I))
    return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()

def getOneOnK(N, k):
    """
    Circulant adjacency for an N-cycle with chords connecting every
    vertex to the vertices 1/k, 2/k, ..., (k-1)/k of the way around.

    Floor division (//) keeps the lags integral under Python 3, where
    the original i*N/k would produce floats and break the indexing.
    """
    lags = [i * N // k for i in range(1, k)]
    return getCirculantAdj(N, lags)
def getCircleEigs(N):
    """
    Return the graph-Laplacian eigenvalues of the N-cycle, ordered by
    frequency: lambdas[0] = 0 (constant eigenvector), then each value
    2 - 2*cos(2*pi*i/N) appears twice (except the top one when N is
    even, which appears once).

    Parameters
    ----------
    N: int
        Number of vertices in the cycle

    Returns
    -------
    ndarray(N)
        The eigenvalues
    """
    lambdas = np.zeros(N)
    # Floor division: under Python 3, N/2 is a float and range() would
    # raise a TypeError (the original code was Python 2 only).
    for i in range(1, N // 2 + 1):
        val = 2 - 2 * np.cos(2 * np.pi * i / N)
        i1 = i * 2 - 1
        i2 = i * 2
        lambdas[i1] = val
        if i2 < N:  # for even N the top frequency has multiplicity 1
            lambdas[i2] = val
    return lambdas
def getMoebiusEigs(N):
    """
    Analytic Laplacian eigenvalues 3 - 2*cos(2*pi*i/N) - (-1)^i, laid
    out in frequency order like getCircleEigs (lambdas[0] = 0, pairs
    thereafter).  The formula matches a Moebius-ladder-type circulant
    (cycle plus antipodal chords) — TODO confirm against the graph
    construction used elsewhere in this file.

    Returns
    -------
    (ndarray(N), ndarray(N))
        Eigenvalues in frequency order, and the same values sorted
        ascending.
    """
    lambdas = np.zeros(N)
    # Floor division keeps range() valid under Python 3 (N/2 is a float
    # there; the original code was Python 2 only).
    for i in range(1, N // 2 + 1):
        val = 3 - 2 * np.cos(2 * np.pi * i / N) - (-1) ** i
        i1 = i * 2 - 1
        i2 = i * 2
        lambdas[i1] = val
        if i2 < N:  # top frequency has multiplicity 1 when N is even
            lambdas[i2] = val
    return (lambdas, np.sort(lambdas))
def get3WayEigs(N):
    """
    Analytic Laplacian eigenvalues 4 - 2*cos(2*pi*i/N) - 2*cos(2*pi*i/3),
    in the same frequency layout as getCircleEigs (lambdas[0] = 0,
    then pairs).  The formula suggests a degree-4 circulant combining
    cycle edges with chords a third of the way around — TODO confirm
    against the intended graph.

    Returns
    -------
    (ndarray(N), ndarray(N))
        Eigenvalues in frequency order, and the same values sorted
        ascending.
    """
    lambdas = np.zeros(N)
    # Floor division keeps range() valid under Python 3 (N/2 is a float
    # there; the original code was Python 2 only).
    for i in range(1, N // 2 + 1):
        val = 4 - 2 * np.cos(2 * np.pi * i / N) - 2 * np.cos(2 * np.pi * i / 3)
        i1 = i * 2 - 1
        i2 = i * 2
        lambdas[i1] = val
        if i2 < N:  # top frequency has multiplicity 1 when N is even
            lambdas[i2] = val
    return (lambdas, np.sort(lambdas))
if __name__ == '__main__':
    # Demo: build a circulant adjacency matrix, compute its Laplacian
    # spectrum numerically, and plot it against the analytic eigenvalues.
    N = 100
    A = getOneOnK(N, 2)
    #A = getCirculantAdj(N, [30, 60, 80])
    A = A.toarray()
    # getLaplacianEigsDense comes from the local Laplacian module imported
    # above; (w, v, L) are presumably eigenvalues, eigenvectors, and the
    # Laplacian matrix — confirm against Laplacian.py.
    (w, v, L) = getLaplacianEigsDense(A, A.shape[0])
    (lambdas, lambdassorted) = get3WayEigs(N)
    # Three panels: adjacency matrix (131), analytic eigenvalues (132),
    # numerically computed eigenvectors (133).
    plt.figure(figsize=(15, 4))
    plt.subplot(132)
    plt.plot(lambdas)
    plt.title("Eigenvalues")
    plt.xlabel("Eigenvalue Number")
    plt.ylabel("Eigenvalue")
#    plt.subplot(224)
#    plt.scatter(w, lambdassorted)
#    plt.xlabel("Numerically Computed")
#    plt.ylabel("Analytic")
#    plt.axis('equal')
#    plt.title("Checking accuracy")
    plt.subplot(131)
    plt.imshow(A, interpolation = 'nearest', cmap = 'gray')
    plt.title("Adjacency Matrix")
    plt.subplot(133)
    plt.imshow(v, cmap = 'afmhot', aspect = 'auto', interpolation = 'nearest')
    plt.xlabel("k-th Smallest Eigenvector")
    plt.title("Eigenvectors")
    plt.savefig("Eigs.svg", bbox_inches = 'tight')
| 26.146067 | 78 | 0.5578 | import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import sys
sys.path.append("..")
from Laplacian import *
def getCirculantAdj(N, lags):
I = range(N)*(len(lags)+2)
J = range(1, N+1) + range(-1, N-1)
J[N-1] = 0
J[N] = N-1
for lag in lags:
J = J + (np.mod(np.arange(N) + lag, N)).tolist()
V = np.ones(len(I))
return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
def getOneOnK(N, k):
lags = [i*N/k for i in range(1, k)]
return getCirculantAdj(N, lags)
def getCircleEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 2 - 2*np.cos(2*np.pi*i/N)
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return lambdas
def getMoebiusEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 3 - 2*np.cos(2*np.pi*i/N) - (-1)**i
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return (lambdas, np.sort(lambdas))
def get3WayEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 4 - 2*np.cos(2*np.pi*i/N) - 2*np.cos(2*np.pi*i/3)
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return (lambdas, np.sort(lambdas))
if __name__ == '__main__':
N = 100
A = getOneOnK(N, 2)
A = A.toarray()
(w, v, L) = getLaplacianEigsDense(A, A.shape[0])
(lambdas, lambdassorted) = get3WayEigs(N)
plt.figure(figsize=(15, 4))
plt.subplot(132)
plt.plot(lambdas)
plt.title("Eigenvalues")
plt.xlabel("Eigenvalue Number")
plt.ylabel("Eigenvalue")
plt.subplot(131)
plt.imshow(A, interpolation = 'nearest', cmap = 'gray')
plt.title("Adjacency Matrix")
plt.subplot(133)
plt.imshow(v, cmap = 'afmhot', aspect = 'auto', interpolation = 'nearest')
plt.xlabel("k-th Smallest Eigenvector")
plt.title("Eigenvectors")
plt.savefig("Eigs.svg", bbox_inches = 'tight')
| true | true |
f72b0b19c49d94d5feee3fd0a9c9902892c5cb86 | 28,656 | py | Python | Lib/test/test_tempfile.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_tempfile.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_tempfile.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | # tempfile.py unit tests.
import tempfile
import os
import sys
import re
import errno
import warnings
import unittest
from test import support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="mktemp", module=__name__)
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform == 'mac':
TEST_FILES = 32
elif sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
    """Common functionality shared by all tempfile test cases."""

    # The random portion of a generated name: exactly six characters
    # drawn from [a-zA-Z0-9_-], anchored at the end of the base name.
    str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")

    def failOnException(self, what, ei=None):
        # Fail the test, reporting the exception currently being handled
        # (or the explicitly supplied exc_info triple ei).
        if ei is None:
            ei = sys.exc_info()
        self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))

    def nameCheck(self, name, dir, pre, suf):
        """Assert that name lives in dir, starts with pre, ends with suf,
        and has a well-formed random middle portion."""
        (ndir, nbase) = os.path.split(name)
        npre = nbase[:len(pre)]
        nsuf = nbase[len(nbase)-len(suf):]
        # check for equality of the absolute paths!
        self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
                         "file '%s' not in directory '%s'" % (name, dir))
        self.assertEqual(npre, pre,
                         "file '%s' does not begin with '%s'" % (nbase, pre))
        self.assertEqual(nsuf, suf,
                         "file '%s' does not end with '%s'" % (nbase, suf))
        # Whatever is left between prefix and suffix must be the random part.
        nbase = nbase[len(pre):len(nbase)-len(suf)]
        self.assertTrue(self.str_check.match(nbase),
                        "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
                        % nbase)
test_classes = []
class test_exports(TC):
    def test_exports(self):
        # There are no surprising symbols in the tempfile module:
        # every public name must be one of the documented exports.
        expected = {
            "NamedTemporaryFile",
            "TemporaryFile",
            "mkstemp",
            "mkdtemp",
            "mktemp",
            "TMP_MAX",
            "gettempprefix",
            "gettempdir",
            "tempdir",
            "template",
            "SpooledTemporaryFile",
        }
        unexp = [key for key in tempfile.__dict__
                 if key[0] != '_' and key not in expected]
        self.assertTrue(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal iterator object _RandomNameSequence."""

    def setUp(self):
        self.r = tempfile._RandomNameSequence()

    def test_get_six_char_str(self):
        # _RandomNameSequence returns a six-character string
        s = next(self.r)
        self.nameCheck(s, '', '', '')

    def test_many(self):
        # _RandomNameSequence returns no duplicate strings (stochastic)
        # 'seen' replaces the original local named 'dict', which shadowed
        # the builtin.
        seen = {}
        r = self.r
        for i in range(TEST_FILES):
            s = next(r)
            self.nameCheck(s, '', '', '')
            self.assertFalse(s in seen)
            seen[s] = 1

    def supports_iter(self):
        # _RandomNameSequence supports the iterator protocol
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            # Bug fix: failOnException is a method of TC; the original
            # unqualified call would itself raise NameError instead of
            # reporting the iteration failure.
            self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Test the internal function _candidate_tempdir_list."""

    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        cand = tempfile._candidate_tempdir_list()
        self.assertFalse(len(cand) == 0)
        for c in cand:
            self.assertTrue(isinstance(c, str),
                            "%s is not a string" % c)

    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories
        # Make sure the interesting environment variables are all set.
        with support.EnvironmentVarGuard() as env:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    # Guarantee each variable has *some* value so the
                    # membership check below is meaningful.
                    env[envname] = os.path.abspath(envname)
            cand = tempfile._candidate_tempdir_list()
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname: raise ValueError
                self.assertTrue(dirname in cand)
            try:
                dirname = os.getcwd()
            except (AttributeError, os.error):
                dirname = os.curdir
            self.assertTrue(dirname in cand)
        # Not practical to try to verify the presence of OS-specific
        # paths in this list.
test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
    """Test the internal function _get_candidate_names."""

    def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        names = tempfile._get_candidate_names()
        self.assertTrue(isinstance(names, tempfile._RandomNameSequence))

    def test_same_thing(self):
        # _get_candidate_names behaves as a singleton: every call hands
        # back the very same object
        first = tempfile._get_candidate_names()
        second = tempfile._get_candidate_names()
        self.assertTrue(first is second)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
"""Test the internal function _mkstemp_inner."""
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
try:
file = self.mkstemped(dir, pre, suf, bin)
except:
self.failOnException("_mkstemp_inner")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# _mkstemp_inner can create files
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
# _mkstemp_inner can create many files (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
# _mkstemp_inner can create files in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
# _mkstemp_inner creates files with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx', 'mac'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
# _mkstemp_inner file handles are not inherited by child processes
if not has_spawnl:
return # ugh, can't use SkipTest.
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
# but an arg with embedded spaces should be decorated with double
# quotes on each end
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
# _mkstemp_inner can create files in text mode
if not has_textmode:
return # ugh, can't use SkipTest.
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
"""Test gettempprefix()."""
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertTrue(isinstance(p, str))
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
try:
fd = os.open(p, os.O_RDWR | os.O_CREAT)
except:
self.failOnException("os.open")
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
"""Test gettempdir()."""
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
try:
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
except:
self.failOnException("create file in %s" % tempfile.gettempdir())
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
"""Test mkstemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
except:
self.failOnException("mkstemp")
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
"""Test mkdtemp()."""
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("mkdtemp")
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777 # Mask off sticky bits inherited from /tmp
expected = 0o700
if sys.platform in ('win32', 'os2emx', 'mac'):
# There's no distinction among 'user', 'group' and 'world';
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
"""Test mktemp()."""
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
# mysteriously appeared in the meanwhile.
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
try:
file = self.mktemped(self.dir, pre, suf)
except:
self.failOnException("mktemp")
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
# mktemp can choose usable file names
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
# mktemp can choose many usable file names (stochastic)
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
## def test_warning(self):
## # mktemp issues a warning when used
## warnings.filterwarnings("error",
## category=RuntimeWarning,
## message="mktemp")
## self.assertRaises(RuntimeWarning,
## tempfile.mktemp, dir=self.dir)
test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
"""Test NamedTemporaryFile()."""
def do_create(self, dir=None, pre="", suf="", delete=True):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
delete=delete)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
# NamedTemporaryFile can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
# NamedTemporaryFile creates files with names
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
# A NamedTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
# Tests that delete-on-close can be disabled
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
# A NamedTemporaryFile can be closed many times without error
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_context_manager(self):
# A NamedTemporaryFile can be used as a context manager
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
# How to test the mode and bufsize parameters?
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
"""Test SpooledTemporaryFile()."""
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("SpooledTemporaryFile")
return file
def test_basic(self):
# SpooledTemporaryFile can create files
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
# A SpooledTemporaryFile is deleted when closed
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
# A SpooledTemporaryFile can be written to multiple within the max_size
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
# Verify writelines with a SpooledTemporaryFile
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
# A SpooledTemporaryFile should hold exactly max_size bytes, and roll
# over afterward
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
# A SpooledTemporaryFile that is written late in the file will extend
# when that occurs
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
# A SpooledTemporaryFile should roll over to a real file on fileno()
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_multiple_close_after_rollover(self):
# A SpooledTemporaryFile can be closed many times without error
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_bound_methods(self):
# It should be OK to steal a bound method from a SpooledTemporaryFile
# and use it independently; when the file rolls over, those bound
# methods should continue to function
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_text_mode(self):
# Creating a SpooledTemporaryFile with a text mode should produce
# a file object reading and writing (Unicode) text strings.
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
# Check that Ctrl+Z doesn't truncate the file
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
"""Test TemporaryFile()."""
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
try:
tempfile.TemporaryFile()
except:
self.failOnException("TemporaryFile")
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
ei = sys.exc_info()
# cleanup
f.close()
os.rmdir(dir)
self.failOnException("rmdir", ei)
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
def test_main():
support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
| 31.559471 | 100 | 0.576947 |
import tempfile
import os
import sys
import re
import errno
import warnings
import unittest
from test import support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="mktemp", module=__name__)
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
if sys.platform == 'mac':
TEST_FILES = 32
elif sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
class TC(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def failOnException(self, what, ei=None):
if ei is None:
ei = sys.exc_info()
self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
test_classes = []
class test_exports(TC):
    """Guard the module's public API surface."""

    def test_exports(self):
        # Every non-underscore name in tempfile must appear in this
        # whitelist; adding a new public name requires updating the test.
        dict = tempfile.__dict__
        expected = {
            "NamedTemporaryFile" : 1,
            "TemporaryFile" : 1,
            "mkstemp" : 1,
            "mkdtemp" : 1,
            "mktemp" : 1,
            "TMP_MAX" : 1,
            "gettempprefix" : 1,
            "gettempdir" : 1,
            "tempdir" : 1,
            "template" : 1,
            "SpooledTemporaryFile" : 1
        }
        unexp = []
        for key in dict:
            if key[0] != '_' and key not in expected:
                unexp.append(key)
        self.assertTrue(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Tests for the internal _RandomNameSequence iterator."""

    def setUp(self):
        self.r = tempfile._RandomNameSequence()
    def test_get_six_char_str(self):
        # A single draw yields a bare six-character random name.
        s = next(self.r)
        self.nameCheck(s, '', '', '')
    def test_many(self):
        # Successive draws are well-formed and (statistically) unique.
        dict = {}
        r = self.r
        for i in range(TEST_FILES):
            s = next(r)
            self.nameCheck(s, '', '', '')
            self.assertFalse(s in dict)
            dict[s] = 1
    def supports_iter(self):
        # The sequence must be usable directly in a for loop.
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            # Bug fix: this previously called the bare name
            # failOnException, which would raise NameError instead of
            # reporting the real iteration failure.
            self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Tests for the internal _candidate_tempdir_list() helper."""

    def test_nonempty_list(self):
        # The candidate list is never empty and contains only strings.
        cand = tempfile._candidate_tempdir_list()
        self.assertFalse(len(cand) == 0)
        for c in cand:
            self.assertTrue(isinstance(c, str),
                            "%s is not a string" % c)
    def test_wanted_dirs(self):
        # TMPDIR/TEMP/TMP environment values and the current directory must
        # all appear among the candidates.
        with support.EnvironmentVarGuard() as env:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    env[envname] = os.path.abspath(envname)
            cand = tempfile._candidate_tempdir_list()
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                # Every variable was set by the guard above, so this is a
                # sanity check that should never trip.
                if not dirname: raise ValueError
                self.assertTrue(dirname in cand)
            try:
                dirname = os.getcwd()
            except (AttributeError, os.error):
                dirname = os.curdir
            self.assertTrue(dirname in cand)
test_classes.append(test__candidate_tempdir_list)
class test__get_candidate_names(TC):
    """Tests for the internal _get_candidate_names() accessor."""

    def test_retval(self):
        # The accessor hands back the module's random-name iterator.
        candidate = tempfile._get_candidate_names()
        self.assertTrue(isinstance(candidate, tempfile._RandomNameSequence))
    def test_same_thing(self):
        # Repeated calls must return one shared singleton object.
        first, second = (tempfile._get_candidate_names() for _ in range(2))
        self.assertTrue(first is second)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
    """Tests for the internal tempfile._mkstemp_inner() primitive."""

    class mkstemped:
        # RAII-style helper: creates a file via _mkstemp_inner and removes
        # it on garbage collection.  os.close/os.unlink are bound as class
        # attributes so __del__ still works during interpreter shutdown.
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags
        _close = os.close
        _unlink = os.unlink
        def __init__(self, dir, pre, suf, bin):
            if bin: flags = self._bflags
            else: flags = self._tflags
            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
        def write(self, str):
            os.write(self.fd, str)
        def __del__(self):
            self._close(self.fd)
            self._unlink(self.name)
    def do_create(self, dir=None, pre="", suf="", bin=1):
        # Create a temp file via the wrapper above and validate its name.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = self.mkstemped(dir, pre, suf, bin)
        except:
            self.failOnException("_mkstemp_inner")
        self.nameCheck(file.name, dir, pre, suf)
        return file
    def test_basic(self):
        # _mkstemp_inner can create writable files with pre/suffixes.
        self.do_create().write(b"blat")
        self.do_create(pre="a").write(b"blat")
        self.do_create(suf="b").write(b"blat")
        self.do_create(pre="a", suf="b").write(b"blat")
        self.do_create(pre="aa", suf=".txt").write(b"blat")
    def test_basic_many(self):
        # Many files can be created without name collisions (stochastic).
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")
    def test_choose_directory(self):
        # An explicit target directory is honoured.
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write(b"blat")
        finally:
            os.rmdir(dir)
    def test_file_mode(self):
        # Created files must be private to the user (0o600 on POSIX).
        if not has_stat:
            return
        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0o600
        if sys.platform in ('win32', 'os2emx', 'mac'):
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits across all three positions.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)
    def test_noinherit(self):
        # The created fd must not be inherited by child processes.
        if not has_spawnl:
            return
        if support.verbose:
            v="v"
        else:
            v="q"
        file = self.do_create()
        fd = "%d" % file.fd
        try:
            me = __file__
        except NameError:
            me = sys.argv[0]
        # We have to exec something, so that FD_CLOEXEC will take
        # effect. The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")
        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but the executable and script paths themselves must be.
        if sys.platform in ('win32',):
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable
        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        self.assertFalse(retval < 0,
                         "child process caught fatal signal %d" % -retval)
        self.assertFalse(retval > 0, "child process reports failure %d"%retval)
    def test_textmode(self):
        # Text mode is honoured where the platform distinguishes it.
        if not has_textmode:
            return
        # A text file is truncated at the first Ctrl+Z byte
        f = self.do_create(bin=0)
        f.write(b"blat\x1a")
        f.write(b"extra\n")
        os.lseek(f.fd, 0, os.SEEK_SET)
        self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
    """Tests for gettempprefix()."""

    def test_sane_template(self):
        # gettempprefix returns a nonempty prefix string
        p = tempfile.gettempprefix()
        self.assertTrue(isinstance(p, str))
        self.assertTrue(len(p) > 0)
    def test_usable_template(self):
        # gettempprefix returns a usable prefix string
        # Create a temp directory, avoiding use of the prefix.
        # Then attempt to create a file whose name is
        # prefix + 'xxxxxx.xxx' in that directory.
        p = tempfile.gettempprefix() + "xxxxxx.xxx"
        d = tempfile.mkdtemp(prefix="")
        try:
            p = os.path.join(d, p)
            try:
                fd = os.open(p, os.O_RDWR | os.O_CREAT)
            except:
                self.failOnException("os.open")
            os.close(fd)
            os.unlink(p)
        finally:
            os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
    """Tests for gettempdir()."""

    def test_directory_exists(self):
        # gettempdir returns a directory which exists
        dir = tempfile.gettempdir()
        self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
                        "%s is not an absolute path" % dir)
        self.assertTrue(os.path.isdir(dir),
                        "%s is not a directory" % dir)
    def test_directory_writable(self):
        # gettempdir returns a directory writable by the user
        # sneaky: just instantiate a NamedTemporaryFile, which
        # defaults to writing into the directory returned by
        # gettempdir.
        try:
            file = tempfile.NamedTemporaryFile()
            file.write(b"blat")
            file.close()
        except:
            self.failOnException("create file in %s" % tempfile.gettempdir())
    def test_same_thing(self):
        # gettempdir always returns the same object
        a = tempfile.gettempdir()
        b = tempfile.gettempdir()
        self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
    """Tests for mkstemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        # Create a file via mkstemp, validate its name, then clean up.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            (fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
            (ndir, nbase) = os.path.split(name)
            adir = os.path.abspath(dir)
            self.assertEqual(adir, ndir,
                "Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
        except:
            self.failOnException("mkstemp")
        try:
            self.nameCheck(name, dir, pre, suf)
        finally:
            os.close(fd)
            os.unlink(name)
    def test_basic(self):
        # mkstemp can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
        self.do_create(dir=".")
    def test_choose_directory(self):
        # mkstemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir)
        finally:
            os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
    """Tests for mkdtemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        # Create a directory via mkdtemp and validate its name; the caller
        # is responsible for removing the returned directory.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("mkdtemp")
        try:
            self.nameCheck(name, dir, pre, suf)
            return name
        except:
            os.rmdir(name)
            raise
    def test_basic(self):
        # mkdtemp can create directories
        os.rmdir(self.do_create())
        os.rmdir(self.do_create(pre="a"))
        os.rmdir(self.do_create(suf="b"))
        os.rmdir(self.do_create(pre="a", suf="b"))
        os.rmdir(self.do_create(pre="aa", suf=".txt"))
    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        extant = list(range(TEST_FILES))
        try:
            for i in extant:
                extant[i] = self.do_create(pre="aa")
        finally:
            # Entries still holding their original int were never created.
            for i in extant:
                if(isinstance(i, str)):
                    os.rmdir(i)
    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=dir))
        finally:
            os.rmdir(dir)
    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        if not has_stat:
            return  # ugh, can't use SkipTest.
        dir = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dir).st_mode)
            mode &= 0o777
            expected = 0o700
            if sys.platform in ('win32', 'os2emx', 'mac'):
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
try:
file = self.mktemped(self.dir, pre, suf)
except:
self.failOnException("mktemp")
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
te)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
    def test_basic(self):
        # NamedTemporaryFile can create files with various pre/suffixes.
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
    def test_creates_named(self):
        # The file must actually exist on disk under its reported name.
        f = tempfile.NamedTemporaryFile()
        self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s does not exist" % f.name)
    def test_del_on_close(self):
        # With delete=True (the default) the file vanishes on close().
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.NamedTemporaryFile(dir=dir)
            f.write(b'blat')
            f.close()
            self.assertFalse(os.path.exists(f.name),
                        "NamedTemporaryFile %s exists after close" % f.name)
        finally:
            os.rmdir(dir)
    def test_dis_del_on_close(self):
        # With delete=False the file must survive close().
        dir = tempfile.mkdtemp()
        tmp = None
        try:
            f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
            tmp = f.name
            f.write(b'blat')
            f.close()
            self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s missing after close" % f.name)
        finally:
            if tmp is not None:
                os.unlink(tmp)
            os.rmdir(dir)
    def test_multiple_close(self):
        # Closing more than once must be harmless.
        f = tempfile.NamedTemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_context_manager(self):
        # The file works as a context manager, disappears on exit, and a
        # closed file refuses to be re-entered.
        with tempfile.NamedTemporaryFile() as f:
            self.assertTrue(os.path.exists(f.name))
        self.assertFalse(os.path.exists(f.name))
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
    """Tests for SpooledTemporaryFile: buffers in memory, rolls to disk."""

    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        # Build a SpooledTemporaryFile, failing the test on any exception.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("SpooledTemporaryFile")
        return file
    def test_basic(self):
        # Creation alone never rolls over to disk.
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)
    def test_del_on_close(self):
        # After rollover the on-disk file disappears on close().
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write(b'blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
                             "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)
    def test_rewrite_small(self):
        # Overwriting the same small region never triggers rollover.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write(b'x' * 20)
        self.assertFalse(f._rolled)
    def test_write_sequential(self):
        # Rollover happens exactly when the size limit is exceeded.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write(b'x' * 20)
        self.assertFalse(f._rolled)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_writelines(self):
        # writelines() concatenates its chunks.
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')
    def test_writelines_sequential(self):
        # writelines() participates in the rollover accounting too.
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_sparse(self):
        # Seeking past the limit alone does not roll over; writing does.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)
    def test_fileno(self):
        # Asking for a real fd forces rollover to an actual file.
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        self.assertTrue(f.fileno() > 0)
        self.assertTrue(f._rolled)
    def test_multiple_close_before_rollover(self):
        # close() is idempotent while still spooled in memory.
        f = tempfile.SpooledTemporaryFile()
        f.write(b'abc\n')
        self.assertFalse(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_multiple_close_after_rollover(self):
        # close() is idempotent after rollover as well.
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        self.assertTrue(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    def test_bound_methods(self):
        # Methods bound before rollover must keep working afterwards.
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek
        write(b"a" * 35)
        write(b"b" * 35)
        seek(0, 0)
        self.assertEqual(read(70), b'a'*35 + b'b'*35)
    def test_text_mode(self):
        # Text mode round-trips data before and after rollover.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
        f.write("abc\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\n")
        f.write("def\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\n")
        f.write("xyzzy\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
        f.write("foo\x1abar\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
    def test_text_newline_and_encoding(self):
        # newline/encoding arguments survive the rollover to disk.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
                                          newline='', encoding='utf-8')
        f.write("\u039B\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n")
        self.assertFalse(f._rolled)
        f.write("\u039B" * 20 + "\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
        self.assertTrue(f._rolled)
    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write(b'abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
    """Tests for the anonymous TemporaryFile."""

    def test_basic(self):
        # TemporaryFile can create files
        # No point in testing the name params - the file has no name.
        try:
            tempfile.TemporaryFile()
        except:
            self.failOnException("TemporaryFile")
    def test_has_no_name(self):
        # TemporaryFile creates files with no names (on this system)
        dir = tempfile.mkdtemp()
        f = tempfile.TemporaryFile(dir=dir)
        f.write(b'blat')
        # Sneaky: because this file has no name, it should not prevent
        # us from removing the directory it was created in.
        try:
            os.rmdir(dir)
        except:
            ei = sys.exc_info()
            # cleanup
            f.close()
            os.rmdir(dir)
            self.failOnException("rmdir", ei)
    def test_multiple_close(self):
        # A TemporaryFile can be closed many times without error
        f = tempfile.TemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except:
            self.failOnException("close")
    # How to test the mode and bufsize parameters?
    def test_mode_and_encoding(self):
        # mode/encoding/newline arguments must round-trip data exactly.
        def roundtrip(input, *args, **kwargs):
            with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
                fileobj.write(input)
                fileobj.seek(0)
                self.assertEqual(input, fileobj.read())
        roundtrip(b"1234", "w+b")
        roundtrip("abdc\n", "w+")
        roundtrip("\u039B", "w+", encoding="utf-16")
        roundtrip("foo\r\n", "w+", newline="")
# Register the unnamed-TemporaryFile tests only when the platform provides a
# distinct implementation (otherwise the class above duplicates coverage).
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
    test_classes.append(test_TemporaryFile)
def test_main():
    """regrtest entry point: run every registered test class."""
    support.run_unittest(*test_classes)
if __name__ == "__main__":
    test_main()
| true | true |
f72b0b29ec60b1e3fa0dcfba14c0246d70315797 | 1,173 | py | Python | peamt/features/polyphony.py | adrienycart/PEAMT | d3ae41e86dedeb64fcf54e2454c9feee993574f9 | [
"MIT"
] | 5 | 2020-05-28T18:03:58.000Z | 2021-11-01T13:14:26.000Z | peamt/features/polyphony.py | adrienycart/PEAMT | d3ae41e86dedeb64fcf54e2454c9feee993574f9 | [
"MIT"
] | 5 | 2020-09-26T01:12:41.000Z | 2022-02-10T02:01:25.000Z | peamt/features/polyphony.py | adrienycart/PEAMT | d3ae41e86dedeb64fcf54e2454c9feee993574f9 | [
"MIT"
] | null | null | null | import numpy as np
########################################
### Polyphony --- discarded
########################################
def polyphony_level_diff(roll_output, roll_target):
    """Per-frame polyphony gap between output and target piano-rolls.

    Both rolls are (pitch, time) arrays; polyphony is the number of active
    pitches in a frame.  Returns (mean, std, min, max) of |output - target|.
    """
    frame_diff = np.abs(np.sum(roll_output, axis=0) - np.sum(roll_target, axis=0))
    return np.mean(frame_diff), np.std(frame_diff), np.min(frame_diff), np.max(frame_diff)
# discarded
def false_negative_polyphony_level(roll_target, intervals_target, match):
    """Average polyphony of the target roll during each unmatched target note.

    Args:
        roll_target: (pitch, time) binary piano-roll sampled at 100 frames/s.
        intervals_target: (n_notes, 2) array of [onset, offset] times in seconds.
        match: list of (target_index, output_index) pairs from note matching.

    Returns:
        A list with, for every unmatched (false-negative) target note, the
        mean number of simultaneously active pitches over its duration.
    """
    fs = 100
    if len(match) == 0:
        # Bug fix: the original built `range(intervals_target)` (range of an
        # array), which raises TypeError; with no matches every note is
        # unmatched.
        unmatched_targets = list(range(len(intervals_target)))
    else:
        matched_targets, matched_outputs = zip(*match)
        unmatched_targets = list(set(range(len(intervals_target))) - set(matched_targets))
    unmatched_intervals = intervals_target[unmatched_targets, :]
    all_avg_poly = []
    for [start, end] in unmatched_intervals:
        start_idx = int(round(start * fs))
        end_idx = int(round(end * fs))
        # Mean over frames of the per-frame active-pitch count.
        avg_poly = np.mean(np.sum(roll_target[:, start_idx:end_idx], axis=0))
        all_avg_poly += [avg_poly]
    return all_avg_poly
| 30.076923 | 87 | 0.656436 | import numpy as np
| true | true |
f72b0bcdbe61d8b42e2ce9462ada3ba434fd8b03 | 2,078 | py | Python | tests/common/test_run/round_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/round_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/round_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import secrets
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import round
from tests.common.gen_random import random_gaussian
# Shared CSPRNG used by gen_data() to randomise the half-boundary case.
secretsGenerator = secrets.SystemRandom()
def round_run(shape, dtype, attrs):
    """Build (and optionally launch) the `round` operator and validate it.

    Mirrors the akg test-run convention: in tuning mode the kernel module is
    returned (with test data when actually tuning); otherwise the kernel is
    launched and its output compared against the numpy expectation.
    """
    build_shapes = [shape]
    build_dtypes = [dtype]
    if 'tuning' in attrs.keys():
        # Tuning path: build with the caller-supplied kernel name.
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(round.round_value, build_shapes, build_dtypes,
                                  kernel_name=kernel_name, attrs=attrs, tuning=t)
        if not t:
            return mod
        expect, input, output = gen_data(dtype, shape)
        return mod, expect, (input, output)
    # Normal path: build, run on device, and compare against numpy.
    mod = utils.op_build_test(round.round_value, build_shapes, build_dtypes,
                              kernel_name='round', attrs=attrs)
    expect, input, output = gen_data(dtype, shape)
    output = utils.mod_launch(mod, (input, output), expect=expect)
    return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
    """Generate a random input plus the expected numpy rounding result."""
    input = random_gaussian(shape, miu=1, sigma=10).astype(dtype)
    # Roughly half the time, force every element onto an exact .5 boundary
    # to exercise round-half behaviour.
    if secretsGenerator.randint(0, 9) % 2 == 0:
        input = (input.astype('int32') + 0.5).astype(dtype)
    expect = np.round(input.astype(np.float16)).astype("int32")
    # NOTE(review): filling an int32 array with NaN relies on legacy numpy
    # casting; newer numpy versions raise here — confirm the pinned version.
    output = np.full(shape, np.nan, "int32")
    return expect, input, output
| 39.961538 | 120 | 0.702117 |
import numpy as np
import secrets
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import round
from tests.common.gen_random import random_gaussian
secretsGenerator = secrets.SystemRandom()
def round_run(shape, dtype, attrs):
    # NOTE(review): comment-stripped duplicate of round_run defined earlier
    # in this dump; behaviour is identical.
    in_shape = [shape]
    in_dtype = [dtype]
    if 'tuning' in attrs.keys():
        # Tuning path: build with the caller-supplied kernel name.
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(round.round_value, in_shape, in_dtype, kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            expect, input, output = gen_data(dtype, shape)
            return mod, expect, (input, output)
        else:
            return mod
    else:
        # Normal path: build, run on device, and compare against numpy.
        mod = utils.op_build_test(round.round_value, in_shape, in_dtype, kernel_name='round', attrs=attrs)
        expect, input, output = gen_data(dtype, shape)
        output = utils.mod_launch(mod, (input, output), expect=expect)
        return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
    # NOTE(review): comment-stripped duplicate of gen_data defined earlier in
    # this dump; behaviour is identical.
    input = random_gaussian(shape, miu=1, sigma=10).astype(dtype)
    a = secretsGenerator.randint(0, 9)
    if a % 2 == 0:
        # Half of the time, push every value onto an exact .5 boundary.
        input = input.astype('int32') + 0.5
        input = input.astype(dtype)
    input_f16 = input.astype(np.float16)
    expect = np.round(input_f16).astype("int32")
    # NOTE(review): filling an int32 array with NaN relies on legacy numpy
    # casting; newer numpy versions raise here — confirm the pinned version.
    output = np.full(shape, np.nan, "int32")
    return expect, input, output
| true | true |
f72b0bdf689c564a67a58c7ea477390e6e8c6c23 | 24,215 | py | Python | homeassistant/components/mqtt/fan.py | wlcrs/core | cf27b82d2fdce406fda3b1b9cd52d42d7f7d00d6 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mqtt/fan.py | wlcrs/core | cf27b82d2fdce406fda3b1b9cd52d42d7f7d00d6 | [
"Apache-2.0"
] | 7 | 2022-03-01T06:32:08.000Z | 2022-03-31T07:20:49.000Z | homeassistant/components/mqtt/fan.py | fblondeau/home-assistant | a8da0eedd32ac8198f06d4e32622d0f8b40b4a41 | [
"Apache-2.0"
] | null | null | null | """Support for MQTT fans."""
from __future__ import annotations
import asyncio
import functools
import logging
import math
import voluptuous as vol
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
FanEntity,
FanEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import subscription
from .config import MQTT_RW_SCHEMA
from .const import (
CONF_COMMAND_TEMPLATE,
CONF_COMMAND_TOPIC,
CONF_ENCODING,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_STATE_VALUE_TEMPLATE,
PAYLOAD_NONE,
)
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttEntity,
async_get_platform_config_from_yaml,
async_setup_entry_helper,
async_setup_platform_helper,
warn_for_legacy_schema,
)
from .models import MqttCommandTemplate, MqttValueTemplate
from .util import valid_publish_topic, valid_subscribe_topic
# --- Percentage (speed) control keys ---------------------------------------
CONF_PERCENTAGE_STATE_TOPIC = "percentage_state_topic"
CONF_PERCENTAGE_COMMAND_TOPIC = "percentage_command_topic"
CONF_PERCENTAGE_VALUE_TEMPLATE = "percentage_value_template"
CONF_PERCENTAGE_COMMAND_TEMPLATE = "percentage_command_template"
CONF_PAYLOAD_RESET_PERCENTAGE = "payload_reset_percentage"
# Bounds of the device's native speed range, mapped onto 0-100 %.
CONF_SPEED_RANGE_MIN = "speed_range_min"
CONF_SPEED_RANGE_MAX = "speed_range_max"
# --- Preset-mode keys -------------------------------------------------------
CONF_PRESET_MODE_STATE_TOPIC = "preset_mode_state_topic"
CONF_PRESET_MODE_COMMAND_TOPIC = "preset_mode_command_topic"
CONF_PRESET_MODE_VALUE_TEMPLATE = "preset_mode_value_template"
CONF_PRESET_MODE_COMMAND_TEMPLATE = "preset_mode_command_template"
CONF_PRESET_MODES_LIST = "preset_modes"
CONF_PAYLOAD_RESET_PRESET_MODE = "payload_reset_preset_mode"
# --- Legacy speed keys (removed options; kept for the cv.removed() checks) --
CONF_SPEED_STATE_TOPIC = "speed_state_topic"
CONF_SPEED_COMMAND_TOPIC = "speed_command_topic"
CONF_SPEED_VALUE_TEMPLATE = "speed_value_template"
# --- Oscillation keys -------------------------------------------------------
CONF_OSCILLATION_STATE_TOPIC = "oscillation_state_topic"
CONF_OSCILLATION_COMMAND_TOPIC = "oscillation_command_topic"
CONF_OSCILLATION_VALUE_TEMPLATE = "oscillation_value_template"
CONF_OSCILLATION_COMMAND_TEMPLATE = "oscillation_command_template"
CONF_PAYLOAD_OSCILLATION_ON = "payload_oscillation_on"
CONF_PAYLOAD_OSCILLATION_OFF = "payload_oscillation_off"
CONF_PAYLOAD_OFF_SPEED = "payload_off_speed"
CONF_PAYLOAD_LOW_SPEED = "payload_low_speed"
CONF_PAYLOAD_MEDIUM_SPEED = "payload_medium_speed"
CONF_PAYLOAD_HIGH_SPEED = "payload_high_speed"
CONF_SPEED_LIST = "speeds"
# --- Defaults ---------------------------------------------------------------
DEFAULT_NAME = "MQTT Fan"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_RESET = "None"
DEFAULT_OPTIMISTIC = False
DEFAULT_SPEED_RANGE_MIN = 1
DEFAULT_SPEED_RANGE_MAX = 100
OSCILLATE_ON_PAYLOAD = "oscillate_on"
OSCILLATE_OFF_PAYLOAD = "oscillate_off"
# Fan state attributes excluded from the extra MQTT attributes mechanism
# (consumed as _attributes_extra_blocked on MqttFan below; exact semantics
# live in the mixins module — confirm there).
MQTT_FAN_ATTRIBUTES_BLOCKED = frozenset(
    {
        fan.ATTR_DIRECTION,
        fan.ATTR_OSCILLATING,
        fan.ATTR_PERCENTAGE_STEP,
        fan.ATTR_PERCENTAGE,
        fan.ATTR_PRESET_MODE,
        fan.ATTR_PRESET_MODES,
    }
)
_LOGGER = logging.getLogger(__name__)
def valid_speed_range_configuration(config):
    """Validate that the fan speed_range configuration is valid, throws if it isn't."""
    range_min = config.get(CONF_SPEED_RANGE_MIN)
    range_max = config.get(CONF_SPEED_RANGE_MAX)
    if range_min == 0:
        raise ValueError("speed_range_min must be > 0")
    if range_min >= range_max:
        raise ValueError("speed_range_max must be > speed_range_min")
    return config
def valid_preset_mode_configuration(config):
    """Validate that the preset mode reset payload is not one of the preset modes."""
    reset_payload = config.get(CONF_PAYLOAD_RESET_PRESET_MODE)
    preset_modes = config.get(CONF_PRESET_MODES_LIST)
    if reset_payload in preset_modes:
        raise ValueError("preset_modes must not contain payload_reset_preset_mode")
    return config
# Shared option schema for both modern-YAML and MQTT-discovery configuration;
# the platform-specific variants below wrap it with extra validators.
_PLATFORM_SCHEMA_BASE = MQTT_RW_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
        vol.Optional(CONF_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_OSCILLATION_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_OSCILLATION_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_PERCENTAGE_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_PERCENTAGE_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_PERCENTAGE_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_PERCENTAGE_VALUE_TEMPLATE): cv.template,
        # CONF_PRESET_MODE_COMMAND_TOPIC and CONF_PRESET_MODES_LIST must be used together
        vol.Inclusive(
            CONF_PRESET_MODE_COMMAND_TOPIC, "preset_modes"
        ): valid_publish_topic,
        vol.Inclusive(
            CONF_PRESET_MODES_LIST, "preset_modes", default=[]
        ): cv.ensure_list,
        vol.Optional(CONF_PRESET_MODE_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_PRESET_MODE_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_PRESET_MODE_VALUE_TEMPLATE): cv.template,
        vol.Optional(
            CONF_SPEED_RANGE_MIN, default=DEFAULT_SPEED_RANGE_MIN
        ): cv.positive_int,
        vol.Optional(
            CONF_SPEED_RANGE_MAX, default=DEFAULT_SPEED_RANGE_MAX
        ): cv.positive_int,
        vol.Optional(
            CONF_PAYLOAD_RESET_PERCENTAGE, default=DEFAULT_PAYLOAD_RESET
        ): cv.string,
        vol.Optional(
            CONF_PAYLOAD_RESET_PRESET_MODE, default=DEFAULT_PAYLOAD_RESET
        ): cv.string,
        vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
        vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
        vol.Optional(
            CONF_PAYLOAD_OSCILLATION_OFF, default=OSCILLATE_OFF_PAYLOAD
        ): cv.string,
        vol.Optional(
            CONF_PAYLOAD_OSCILLATION_ON, default=OSCILLATE_ON_PAYLOAD
        ): cv.string,
        vol.Optional(CONF_SPEED_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_SPEED_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
    }
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
# Configuring MQTT Fans under the fan platform key is deprecated in HA Core 2022.6
PLATFORM_SCHEMA = vol.All(
    cv.PLATFORM_SCHEMA.extend(_PLATFORM_SCHEMA_BASE.schema),
    valid_speed_range_configuration,
    valid_preset_mode_configuration,
    warn_for_legacy_schema(fan.DOMAIN),
)
# Schema for the modern YAML path (mqtt: fan: ...).
PLATFORM_SCHEMA_MODERN = vol.All(
    _PLATFORM_SCHEMA_BASE,
    valid_speed_range_configuration,
    valid_preset_mode_configuration,
)
# Schema applied to MQTT discovery payloads; unknown keys are dropped.
DISCOVERY_SCHEMA = vol.All(
    # CONF_SPEED_COMMAND_TOPIC, CONF_SPEED_LIST, CONF_SPEED_STATE_TOPIC, CONF_SPEED_VALUE_TEMPLATE and
    # Speeds SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH SPEED_OFF,
    # are no longer supported, support was removed in release 2021.12
    cv.removed(CONF_PAYLOAD_HIGH_SPEED),
    cv.removed(CONF_PAYLOAD_LOW_SPEED),
    cv.removed(CONF_PAYLOAD_MEDIUM_SPEED),
    cv.removed(CONF_SPEED_COMMAND_TOPIC),
    cv.removed(CONF_SPEED_LIST),
    cv.removed(CONF_SPEED_STATE_TOPIC),
    cv.removed(CONF_SPEED_VALUE_TEMPLATE),
    _PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA),
    valid_speed_range_configuration,
    valid_preset_mode_configuration,
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up MQTT fans configured under the fan platform key (deprecated)."""
    # Deprecated in HA Core 2022.6; kept as a thin shim so YAML that still
    # lists MQTT fans under the `fan:` platform key keeps working.
    await async_setup_platform_helper(
        hass, fan.DOMAIN, config, async_add_entities, _async_setup_entity
    )
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up MQTT fans from YAML configuration and via MQTT discovery."""
    # Instantiate every fan declared under the modern YAML schema; the
    # entities are created concurrently.
    yaml_configs = await async_get_platform_config_from_yaml(
        hass, fan.DOMAIN, PLATFORM_SCHEMA_MODERN
    )
    setup_tasks = [
        _async_setup_entity(hass, async_add_entities, yaml_config, config_entry)
        for yaml_config in yaml_configs
    ]
    await asyncio.gather(*setup_tasks)
    # Register the discovery hook so fans announced over MQTT are added too.
    discovery_setup = functools.partial(
        _async_setup_entity, hass, async_add_entities, config_entry=config_entry
    )
    await async_setup_entry_helper(hass, fan.DOMAIN, discovery_setup, DISCOVERY_SCHEMA)
async def _async_setup_entity(
    hass, async_add_entities, config, config_entry=None, discovery_data=None
):
    """Create a single MqttFan entity and hand it to Home Assistant."""
    entity = MqttFan(hass, config, config_entry, discovery_data)
    async_add_entities([entity])
class MqttFan(MqttEntity, FanEntity):
"""A MQTT fan component."""
_entity_id_format = fan.ENTITY_ID_FORMAT
_attributes_extra_blocked = MQTT_FAN_ATTRIBUTES_BLOCKED
    def __init__(self, hass, config, config_entry, discovery_data):
        """Initialize the MQTT fan."""
        # Last known device state; None/0 until the first state message (or
        # an optimistic command) arrives.
        self._state = None
        self._percentage = None
        self._preset_mode = None
        self._oscillation = None
        self._supported_features = 0
        # Configuration-derived members, populated by _setup_from_config()
        # (presumably triggered from the MqttEntity initializer below with
        # `config` — confirm in the mixins module).
        self._topic = None
        self._payload = None
        self._value_templates = None
        self._command_templates = None
        self._optimistic = None
        self._optimistic_oscillation = None
        self._optimistic_percentage = None
        self._optimistic_preset_mode = None
        MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
    @staticmethod
    def config_schema():
        """Return the schema used to validate MQTT discovery payloads for fans."""
        return DISCOVERY_SCHEMA
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._speed_range = (
config.get(CONF_SPEED_RANGE_MIN),
config.get(CONF_SPEED_RANGE_MAX),
)
self._topic = {
key: config.get(key)
for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_PERCENTAGE_STATE_TOPIC,
CONF_PERCENTAGE_COMMAND_TOPIC,
CONF_PRESET_MODE_STATE_TOPIC,
CONF_PRESET_MODE_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._value_templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_VALUE_TEMPLATE),
ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_VALUE_TEMPLATE),
ATTR_OSCILLATING: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
}
self._command_templates = {
CONF_STATE: config.get(CONF_COMMAND_TEMPLATE),
ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_COMMAND_TEMPLATE),
ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_COMMAND_TEMPLATE),
ATTR_OSCILLATING: config.get(CONF_OSCILLATION_COMMAND_TEMPLATE),
}
self._payload = {
"STATE_ON": config[CONF_PAYLOAD_ON],
"STATE_OFF": config[CONF_PAYLOAD_OFF],
"OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
"OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
"PERCENTAGE_RESET": config[CONF_PAYLOAD_RESET_PERCENTAGE],
"PRESET_MODE_RESET": config[CONF_PAYLOAD_RESET_PRESET_MODE],
}
self._feature_percentage = CONF_PERCENTAGE_COMMAND_TOPIC in config
self._feature_preset_mode = CONF_PRESET_MODE_COMMAND_TOPIC in config
if self._feature_preset_mode:
self._preset_modes = config[CONF_PRESET_MODES_LIST]
else:
self._preset_modes = []
self._speed_count = (
min(int_states_in_range(self._speed_range), 100)
if self._feature_percentage
else 100
)
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
)
self._optimistic_percentage = (
optimistic or self._topic[CONF_PERCENTAGE_STATE_TOPIC] is None
)
self._optimistic_preset_mode = (
optimistic or self._topic[CONF_PRESET_MODE_STATE_TOPIC] is None
)
self._supported_features = 0
self._supported_features |= (
self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
and FanEntityFeature.OSCILLATE
)
if self._feature_percentage:
self._supported_features |= FanEntityFeature.SET_SPEED
if self._feature_preset_mode:
self._supported_features |= FanEntityFeature.PRESET_MODE
for key, tpl in self._command_templates.items():
self._command_templates[key] = MqttCommandTemplate(
tpl, entity=self
).async_render
for key, tpl in self._value_templates.items():
self._value_templates[key] = MqttValueTemplate(
tpl,
entity=self,
).async_render_with_possible_json_value
def _prepare_subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
"""Handle new received MQTT message."""
payload = self._value_templates[CONF_STATE](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty state from '%s'", msg.topic)
return
if payload == self._payload["STATE_ON"]:
self._state = True
elif payload == self._payload["STATE_OFF"]:
self._state = False
elif payload == PAYLOAD_NONE:
self._state = None
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
@callback
@log_messages(self.hass, self.entity_id)
def percentage_received(msg):
"""Handle new received MQTT message for the percentage."""
rendered_percentage_payload = self._value_templates[ATTR_PERCENTAGE](
msg.payload
)
if not rendered_percentage_payload:
_LOGGER.debug("Ignoring empty speed from '%s'", msg.topic)
return
if rendered_percentage_payload == self._payload["PERCENTAGE_RESET"]:
self._percentage = None
self.async_write_ha_state()
return
try:
percentage = ranged_value_to_percentage(
self._speed_range, int(rendered_percentage_payload)
)
except ValueError:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid speed within the speed range",
msg.payload,
msg.topic,
rendered_percentage_payload,
)
return
if percentage < 0 or percentage > 100:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid speed within the speed range",
msg.payload,
msg.topic,
rendered_percentage_payload,
)
return
self._percentage = percentage
self.async_write_ha_state()
if self._topic[CONF_PERCENTAGE_STATE_TOPIC] is not None:
topics[CONF_PERCENTAGE_STATE_TOPIC] = {
"topic": self._topic[CONF_PERCENTAGE_STATE_TOPIC],
"msg_callback": percentage_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._percentage = None
@callback
@log_messages(self.hass, self.entity_id)
def preset_mode_received(msg):
"""Handle new received MQTT message for preset mode."""
preset_mode = self._value_templates[ATTR_PRESET_MODE](msg.payload)
if preset_mode == self._payload["PRESET_MODE_RESET"]:
self._preset_mode = None
self.async_write_ha_state()
return
if not preset_mode:
_LOGGER.debug("Ignoring empty preset_mode from '%s'", msg.topic)
return
if preset_mode not in self.preset_modes:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid preset mode",
msg.payload,
msg.topic,
preset_mode,
)
return
self._preset_mode = preset_mode
self.async_write_ha_state()
if self._topic[CONF_PRESET_MODE_STATE_TOPIC] is not None:
topics[CONF_PRESET_MODE_STATE_TOPIC] = {
"topic": self._topic[CONF_PRESET_MODE_STATE_TOPIC],
"msg_callback": preset_mode_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._preset_mode = None
@callback
@log_messages(self.hass, self.entity_id)
def oscillation_received(msg):
"""Handle new received MQTT message for the oscillation."""
payload = self._value_templates[ATTR_OSCILLATING](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty oscillation from '%s'", msg.topic)
return
if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
self._oscillation = True
elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
"topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
"msg_callback": oscillation_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._oscillation = False
self._sub_state = subscription.async_prepare_subscribe_topics(
self.hass, self._sub_state, topics
)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
await subscription.async_subscribe_topics(self.hass, self._sub_state)
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def is_on(self) -> bool | None:
"""Return true if device is on."""
return self._state
@property
def percentage(self):
"""Return the current percentage."""
return self._percentage
@property
def preset_mode(self):
"""Return the current preset _mode."""
return self._preset_mode
@property
def preset_modes(self) -> list:
"""Get the list of available preset modes."""
return self._preset_modes
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return self._speed_count
@property
def oscillating(self):
"""Return the oscillation state."""
return self._oscillation
# The speed attribute deprecated in the schema, support will be removed after a quarter (2021.7)
async def async_turn_on(
self,
percentage: int = None,
preset_mode: str = None,
**kwargs,
) -> None:
"""Turn on the entity.
This method is a coroutine.
"""
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_ON"])
await self.async_publish(
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if percentage:
await self.async_set_percentage(percentage)
if preset_mode:
await self.async_set_preset_mode(preset_mode)
if self._optimistic:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity.
This method is a coroutine.
"""
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_OFF"])
await self.async_publish(
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
self._state = False
self.async_write_ha_state()
async def async_set_percentage(self, percentage: int) -> None:
"""Set the percentage of the fan.
This method is a coroutine.
"""
percentage_payload = math.ceil(
percentage_to_ranged_value(self._speed_range, percentage)
)
mqtt_payload = self._command_templates[ATTR_PERCENTAGE](percentage_payload)
await self.async_publish(
self._topic[CONF_PERCENTAGE_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_percentage:
self._percentage = percentage
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode of the fan.
This method is a coroutine.
"""
self._valid_preset_mode_or_raise(preset_mode)
mqtt_payload = self._command_templates[ATTR_PRESET_MODE](preset_mode)
await self.async_publish(
self._topic[CONF_PRESET_MODE_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_preset_mode:
self._preset_mode = preset_mode
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation.
This method is a coroutine.
"""
if oscillating:
mqtt_payload = self._command_templates[ATTR_OSCILLATING](
self._payload["OSCILLATE_ON_PAYLOAD"]
)
else:
mqtt_payload = self._command_templates[ATTR_OSCILLATING](
self._payload["OSCILLATE_OFF_PAYLOAD"]
)
await self.async_publish(
self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
| 36.800912 | 102 | 0.663556 | from __future__ import annotations
import asyncio
import functools
import logging
import math
import voluptuous as vol
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
FanEntity,
FanEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import subscription
from .config import MQTT_RW_SCHEMA
from .const import (
CONF_COMMAND_TEMPLATE,
CONF_COMMAND_TOPIC,
CONF_ENCODING,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_STATE_VALUE_TEMPLATE,
PAYLOAD_NONE,
)
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttEntity,
async_get_platform_config_from_yaml,
async_setup_entry_helper,
async_setup_platform_helper,
warn_for_legacy_schema,
)
from .models import MqttCommandTemplate, MqttValueTemplate
from .util import valid_publish_topic, valid_subscribe_topic
CONF_PERCENTAGE_STATE_TOPIC = "percentage_state_topic"
CONF_PERCENTAGE_COMMAND_TOPIC = "percentage_command_topic"
CONF_PERCENTAGE_VALUE_TEMPLATE = "percentage_value_template"
CONF_PERCENTAGE_COMMAND_TEMPLATE = "percentage_command_template"
CONF_PAYLOAD_RESET_PERCENTAGE = "payload_reset_percentage"
CONF_SPEED_RANGE_MIN = "speed_range_min"
CONF_SPEED_RANGE_MAX = "speed_range_max"
CONF_PRESET_MODE_STATE_TOPIC = "preset_mode_state_topic"
CONF_PRESET_MODE_COMMAND_TOPIC = "preset_mode_command_topic"
CONF_PRESET_MODE_VALUE_TEMPLATE = "preset_mode_value_template"
CONF_PRESET_MODE_COMMAND_TEMPLATE = "preset_mode_command_template"
CONF_PRESET_MODES_LIST = "preset_modes"
CONF_PAYLOAD_RESET_PRESET_MODE = "payload_reset_preset_mode"
CONF_SPEED_STATE_TOPIC = "speed_state_topic"
CONF_SPEED_COMMAND_TOPIC = "speed_command_topic"
CONF_SPEED_VALUE_TEMPLATE = "speed_value_template"
CONF_OSCILLATION_STATE_TOPIC = "oscillation_state_topic"
CONF_OSCILLATION_COMMAND_TOPIC = "oscillation_command_topic"
CONF_OSCILLATION_VALUE_TEMPLATE = "oscillation_value_template"
CONF_OSCILLATION_COMMAND_TEMPLATE = "oscillation_command_template"
CONF_PAYLOAD_OSCILLATION_ON = "payload_oscillation_on"
CONF_PAYLOAD_OSCILLATION_OFF = "payload_oscillation_off"
CONF_PAYLOAD_OFF_SPEED = "payload_off_speed"
CONF_PAYLOAD_LOW_SPEED = "payload_low_speed"
CONF_PAYLOAD_MEDIUM_SPEED = "payload_medium_speed"
CONF_PAYLOAD_HIGH_SPEED = "payload_high_speed"
CONF_SPEED_LIST = "speeds"
DEFAULT_NAME = "MQTT Fan"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_RESET = "None"
DEFAULT_OPTIMISTIC = False
DEFAULT_SPEED_RANGE_MIN = 1
DEFAULT_SPEED_RANGE_MAX = 100
OSCILLATE_ON_PAYLOAD = "oscillate_on"
OSCILLATE_OFF_PAYLOAD = "oscillate_off"
MQTT_FAN_ATTRIBUTES_BLOCKED = frozenset(
{
fan.ATTR_DIRECTION,
fan.ATTR_OSCILLATING,
fan.ATTR_PERCENTAGE_STEP,
fan.ATTR_PERCENTAGE,
fan.ATTR_PRESET_MODE,
fan.ATTR_PRESET_MODES,
}
)
_LOGGER = logging.getLogger(__name__)
def valid_speed_range_configuration(config):
if config.get(CONF_SPEED_RANGE_MIN) == 0:
raise ValueError("speed_range_min must be > 0")
if config.get(CONF_SPEED_RANGE_MIN) >= config.get(CONF_SPEED_RANGE_MAX):
raise ValueError("speed_range_max must be > speed_range_min")
return config
def valid_preset_mode_configuration(config):
if config.get(CONF_PAYLOAD_RESET_PRESET_MODE) in config.get(CONF_PRESET_MODES_LIST):
raise ValueError("preset_modes must not contain payload_reset_preset_mode")
return config
_PLATFORM_SCHEMA_BASE = MQTT_RW_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_OSCILLATION_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_OSCILLATION_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_PERCENTAGE_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_PERCENTAGE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_PERCENTAGE_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_PERCENTAGE_VALUE_TEMPLATE): cv.template,
vol.Inclusive(
CONF_PRESET_MODE_COMMAND_TOPIC, "preset_modes"
): valid_publish_topic,
vol.Inclusive(
CONF_PRESET_MODES_LIST, "preset_modes", default=[]
): cv.ensure_list,
vol.Optional(CONF_PRESET_MODE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_PRESET_MODE_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_PRESET_MODE_VALUE_TEMPLATE): cv.template,
vol.Optional(
CONF_SPEED_RANGE_MIN, default=DEFAULT_SPEED_RANGE_MIN
): cv.positive_int,
vol.Optional(
CONF_SPEED_RANGE_MAX, default=DEFAULT_SPEED_RANGE_MAX
): cv.positive_int,
vol.Optional(
CONF_PAYLOAD_RESET_PERCENTAGE, default=DEFAULT_PAYLOAD_RESET
): cv.string,
vol.Optional(
CONF_PAYLOAD_RESET_PRESET_MODE, default=DEFAULT_PAYLOAD_RESET
): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(
CONF_PAYLOAD_OSCILLATION_OFF, default=OSCILLATE_OFF_PAYLOAD
): cv.string,
vol.Optional(
CONF_PAYLOAD_OSCILLATION_ON, default=OSCILLATE_ON_PAYLOAD
): cv.string,
vol.Optional(CONF_SPEED_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_SPEED_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
PLATFORM_SCHEMA = vol.All(
cv.PLATFORM_SCHEMA.extend(_PLATFORM_SCHEMA_BASE.schema),
valid_speed_range_configuration,
valid_preset_mode_configuration,
warn_for_legacy_schema(fan.DOMAIN),
)
PLATFORM_SCHEMA_MODERN = vol.All(
_PLATFORM_SCHEMA_BASE,
valid_speed_range_configuration,
valid_preset_mode_configuration,
)
DISCOVERY_SCHEMA = vol.All(
cv.removed(CONF_PAYLOAD_HIGH_SPEED),
cv.removed(CONF_PAYLOAD_LOW_SPEED),
cv.removed(CONF_PAYLOAD_MEDIUM_SPEED),
cv.removed(CONF_SPEED_COMMAND_TOPIC),
cv.removed(CONF_SPEED_LIST),
cv.removed(CONF_SPEED_STATE_TOPIC),
cv.removed(CONF_SPEED_VALUE_TEMPLATE),
_PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA),
valid_speed_range_configuration,
valid_preset_mode_configuration,
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
await async_setup_platform_helper(
hass, fan.DOMAIN, config, async_add_entities, _async_setup_entity
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
await asyncio.gather(
*(
_async_setup_entity(hass, async_add_entities, config, config_entry)
for config in await async_get_platform_config_from_yaml(
hass, fan.DOMAIN, PLATFORM_SCHEMA_MODERN
)
)
)
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, fan.DOMAIN, setup, DISCOVERY_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
async_add_entities([MqttFan(hass, config, config_entry, discovery_data)])
class MqttFan(MqttEntity, FanEntity):
_entity_id_format = fan.ENTITY_ID_FORMAT
_attributes_extra_blocked = MQTT_FAN_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
self._state = None
self._percentage = None
self._preset_mode = None
self._oscillation = None
self._supported_features = 0
self._topic = None
self._payload = None
self._value_templates = None
self._command_templates = None
self._optimistic = None
self._optimistic_oscillation = None
self._optimistic_percentage = None
self._optimistic_preset_mode = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
return DISCOVERY_SCHEMA
def _setup_from_config(self, config):
self._speed_range = (
config.get(CONF_SPEED_RANGE_MIN),
config.get(CONF_SPEED_RANGE_MAX),
)
self._topic = {
key: config.get(key)
for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_PERCENTAGE_STATE_TOPIC,
CONF_PERCENTAGE_COMMAND_TOPIC,
CONF_PRESET_MODE_STATE_TOPIC,
CONF_PRESET_MODE_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._value_templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_VALUE_TEMPLATE),
ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_VALUE_TEMPLATE),
ATTR_OSCILLATING: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
}
self._command_templates = {
CONF_STATE: config.get(CONF_COMMAND_TEMPLATE),
ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_COMMAND_TEMPLATE),
ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_COMMAND_TEMPLATE),
ATTR_OSCILLATING: config.get(CONF_OSCILLATION_COMMAND_TEMPLATE),
}
self._payload = {
"STATE_ON": config[CONF_PAYLOAD_ON],
"STATE_OFF": config[CONF_PAYLOAD_OFF],
"OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
"OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
"PERCENTAGE_RESET": config[CONF_PAYLOAD_RESET_PERCENTAGE],
"PRESET_MODE_RESET": config[CONF_PAYLOAD_RESET_PRESET_MODE],
}
self._feature_percentage = CONF_PERCENTAGE_COMMAND_TOPIC in config
self._feature_preset_mode = CONF_PRESET_MODE_COMMAND_TOPIC in config
if self._feature_preset_mode:
self._preset_modes = config[CONF_PRESET_MODES_LIST]
else:
self._preset_modes = []
self._speed_count = (
min(int_states_in_range(self._speed_range), 100)
if self._feature_percentage
else 100
)
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
)
self._optimistic_percentage = (
optimistic or self._topic[CONF_PERCENTAGE_STATE_TOPIC] is None
)
self._optimistic_preset_mode = (
optimistic or self._topic[CONF_PRESET_MODE_STATE_TOPIC] is None
)
self._supported_features = 0
self._supported_features |= (
self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
and FanEntityFeature.OSCILLATE
)
if self._feature_percentage:
self._supported_features |= FanEntityFeature.SET_SPEED
if self._feature_preset_mode:
self._supported_features |= FanEntityFeature.PRESET_MODE
for key, tpl in self._command_templates.items():
self._command_templates[key] = MqttCommandTemplate(
tpl, entity=self
).async_render
for key, tpl in self._value_templates.items():
self._value_templates[key] = MqttValueTemplate(
tpl,
entity=self,
).async_render_with_possible_json_value
def _prepare_subscribe_topics(self):
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
payload = self._value_templates[CONF_STATE](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty state from '%s'", msg.topic)
return
if payload == self._payload["STATE_ON"]:
self._state = True
elif payload == self._payload["STATE_OFF"]:
self._state = False
elif payload == PAYLOAD_NONE:
self._state = None
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
@callback
@log_messages(self.hass, self.entity_id)
def percentage_received(msg):
rendered_percentage_payload = self._value_templates[ATTR_PERCENTAGE](
msg.payload
)
if not rendered_percentage_payload:
_LOGGER.debug("Ignoring empty speed from '%s'", msg.topic)
return
if rendered_percentage_payload == self._payload["PERCENTAGE_RESET"]:
self._percentage = None
self.async_write_ha_state()
return
try:
percentage = ranged_value_to_percentage(
self._speed_range, int(rendered_percentage_payload)
)
except ValueError:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid speed within the speed range",
msg.payload,
msg.topic,
rendered_percentage_payload,
)
return
if percentage < 0 or percentage > 100:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid speed within the speed range",
msg.payload,
msg.topic,
rendered_percentage_payload,
)
return
self._percentage = percentage
self.async_write_ha_state()
if self._topic[CONF_PERCENTAGE_STATE_TOPIC] is not None:
topics[CONF_PERCENTAGE_STATE_TOPIC] = {
"topic": self._topic[CONF_PERCENTAGE_STATE_TOPIC],
"msg_callback": percentage_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._percentage = None
@callback
@log_messages(self.hass, self.entity_id)
def preset_mode_received(msg):
preset_mode = self._value_templates[ATTR_PRESET_MODE](msg.payload)
if preset_mode == self._payload["PRESET_MODE_RESET"]:
self._preset_mode = None
self.async_write_ha_state()
return
if not preset_mode:
_LOGGER.debug("Ignoring empty preset_mode from '%s'", msg.topic)
return
if preset_mode not in self.preset_modes:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid preset mode",
msg.payload,
msg.topic,
preset_mode,
)
return
self._preset_mode = preset_mode
self.async_write_ha_state()
if self._topic[CONF_PRESET_MODE_STATE_TOPIC] is not None:
topics[CONF_PRESET_MODE_STATE_TOPIC] = {
"topic": self._topic[CONF_PRESET_MODE_STATE_TOPIC],
"msg_callback": preset_mode_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._preset_mode = None
@callback
@log_messages(self.hass, self.entity_id)
def oscillation_received(msg):
payload = self._value_templates[ATTR_OSCILLATING](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty oscillation from '%s'", msg.topic)
return
if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
self._oscillation = True
elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
"topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
"msg_callback": oscillation_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._oscillation = False
self._sub_state = subscription.async_prepare_subscribe_topics(
self.hass, self._sub_state, topics
)
async def _subscribe_topics(self):
await subscription.async_subscribe_topics(self.hass, self._sub_state)
@property
def assumed_state(self):
return self._optimistic
@property
def is_on(self) -> bool | None:
return self._state
@property
def percentage(self):
return self._percentage
@property
def preset_mode(self):
return self._preset_mode
@property
def preset_modes(self) -> list:
return self._preset_modes
@property
def supported_features(self) -> int:
return self._supported_features
@property
def speed_count(self) -> int:
return self._speed_count
@property
def oscillating(self):
return self._oscillation
async def async_turn_on(
self,
percentage: int = None,
preset_mode: str = None,
**kwargs,
) -> None:
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_ON"])
await self.async_publish(
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if percentage:
await self.async_set_percentage(percentage)
if preset_mode:
await self.async_set_preset_mode(preset_mode)
if self._optimistic:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_OFF"])
await self.async_publish(
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
self._state = False
self.async_write_ha_state()
async def async_set_percentage(self, percentage: int) -> None:
percentage_payload = math.ceil(
percentage_to_ranged_value(self._speed_range, percentage)
)
mqtt_payload = self._command_templates[ATTR_PERCENTAGE](percentage_payload)
await self.async_publish(
self._topic[CONF_PERCENTAGE_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_percentage:
self._percentage = percentage
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode: str) -> None:
self._valid_preset_mode_or_raise(preset_mode)
mqtt_payload = self._command_templates[ATTR_PRESET_MODE](preset_mode)
await self.async_publish(
self._topic[CONF_PRESET_MODE_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_preset_mode:
self._preset_mode = preset_mode
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
if oscillating:
mqtt_payload = self._command_templates[ATTR_OSCILLATING](
self._payload["OSCILLATE_ON_PAYLOAD"]
)
else:
mqtt_payload = self._command_templates[ATTR_OSCILLATING](
self._payload["OSCILLATE_OFF_PAYLOAD"]
)
await self.async_publish(
self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
| true | true |
f72b0d77ba36f92d632793f510174fe192e614ec | 390 | py | Python | src/utils.py | CarlSchader/poker-api | 446c036367fdb75f5b0fd7f93f347d839bbf71b6 | [
"MIT"
] | null | null | null | src/utils.py | CarlSchader/poker-api | 446c036367fdb75f5b0fd7f93f347d839bbf71b6 | [
"MIT"
] | null | null | null | src/utils.py | CarlSchader/poker-api | 446c036367fdb75f5b0fd7f93f347d839bbf71b6 | [
"MIT"
] | null | null | null | import functools
def dict_cmp(x, y, key):
if str(x[key]) > str(y[key]):
return 1
elif str(x[key]) < str(y[key]):
return -1
else:
return 0
def sort_dict(dictionary, cmp_func):
arr = []
for key in dictionary:
arr.append((key, dictionary[key]))
arr.sort(key=functools.cmp_to_key(lambda x, y : cmp_func(x[1], y[1])))
return arr | 22.941176 | 74 | 0.574359 | import functools
def dict_cmp(x, y, key):
if str(x[key]) > str(y[key]):
return 1
elif str(x[key]) < str(y[key]):
return -1
else:
return 0
def sort_dict(dictionary, cmp_func):
arr = []
for key in dictionary:
arr.append((key, dictionary[key]))
arr.sort(key=functools.cmp_to_key(lambda x, y : cmp_func(x[1], y[1])))
return arr | true | true |
f72b0e39b7ac8a2190ca5bc480dd257ebdc5b8a6 | 290 | py | Python | generate/partial-header/dataclass/annotation.py | kurusugawa-computer/annofab-api-python-client | 9920e0745f1ee8ea79c26e26a61013b415351982 | [
"MIT"
] | 17 | 2019-05-04T04:24:28.000Z | 2021-12-14T02:43:24.000Z | generate/partial-header/dataclass/annotation.py | kurusugawa-computer/annofab-api-python-client | 9920e0745f1ee8ea79c26e26a61013b415351982 | [
"MIT"
] | 214 | 2019-05-13T01:07:28.000Z | 2022-03-28T20:02:34.000Z | generate/partial-header/dataclass/annotation.py | kurusugawa-computer/annofab-api-python-client | 9920e0745f1ee8ea79c26e26a61013b415351982 | [
"MIT"
] | 2 | 2019-06-15T05:01:50.000Z | 2019-07-04T02:29:55.000Z | from annofabapi.models import (
AdditionalDataDefinitionType,
AnnotationDataHoldingType,
AnnotationType,
InternationalizationMessage,
TaskPhase,
TaskStatus,
)
AnnotationData = Union[str, Dict[str, Any]]
FullAnnotationData = Any
AdditionalDataValue = Dict[str, Any]
| 22.307692 | 43 | 0.762069 | from annofabapi.models import (
AdditionalDataDefinitionType,
AnnotationDataHoldingType,
AnnotationType,
InternationalizationMessage,
TaskPhase,
TaskStatus,
)
AnnotationData = Union[str, Dict[str, Any]]
FullAnnotationData = Any
AdditionalDataValue = Dict[str, Any]
| true | true |
f72b0f69d6927ac9a2071aaa3c495a33948a8289 | 7,677 | py | Python | homeassistant/components/epson/media_player.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/epson/media_player.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/epson/media_player.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for Epson projector."""
from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Epson projector from a config entry."""
entry_id = config_entry.entry_id
unique_id = config_entry.unique_id
projector = hass.data[DOMAIN][entry_id]
projector_entity = EpsonProjectorMediaPlayer(
projector=projector,
name=config_entry.title,
unique_id=unique_id,
entry=config_entry,
)
async_add_entities([projector_entity], True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SELECT_CMODE,
{vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
SERVICE_SELECT_CMODE,
)
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
"""Representation of Epson Projector Device."""
_attr_supported_features = (
MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_STEP
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.PREVIOUS_TRACK
)
def __init__(self, projector, name, unique_id, entry):
"""Initialize entity to control Epson projector."""
self._projector = projector
self._entry = entry
self._name = name
self._available = False
self._cmode = None
self._source_list = list(DEFAULT_SOURCES.values())
self._source = None
self._volume = None
self._state = None
self._unique_id = unique_id
async def set_unique_id(self):
"""Set unique id for projector config entry."""
_LOGGER.debug("Setting unique_id for projector")
if self._unique_id:
return False
if uid := await self._projector.get_serial_number():
self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
registry = async_get_entity_registry(self.hass)
old_entity_id = registry.async_get_entity_id(
"media_player", DOMAIN, self._entry.entry_id
)
if old_entity_id is not None:
registry.async_update_entity(old_entity_id, new_unique_id=uid)
self.hass.async_create_task(
self.hass.config_entries.async_reload(self._entry.entry_id)
)
return True
async def async_update(self):
"""Update state of device."""
power_state = await self._projector.get_power()
_LOGGER.debug("Projector status: %s", power_state)
if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
self._available = False
return
self._available = True
if power_state == EPSON_CODES[POWER]:
self._state = STATE_ON
if await self.set_unique_id():
return
self._source_list = list(DEFAULT_SOURCES.values())
cmode = await self._projector.get_property(CMODE)
self._cmode = CMODE_LIST.get(cmode, self._cmode)
source = await self._projector.get_property(SOURCE)
self._source = SOURCE_LIST.get(source, self._source)
volume = await self._projector.get_property(VOLUME)
if volume:
self._volume = volume
elif power_state == BUSY:
self._state = STATE_ON
else:
self._state = STATE_OFF
@property
def device_info(self) -> DeviceInfo | None:
"""Get attributes about the device."""
if not self._unique_id:
return None
return DeviceInfo(
identifiers={(DOMAIN, self._unique_id)},
manufacturer="Epson",
model="Epson",
name="Epson projector",
via_device=(DOMAIN, self._unique_id),
)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return unique ID."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def available(self):
"""Return if projector is available."""
return self._available
async def async_turn_on(self):
"""Turn on epson."""
if self._state == STATE_OFF:
await self._projector.send_command(TURN_ON)
async def async_turn_off(self):
"""Turn off epson."""
if self._state == STATE_ON:
await self._projector.send_command(TURN_OFF)
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def source(self):
"""Get current input sources."""
return self._source
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume
async def select_cmode(self, cmode):
"""Set color mode in Epson."""
await self._projector.send_command(CMODE_LIST_SET[cmode])
async def async_select_source(self, source):
"""Select input source."""
selected_source = INV_SOURCES[source]
await self._projector.send_command(selected_source)
async def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) sound."""
await self._projector.send_command(MUTE)
async def async_volume_up(self):
"""Increase volume."""
await self._projector.send_command(VOL_UP)
async def async_volume_down(self):
"""Decrease volume."""
await self._projector.send_command(VOL_DOWN)
async def async_media_play(self):
"""Play media via Epson."""
await self._projector.send_command(PLAY)
async def async_media_pause(self):
"""Pause media via Epson."""
await self._projector.send_command(PAUSE)
async def async_media_next_track(self):
"""Skip to next."""
await self._projector.send_command(FAST)
async def async_media_previous_track(self):
"""Skip to previous."""
await self._projector.send_command(BACK)
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
if self._cmode is None:
return {}
return {ATTR_CMODE: self._cmode}
| 31.592593 | 88 | 0.655464 | from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
entry_id = config_entry.entry_id
unique_id = config_entry.unique_id
projector = hass.data[DOMAIN][entry_id]
projector_entity = EpsonProjectorMediaPlayer(
projector=projector,
name=config_entry.title,
unique_id=unique_id,
entry=config_entry,
)
async_add_entities([projector_entity], True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SELECT_CMODE,
{vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
SERVICE_SELECT_CMODE,
)
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
_attr_supported_features = (
MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_STEP
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.PREVIOUS_TRACK
)
def __init__(self, projector, name, unique_id, entry):
self._projector = projector
self._entry = entry
self._name = name
self._available = False
self._cmode = None
self._source_list = list(DEFAULT_SOURCES.values())
self._source = None
self._volume = None
self._state = None
self._unique_id = unique_id
async def set_unique_id(self):
_LOGGER.debug("Setting unique_id for projector")
if self._unique_id:
return False
if uid := await self._projector.get_serial_number():
self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
registry = async_get_entity_registry(self.hass)
old_entity_id = registry.async_get_entity_id(
"media_player", DOMAIN, self._entry.entry_id
)
if old_entity_id is not None:
registry.async_update_entity(old_entity_id, new_unique_id=uid)
self.hass.async_create_task(
self.hass.config_entries.async_reload(self._entry.entry_id)
)
return True
async def async_update(self):
power_state = await self._projector.get_power()
_LOGGER.debug("Projector status: %s", power_state)
if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
self._available = False
return
self._available = True
if power_state == EPSON_CODES[POWER]:
self._state = STATE_ON
if await self.set_unique_id():
return
self._source_list = list(DEFAULT_SOURCES.values())
cmode = await self._projector.get_property(CMODE)
self._cmode = CMODE_LIST.get(cmode, self._cmode)
source = await self._projector.get_property(SOURCE)
self._source = SOURCE_LIST.get(source, self._source)
volume = await self._projector.get_property(VOLUME)
if volume:
self._volume = volume
elif power_state == BUSY:
self._state = STATE_ON
else:
self._state = STATE_OFF
@property
def device_info(self) -> DeviceInfo | None:
if not self._unique_id:
return None
return DeviceInfo(
identifiers={(DOMAIN, self._unique_id)},
manufacturer="Epson",
model="Epson",
name="Epson projector",
via_device=(DOMAIN, self._unique_id),
)
@property
def name(self):
return self._name
@property
def unique_id(self):
return self._unique_id
@property
def state(self):
return self._state
@property
def available(self):
return self._available
async def async_turn_on(self):
if self._state == STATE_OFF:
await self._projector.send_command(TURN_ON)
async def async_turn_off(self):
if self._state == STATE_ON:
await self._projector.send_command(TURN_OFF)
@property
def source_list(self):
return self._source_list
@property
def source(self):
return self._source
@property
def volume_level(self):
return self._volume
async def select_cmode(self, cmode):
await self._projector.send_command(CMODE_LIST_SET[cmode])
async def async_select_source(self, source):
selected_source = INV_SOURCES[source]
await self._projector.send_command(selected_source)
async def async_mute_volume(self, mute):
await self._projector.send_command(MUTE)
async def async_volume_up(self):
await self._projector.send_command(VOL_UP)
async def async_volume_down(self):
await self._projector.send_command(VOL_DOWN)
async def async_media_play(self):
await self._projector.send_command(PLAY)
async def async_media_pause(self):
await self._projector.send_command(PAUSE)
async def async_media_next_track(self):
await self._projector.send_command(FAST)
async def async_media_previous_track(self):
await self._projector.send_command(BACK)
@property
def extra_state_attributes(self):
if self._cmode is None:
return {}
return {ATTR_CMODE: self._cmode}
| true | true |
f72b112a2a1fc41633e4d17514fd8efbba957fc5 | 299 | py | Python | World 02/Class 13/ex050.py | DanielRios549/PythonExcercises | acb44a7cc383e8534f47bc59235d9cc04fd83880 | [
"MIT"
] | 6 | 2021-05-04T22:09:16.000Z | 2022-01-08T20:27:39.000Z | World 02/Class 13/ex050.py | DanielRios549/PythonExercises | acb44a7cc383e8534f47bc59235d9cc04fd83880 | [
"MIT"
] | null | null | null | World 02/Class 13/ex050.py | DanielRios549/PythonExercises | acb44a7cc383e8534f47bc59235d9cc04fd83880 | [
"MIT"
] | null | null | null | '''
Get 6 integer numbers and show the sum of the even ones. Do not consider the odd ones.
'''
sum_number = 0
for count in range(0, 6):
number = int(input('Choose a number: '))
if number % 2 == 0:
sum_number += number
print(f'The sum of all even numbers equals {sum_number}')
| 23 | 90 | 0.64214 | sum_number = 0
for count in range(0, 6):
number = int(input('Choose a number: '))
if number % 2 == 0:
sum_number += number
print(f'The sum of all even numbers equals {sum_number}')
| true | true |
f72b11dd0aed4940421d5a68bccc46f47f43bad2 | 6,464 | py | Python | integrations/tensorflow/e2e/conv_test.py | rise-lang/iree | 46ad3fe392d38ce3df6eff7826cc1ab331a40b72 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/conv_test.py | rise-lang/iree | 46ad3fe392d38ce3df6eff7826cc1ab331a40b72 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/conv_test.py | rise-lang/iree | 46ad3fe392d38ce3df6eff7826cc1ab331a40b72 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyiree.tf.support import tf_test_utils
import tensorflow.compat.v2 as tf
class Conv2dModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_1451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_2451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_2451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([3, 2, 2, 1], tf.float32),
])
def conv2d_1452x3221_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 2], tf.float32),
])
def conv2d_1451x1112_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([1, 1, 2, 2], tf.float32),
])
def conv2d_1452x1122_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_2452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf_test_utils.compile_module(Conv2dModule)
class ConvTest(tf_test_utils.SavedModelTestCase):
def test_id_batch_size_1(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_1451x1111_valid(i, k)
r.print().assert_all_close()
def test_id_batch_size_2(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_2451x1111_valid(i, k)
r.print().assert_all_close()
def test_asym_kernel(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_valid(i, k)
r.print().assert_all_close()
def test_padding(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_same(i, k)
r.print().assert_all_close()
def test_batched_padding(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_2451x2311_same(i, k)
r.print().assert_all_close()
def test_feature_reduce(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.ones([3, 2, 2, 1], dtype=np.float32)
r = self.get_module().conv2d_1452x3221_same(i, k)
r.print().assert_all_close()
def test_feature_inflate(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.arange(2, dtype=np.float32).reshape([1, 1, 1, 2])
r = self.get_module().conv2d_1451x1112_same(i, k)
r.print().assert_all_close()
def test_feature_mix(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(4, dtype=np.float32).reshape([1, 1, 2, 2])
r = self.get_module().conv2d_1452x1122_same(i, k)
r.print().assert_all_close()
def test_feature_padded(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_1452x2223_same(i, k)
r.print().assert_all_close()
def test_feature_unpadded(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_1452x2223_valid(i, k)
r.print().assert_all_close()
def test_batched_feature_unpadded(self):
i = np.arange(80, dtype=np.float32).reshape([2, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_2452x2223_valid(i, k)
r.print().assert_all_close()
if __name__ == "__main__":
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf.test.main()
| 36.937143 | 79 | 0.642481 |
import numpy as np
from pyiree.tf.support import tf_test_utils
import tensorflow.compat.v2 as tf
class Conv2dModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_1451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_2451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_2451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([3, 2, 2, 1], tf.float32),
])
def conv2d_1452x3221_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 2], tf.float32),
])
def conv2d_1451x1112_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([1, 1, 2, 2], tf.float32),
])
def conv2d_1452x1122_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_2452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf_test_utils.compile_module(Conv2dModule)
class ConvTest(tf_test_utils.SavedModelTestCase):
def test_id_batch_size_1(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_1451x1111_valid(i, k)
r.print().assert_all_close()
def test_id_batch_size_2(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_2451x1111_valid(i, k)
r.print().assert_all_close()
def test_asym_kernel(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_valid(i, k)
r.print().assert_all_close()
def test_padding(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_same(i, k)
r.print().assert_all_close()
def test_batched_padding(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_2451x2311_same(i, k)
r.print().assert_all_close()
def test_feature_reduce(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.ones([3, 2, 2, 1], dtype=np.float32)
r = self.get_module().conv2d_1452x3221_same(i, k)
r.print().assert_all_close()
def test_feature_inflate(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.arange(2, dtype=np.float32).reshape([1, 1, 1, 2])
r = self.get_module().conv2d_1451x1112_same(i, k)
r.print().assert_all_close()
def test_feature_mix(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(4, dtype=np.float32).reshape([1, 1, 2, 2])
r = self.get_module().conv2d_1452x1122_same(i, k)
r.print().assert_all_close()
def test_feature_padded(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_1452x2223_same(i, k)
r.print().assert_all_close()
def test_feature_unpadded(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_1452x2223_valid(i, k)
r.print().assert_all_close()
def test_batched_feature_unpadded(self):
i = np.arange(80, dtype=np.float32).reshape([2, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_2452x2223_valid(i, k)
r.print().assert_all_close()
if __name__ == "__main__":
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf.test.main()
| true | true |
f72b11f17c30ee2bf5b08acdb6fe426742382acd | 26,208 | py | Python | lib/ansiblelint/utils.py | gdoucet/ansible-lint | 07b5194b44f6979480f57b96ea3d196fb59c0e7c | [
"MIT"
] | 1 | 2020-01-21T04:30:10.000Z | 2020-01-21T04:30:10.000Z | lib/ansiblelint/utils.py | gdoucet/ansible-lint | 07b5194b44f6979480f57b96ea3d196fb59c0e7c | [
"MIT"
] | null | null | null | lib/ansiblelint/utils.py | gdoucet/ansible-lint | 07b5194b44f6979480f57b96ea3d196fb59c0e7c | [
"MIT"
] | null | null | null | # Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import imp
import os
from itertools import product
import six
from ansible import constants
from ansible.errors import AnsibleError
try:
# Try to import the Ansible 2 module first, it's the future-proof one
from ansible.parsing.splitter import split_args
except ImportError:
# Fallback on the Ansible 1.9 module
from ansible.module_utils.splitter import split_args
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
import ruamel.yaml
try:
from ansible.utils import parse_yaml_from_file
from ansible.utils import path_dwim
from ansible.utils.template import template as ansible_template
from ansible.utils import module_finder
module_loader = module_finder
ANSIBLE_VERSION = 1
except ImportError:
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.errors import AnsibleParserError
ANSIBLE_VERSION = 2
# ansible-lint doesn't need/want to know about encrypted secrets, but it needs
# Ansible 2.3+ allows encrypted secrets within yaml files, so we pass a string
# as the password to enable such yaml files to be opened and parsed successfully.
DEFAULT_VAULT_PASSWORD = 'x'
def parse_yaml_from_file(filepath):
dl = DataLoader()
if hasattr(dl, 'set_vault_password'):
dl.set_vault_password(DEFAULT_VAULT_PASSWORD)
return dl.load_from_file(filepath)
def path_dwim(basedir, given):
dl = DataLoader()
dl.set_basedir(basedir)
return dl.path_dwim(given)
def ansible_template(basedir, varname, templatevars, **kwargs):
dl = DataLoader()
dl.set_basedir(basedir)
templar = Templar(dl, variables=templatevars)
return templar.template(varname, **kwargs)
try:
from ansible.plugins import module_loader
except ImportError:
from ansible.plugins.loader import module_loader
LINE_NUMBER_KEY = '__line__'
FILENAME_KEY = '__file__'
VALID_KEYS = [
'name', 'action', 'when', 'async', 'poll', 'notify',
'first_available_file', 'include', 'include_tasks', 'import_tasks', 'import_playbook',
'tags', 'register', 'ignore_errors', 'delegate_to',
'local_action', 'transport', 'remote_user', 'sudo',
'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'always_run',
'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay',
'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
'become', 'become_user', 'become_method', FILENAME_KEY,
]
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
'tasks': 'task',
'handlers': 'handler',
'pre_tasks': 'task',
'post_tasks': 'task',
'block': 'meta',
'rescue': 'meta',
'always': 'meta',
}
def load_plugins(directory):
result = []
fh = None
for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
pluginname = os.path.basename(pluginfile.replace('.py', ''))
try:
fh, filename, desc = imp.find_module(pluginname, [directory])
mod = imp.load_module(pluginname, fh, filename, desc)
obj = getattr(mod, pluginname)()
result.append(obj)
finally:
if fh:
fh.close()
return result
def tokenize(line):
tokens = line.lstrip().split(" ")
if tokens[0] == '-':
tokens = tokens[1:]
if tokens[0] == 'action:' or tokens[0] == 'local_action:':
tokens = tokens[1:]
command = tokens[0].replace(":", "")
args = list()
kwargs = dict()
nonkvfound = False
for arg in tokens[1:]:
if "=" in arg and not nonkvfound:
kv = arg.split("=", 1)
kwargs[kv[0]] = kv[1]
else:
nonkvfound = True
args.append(arg)
return (command, args, kwargs)
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook, playbook_dir):
if not os.path.exists(playbook[0]):
return []
if playbook[1] == 'role':
playbook_ds = {'roles': [{'role': playbook[0]}]}
else:
try:
playbook_ds = parse_yaml_from_file(playbook[0])
except AnsibleError as e:
raise SystemExit(str(e))
results = []
basedir = os.path.dirname(playbook[0])
items = _playbook_items(playbook_ds)
for item in items:
for child in play_children(basedir, item, playbook[1], playbook_dir):
if "$" in child['path'] or "{{" in child['path']:
continue
valid_tokens = list()
for token in split_args(child['path']):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
results.append({
'path': path_dwim(basedir, path),
'type': child['type']
})
return results
def template(basedir, value, vars, fail_on_undefined=False, **kwargs):
try:
value = ansible_template(os.path.abspath(basedir), value, vars,
**dict(kwargs, fail_on_undefined=fail_on_undefined))
# Hack to skip the following exception when using to_json filter on a variable.
# I guess the filter doesn't like empty vars...
except (AnsibleError, ValueError):
# templating failed, so just keep value as is.
pass
return value
def play_children(basedir, item, parent_type, playbook_dir):
delegate_map = {
'tasks': _taskshandlers_children,
'pre_tasks': _taskshandlers_children,
'post_tasks': _taskshandlers_children,
'block': _taskshandlers_children,
'include': _include_children,
'import_playbook': _include_children,
'roles': _roles_children,
'dependencies': _roles_children,
'handlers': _taskshandlers_children,
'include_tasks': _include_children,
'import_tasks': _include_children,
}
(k, v) = item
play_library = os.path.join(os.path.abspath(basedir), 'library')
_load_library_if_exists(play_library)
if k in delegate_map:
if v:
v = template(os.path.abspath(basedir),
v,
dict(playbook_dir=os.path.abspath(basedir)),
fail_on_undefined=False)
return delegate_map[k](basedir, k, v, parent_type)
return []
def _include_children(basedir, k, v, parent_type):
# handle include: filename.yml tags=blah
(command, args, kwargs) = tokenize("{0}: {1}".format(k, v))
result = path_dwim(basedir, args[0])
if not os.path.exists(result) and not basedir.endswith('tasks'):
result = path_dwim(os.path.join(basedir, '..', 'tasks'), v)
return [{'path': result, 'type': parent_type}]
def _taskshandlers_children(basedir, k, v, parent_type):
results = []
for th in v:
if 'include' in th:
append_children(th['include'], basedir, k, parent_type, results)
elif 'include_tasks' in th:
append_children(th['include_tasks'], basedir, k, parent_type, results)
elif 'import_playbook' in th:
append_children(th['import_playbook'], basedir, k, parent_type, results)
elif 'import_tasks' in th:
append_children(th['import_tasks'], basedir, k, parent_type, results)
elif 'import_role' in th:
th = normalize_task_v2(th)
results.extend(_roles_children(basedir, k, [th['action'].get('name')], parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'include_role' in th:
th = normalize_task_v2(th)
results.extend(_roles_children(basedir, k, [th['action'].get('name')],
parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'block' in th:
results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type))
if 'rescue' in th:
results.extend(_taskshandlers_children(basedir, k, th['rescue'], parent_type))
if 'always' in th:
results.extend(_taskshandlers_children(basedir, k, th['always'], parent_type))
return results
def append_children(taskhandler, basedir, k, parent_type, results):
    """Resolve *taskhandler* against *basedir* and record it in *results*.

    When called while walking a playbook, the actual type of the included
    tasks is the section containing the include (e.g. tasks, pre_tasks,
    or handlers); otherwise the parent type is propagated as-is.
    """
    section = k if parent_type == 'playbook' else parent_type
    results.append({
        'path': path_dwim(basedir, taskhandler),
        'type': section
    })
def _roles_children(basedir, k, v, parent_type, main='main'):
    """Collect the task/handler/meta files belonging to each role in *v*.

    Role entries may be plain strings or dicts with a ``role``/``name``
    key; dict entries tagged ``skip_ansible_lint`` are ignored.

    :raises SystemExit: when a role dict (outside of ``dependencies``)
        carries neither a ``role`` nor a ``name`` key.
    """
    found = []
    for role in v:
        if not isinstance(role, dict):
            # plain string role reference
            found.extend(_look_for_role_files(basedir, role, main=main))
            continue
        if 'role' in role or 'name' in role:
            # honor explicit skip tag on the role entry
            if 'tags' not in role or 'skip_ansible_lint' not in role['tags']:
                role_name = role.get('role', role.get('name'))
                found.extend(_look_for_role_files(basedir, role_name, main=main))
        elif k != 'dependencies':
            raise SystemExit('role dict {0} does not contain a "role" '
                             'or "name" key'.format(role))
    return found
def _load_library_if_exists(path):
if os.path.exists(path):
module_loader.add_directory(path)
def _rolepath(basedir, role):
    """Return the first existing directory that could hold *role*, or None.

    Candidate locations are tried in a fixed priority order; the
    configured DEFAULT_ROLES_PATH entries (string or list) are appended
    before the final *basedir* fallback.  When a role directory is found,
    its ``library`` subdirectory is registered with the module loader.
    """
    candidates = [
        # role included from a playbook
        path_dwim(basedir, os.path.join('roles', role)),
        path_dwim(basedir, role),
        # role included from roles/[role]/meta/main.yml
        path_dwim(
            basedir, os.path.join('..', '..', '..', 'roles', role)
        ),
        path_dwim(basedir, os.path.join('..', '..', role)),
    ]

    if constants.DEFAULT_ROLES_PATH:
        locations = constants.DEFAULT_ROLES_PATH
        if isinstance(locations, six.string_types):
            locations = locations.split(os.pathsep)
        for location in locations:
            candidates.append(path_dwim(os.path.expanduser(location), role))

    candidates.append(path_dwim(basedir, ''))

    role_path = next((c for c in candidates if os.path.isdir(c)), None)
    if role_path:
        _load_library_if_exists(os.path.join(role_path, 'library'))
    return role_path
def _look_for_role_files(basedir, role, main='main'):
    """Return yaml task/handler/meta files of *role* as path/type dicts.

    :param basedir: directory the role reference was found in.
    :param role: role name (or path fragment) resolved via ``_rolepath``.
    :param main: accepted for API compatibility; currently unused because
        every yaml file under the role is collected, not only ``main``.
    :returns: list of ``{'path': ..., 'type': ...}`` dicts, or ``[]`` when
        the role cannot be located.
    """
    role_path = _rolepath(basedir, role)
    if not role_path:
        return []

    results = []
    for kind in ['tasks', 'handlers', 'meta']:
        current_path = os.path.join(role_path, kind)
        # loop variables renamed: the originals shadowed the builtins
        # ``dir`` and (Python 2) ``file``
        for dirpath, _subdirs, filenames in os.walk(current_path):
            for filename in filenames:
                if filename.lower().endswith(('.yml', '.yaml')):
                    results.append({'path': os.path.join(dirpath, filename),
                                    'type': kind})
    return results
def rolename(filepath):
    """Extract the role name from a path containing ``roles/``.

    :param filepath: any file path, e.g. ``roles/foo/tasks/main.yml``.
    :returns: the role name (``'foo'``), or ``''`` when the path does not
        reference a role.

    Fix: the previous ``role[:role.find('/')]`` dropped the final
    character for paths with no trailing component (``roles/foo`` ->
    ``'fo'``) because ``find`` returned -1; ``partition`` handles both
    cases correctly.
    """
    idx = filepath.find('roles/')
    if idx < 0:
        return ''
    remainder = filepath[idx + len('roles/'):]
    # everything up to the next path separator (or the whole remainder)
    role, _sep, _rest = remainder.partition('/')
    return role
def _kv_to_dict(v):
    """Convert a ``module key=value ...`` string into a normalized dict."""
    command, args, kwargs = tokenize(v)
    return dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs)
def normalize_task_v2(task):
    """Ensure the task has an ``action`` key; convert strings to objects.

    Uses Ansible 2's ModuleArgsParser to split the task into action name,
    arguments and delegate_to, then rebuilds the task into the internal
    ``{'action': {'__ansible_module__': ..., '__ansible_arguments__': [...]}}``
    shape consumed by the lint rules.

    :param task: raw task dict parsed from YAML.
    :returns: normalized task dict.
    :raises SystemExit: when the task cannot be parsed at all.
    """
    result = dict()
    mod_arg_parser = ModuleArgsParser(task)
    try:
        action, arguments, result['delegate_to'] = mod_arg_parser.parse()
    except AnsibleParserError as e:
        try:
            # point the user at the failing task, then strip the debug
            # keys so the pretty-printed dump is clean
            task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
            del task[FILENAME_KEY]
            del task[LINE_NUMBER_KEY]
        except KeyError:
            task_info = "Unknown"
        # pprint is stdlib and always importable; the old ImportError
        # guard around it was dead code
        import pprint
        task_pprint = pprint.PrettyPrinter(indent=2).pformat(task)
        # Fix: BaseException.message does not exist on Python 3, so
        # ``e.message`` raised AttributeError and masked the real parse
        # error; keep .message for old Python 2 Ansible, else str(e).
        message = getattr(e, 'message', None) or str(e)
        raise SystemExit("Couldn't parse task at %s (%s)\n%s" % (task_info, message, task_pprint))

    # denormalize shell -> command conversion
    if '_uses_shell' in arguments:
        action = 'shell'
        del(arguments['_uses_shell'])

    for (k, v) in list(task.items()):
        if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
            # we don't want to re-assign these values, which were
            # determined by the ModuleArgsParser() above
            continue
        else:
            result[k] = v

    result['action'] = dict(__ansible_module__=action)

    if '_raw_params' in arguments:
        result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
        del(arguments['_raw_params'])
    else:
        result['action']['__ansible_arguments__'] = list()

    # argv form (command/shell modules) replaces empty raw params
    if 'argv' in arguments and not result['action']['__ansible_arguments__']:
        result['action']['__ansible_arguments__'] = arguments['argv']
        del(arguments['argv'])

    result['action'].update(arguments)
    return result
def normalize_task_v1(task):
    """Normalize an Ansible 1.x task dict into the internal representation.

    Ensures the returned dict has an ``action`` key shaped as
    ``{'__ansible_module__': ..., '__ansible_arguments__': [...], **kwargs}``
    while copying the remaining whitelisted keys through unchanged.

    :param task: raw task dict parsed from YAML.
    :returns: normalized task dict.
    :raises RuntimeError: when a value has an unexpected type (usually a
        playbook include parsed as if it were a task).
    """
    result = dict()
    for (k, v) in task.items():
        if k in VALID_KEYS or k.startswith('with_'):
            if k == 'local_action' or k == 'action':
                # action may be a plain "module key=value ..." string
                if not isinstance(v, dict):
                    v = _kv_to_dict(v)
                v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
                result['action'] = v
            else:
                result[k] = v
        else:
            # non-whitelisted key: the key itself is the module name
            if isinstance(v, six.string_types):
                v = _kv_to_dict(k + ' ' + v)
            elif not v:
                # module invoked without arguments (e.g. ``ping:``)
                v = dict(__ansible_module__=k)
            else:
                if isinstance(v, dict):
                    v.update(dict(__ansible_module__=k))
                else:
                    if k == '__line__':
                        # Keep the line number stored
                        result[k] = v
                        continue
                    else:
                        # Tasks that include playbooks (rather than task files)
                        # can get here
                        # https://github.com/ansible/ansible-lint/issues/138
                        raise RuntimeError("Was not expecting value %s of type %s for key %s\n"
                                           "Task: %s. Check the syntax of your playbook using "
                                           "ansible-playbook --syntax-check" %
                                           (str(v), type(v), k, str(task)))
            v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
            result['action'] = v
    if 'module' in result['action']:
        # this happens when a task uses
        #   local_action:
        #     module: ec2
        #     etc...
        result['action']['__ansible_module__'] = result['action']['module']
        del(result['action']['module'])
    if 'args' in result:
        # fold the separate args dict into the action
        result['action'].update(result.get('args'))
        del(result['args'])
    return result
def normalize_task(task, filename):
    """Normalize *task* with the parser matching the running Ansible.

    The original action type and the file name are re-attached to the
    normalized result so later checks can report accurate locations.
    """
    action_type = task.pop('__ansible_action_type__', 'task')
    normalizer = normalize_task_v1 if ANSIBLE_VERSION < 2 else normalize_task_v2
    task = normalizer(task)
    task[FILENAME_KEY] = filename
    task['__ansible_action_type__'] = action_type
    return task
def task_to_str(task):
    """Render a normalized task as a short human-readable string.

    Prefers the task's explicit name; otherwise builds a
    "module key=value ... raw args" summary from the action dict.
    """
    name = task.get("name")
    if name:
        return name
    action = task.get("action")
    kv_pairs = [
        u"{0}={1}".format(key, value)
        for (key, value) in action.items()
        if key not in ["__ansible_module__", "__ansible_arguments__"]
    ]
    args = " ".join(kv_pairs + action.get("__ansible_arguments__"))
    return u"{0} {1}".format(action["__ansible_module__"], args)
def extract_from_list(blocks, candidates):
    """Pull the action lists stored under *candidates* out of *blocks*.

    :raises RuntimeError: when a candidate key holds a non-list,
        non-None value.
    """
    results = list()
    for block in blocks:
        if not isinstance(block, dict):
            continue
        for candidate in candidates:
            if candidate not in block:
                continue
            value = block[candidate]
            if isinstance(value, list):
                results.extend(add_action_type(value, candidate))
            elif value is not None:
                raise RuntimeError(
                    "Key '%s' defined, but bad value: '%s'" %
                    (candidate, str(value)))
    return results
def add_action_type(actions, action_type):
    """Tag each action dict with the type derived from its section name."""
    derived = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
    tagged = list()
    for action in actions:
        action['__ansible_action_type__'] = derived
        tagged.append(action)
    return tagged
def get_action_tasks(yaml, file):
    """Flatten a parsed file into the list of actionable tasks.

    Block/rescue/always wrappers are expanded and then dropped, and tasks
    that merely include or import other files are filtered out.
    """
    if file['type'] in ['tasks', 'handlers']:
        tasks = add_action_type(yaml, file['type'])
    else:
        tasks = list()
        tasks.extend(extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks']))

    # Pull the sub-elements of block/rescue/always into the task list ...
    tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always']))
    # ... then drop the wrapper elements themselves.
    wrapper_keys = ('block', 'rescue', 'always')
    tasks[:] = [task for task in tasks if all(key not in task for key in wrapper_keys)]

    # include/import tasks reference other files; they are handled elsewhere
    include_keys = set(['include', 'include_tasks', 'import_playbook', 'import_tasks'])
    return [task for task in tasks if include_keys.isdisjoint(task.keys())]
def get_normalized_tasks(yaml, file):
    """Return the normalized form of every lintable task in *file*."""
    normalized = []
    for task in get_action_tasks(yaml, file):
        # An empty `tags` block parses to None, hence the `or []` guard —
        # `task.get('tags', [])` alone would not suffice.
        if 'skip_ansible_lint' in (task.get('tags') or []):
            # No need to normalize a task we are skipping anyway.
            continue
        normalized.append(normalize_task(task, file['path']))
    return normalized
def parse_yaml_linenumbers(data, filename):
    """Parses yaml as ansible.utils.parse_yaml but with linenumbers.

    The line numbers are stored in each node's LINE_NUMBER_KEY key, and
    the file name in FILENAME_KEY.

    :param data: raw yaml text (or stream) to parse.
    :param filename: path recorded on every mapping for error reporting.
    :raises SystemExit: when the yaml cannot be parsed.
    """
    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        if ANSIBLE_VERSION < 2:
            mapping = Constructor.construct_mapping(loader, node, deep=deep)
        else:
            mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
        if hasattr(node, '__line__'):
            mapping[LINE_NUMBER_KEY] = node.__line__
        else:
            mapping[LINE_NUMBER_KEY] = mapping._line_number
        mapping[FILENAME_KEY] = filename
        return mapping

    try:
        if ANSIBLE_VERSION < 2:
            loader = yaml.Loader(data)
        else:
            import inspect
            kwargs = {}
            # Fix: inspect.getargspec was removed in Python 3.11; prefer
            # getfullargspec (same .args attribute) and fall back only on
            # very old interpreters that lack it.
            getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
            if 'vault_password' in getargspec(AnsibleLoader.__init__).args:
                kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD
            loader = AnsibleLoader(data, **kwargs)
        loader.compose_node = compose_node
        loader.construct_mapping = construct_mapping
        data = loader.get_single_data()
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
    return data
def get_first_cmd_arg(task):
    """Return the first argument of a command-style task, or None.

    Handles both the ``cmd`` form (a shell string that gets split) and
    the raw-params form (the ``__ansible_arguments__`` list).

    Fix: only IndexError was caught before, so a task whose action had
    neither ``cmd`` nor ``__ansible_arguments__`` leaked a KeyError;
    that case now also returns None.
    """
    try:
        if 'cmd' in task['action']:
            first_cmd_arg = task['action']['cmd'].split()[0]
        else:
            first_cmd_arg = task['action']['__ansible_arguments__'][0]
    except (KeyError, IndexError):
        return None
    return first_cmd_arg
def append_skipped_rules(pyyaml_data, file_text, file_type):
    """Append 'skipped_rules' to individual tasks or single metadata block.

    For a file, uses 2nd parser (ruamel.yaml) to pull comments out of
    yaml subsets, check for '# noqa' skipped rules, and append any skips
    to the original parser (pyyaml) data relied on by the remainder of
    ansible-lint.

    :param pyyaml_data: file text parsed via ansible and pyyaml.
    :param file_text: raw file text.
    :param file_type: type of file: tasks, handlers or meta.
    :returns: original pyyaml_data altered with a 'skipped_rules' list
        added to individual tasks, or added to the single metadata block.
    """
    try:
        return _append_skipped_rules(pyyaml_data, file_text, file_type)
    except RuntimeError as exc:
        # Notify the user of the skip error, but do not stop linting and
        # do not change the exit code.
        print('Error trying to append skipped rules: {!r}'.format(exc))
        return pyyaml_data
def _append_skipped_rules(pyyaml_data, file_text, file_type):
    """Attach '# noqa' skip lists found via ruamel.yaml onto pyyaml tasks.

    :raises RuntimeError: on an unexpected file type or when the two
        parse trees cannot be matched task-for-task.
    """
    # re-parse the raw text with the comment-preserving parser
    ruamel_data = ruamel.yaml.YAML().load(file_text)

    if file_type == 'meta':
        # meta files carry a single skip list on their only block
        pyyaml_data[0]['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_data)
        return pyyaml_data

    # build the lists of (possibly nested) task blocks for both trees
    if file_type in ('tasks', 'handlers'):
        ruamel_task_blocks = ruamel_data
        pyyaml_task_blocks = pyyaml_data
    elif file_type == 'playbook':
        try:
            pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
            ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
        except (AttributeError, TypeError):
            # TODO(awcrosby): running ansible-lint on any .yml file will
            # assume it is a playbook, check needs to be added higher in the
            # call stack, and can remove this except
            return pyyaml_data
    else:
        raise RuntimeError('Unexpected file type: {}'.format(file_type))

    # walk both parse trees in lockstep and copy the skips over
    pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
    ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
    for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
        if pyyaml_task.get('name') != ruamel_task.get('name'):
            raise RuntimeError('Error in matching skip comment to a task')
        pyyaml_task['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_task)
    return pyyaml_data
def _get_task_blocks_from_playbook(playbook):
"""Return parts of playbook that contains tasks, and nested tasks.
:param playbook: playbook yaml from yaml parser.
:returns: list of task dictionaries.
"""
PLAYBOOK_TASK_KEYWORDS = [
'tasks',
'pre_tasks',
'post_tasks',
'handlers',
]
task_blocks = []
for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS):
task_blocks.extend(play.get(key, []))
return task_blocks
def _get_tasks_from_blocks(task_blocks):
"""Get list of tasks from list made of tasks and nested tasks."""
NESTED_TASK_KEYS = [
'block',
'always',
'rescue',
]
def get_nested_tasks(task):
return (
subtask
for k in NESTED_TASK_KEYS if k in task
for subtask in task[k]
)
for task in task_blocks:
for sub_task in get_nested_tasks(task):
yield sub_task
yield task
def _get_rule_skips_from_yaml(yaml_input):
    """Traverse yaml for comments with rule skips and return the rule list."""
    comment_strs = []

    def walk(obj):
        # every ruamel node exposes its comment attachments through ``ca``
        comment_strs.append(str(obj.ca.items))
        if isinstance(obj, dict):
            for value in obj.values():
                if isinstance(value, (dict, list)):
                    walk(value)
        elif isinstance(obj, list):
            for element in obj:
                if isinstance(element, (dict, list)):
                    walk(element)

    walk(yaml_input)

    rule_id_list = []
    for comment_str in comment_strs:
        # the stringified comment dict is repr()-escaped, hence the
        # literal backslash-n split
        for line in comment_str.split('\\n'):
            rule_id_list.extend(get_rule_skips_from_line(line))
    return rule_id_list
def get_rule_skips_from_line(line):
    """Return the rule ids following a '# noqa' marker on *line*, if any."""
    marker = '# noqa'
    if marker not in line:
        return []
    return line.split(marker)[1].split()
| 35.657143 | 100 | 0.618781 |
import glob
import imp
import os
from itertools import product
import six
from ansible import constants
from ansible.errors import AnsibleError
try:
from ansible.parsing.splitter import split_args
except ImportError:
# Fallback on the Ansible 1.9 module
from ansible.module_utils.splitter import split_args
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
import ruamel.yaml
try:
from ansible.utils import parse_yaml_from_file
from ansible.utils import path_dwim
from ansible.utils.template import template as ansible_template
from ansible.utils import module_finder
module_loader = module_finder
ANSIBLE_VERSION = 1
except ImportError:
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.errors import AnsibleParserError
ANSIBLE_VERSION = 2
# ansible-lint doesn't need/want to know about encrypted secrets, but it needs
DEFAULT_VAULT_PASSWORD = 'x'
def parse_yaml_from_file(filepath):
dl = DataLoader()
if hasattr(dl, 'set_vault_password'):
dl.set_vault_password(DEFAULT_VAULT_PASSWORD)
return dl.load_from_file(filepath)
def path_dwim(basedir, given):
dl = DataLoader()
dl.set_basedir(basedir)
return dl.path_dwim(given)
def ansible_template(basedir, varname, templatevars, **kwargs):
dl = DataLoader()
dl.set_basedir(basedir)
templar = Templar(dl, variables=templatevars)
return templar.template(varname, **kwargs)
try:
from ansible.plugins import module_loader
except ImportError:
from ansible.plugins.loader import module_loader
LINE_NUMBER_KEY = '__line__'
FILENAME_KEY = '__file__'
VALID_KEYS = [
'name', 'action', 'when', 'async', 'poll', 'notify',
'first_available_file', 'include', 'include_tasks', 'import_tasks', 'import_playbook',
'tags', 'register', 'ignore_errors', 'delegate_to',
'local_action', 'transport', 'remote_user', 'sudo',
'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'always_run',
'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay',
'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
'become', 'become_user', 'become_method', FILENAME_KEY,
]
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
'tasks': 'task',
'handlers': 'handler',
'pre_tasks': 'task',
'post_tasks': 'task',
'block': 'meta',
'rescue': 'meta',
'always': 'meta',
}
def load_plugins(directory):
result = []
fh = None
for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
pluginname = os.path.basename(pluginfile.replace('.py', ''))
try:
fh, filename, desc = imp.find_module(pluginname, [directory])
mod = imp.load_module(pluginname, fh, filename, desc)
obj = getattr(mod, pluginname)()
result.append(obj)
finally:
if fh:
fh.close()
return result
def tokenize(line):
tokens = line.lstrip().split(" ")
if tokens[0] == '-':
tokens = tokens[1:]
if tokens[0] == 'action:' or tokens[0] == 'local_action:':
tokens = tokens[1:]
command = tokens[0].replace(":", "")
args = list()
kwargs = dict()
nonkvfound = False
for arg in tokens[1:]:
if "=" in arg and not nonkvfound:
kv = arg.split("=", 1)
kwargs[kv[0]] = kv[1]
else:
nonkvfound = True
args.append(arg)
return (command, args, kwargs)
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook, playbook_dir):
if not os.path.exists(playbook[0]):
return []
if playbook[1] == 'role':
playbook_ds = {'roles': [{'role': playbook[0]}]}
else:
try:
playbook_ds = parse_yaml_from_file(playbook[0])
except AnsibleError as e:
raise SystemExit(str(e))
results = []
basedir = os.path.dirname(playbook[0])
items = _playbook_items(playbook_ds)
for item in items:
for child in play_children(basedir, item, playbook[1], playbook_dir):
if "$" in child['path'] or "{{" in child['path']:
continue
valid_tokens = list()
for token in split_args(child['path']):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
results.append({
'path': path_dwim(basedir, path),
'type': child['type']
})
return results
def template(basedir, value, vars, fail_on_undefined=False, **kwargs):
try:
value = ansible_template(os.path.abspath(basedir), value, vars,
**dict(kwargs, fail_on_undefined=fail_on_undefined))
except (AnsibleError, ValueError):
# templating failed, so just keep value as is.
pass
return value
def play_children(basedir, item, parent_type, playbook_dir):
delegate_map = {
'tasks': _taskshandlers_children,
'pre_tasks': _taskshandlers_children,
'post_tasks': _taskshandlers_children,
'block': _taskshandlers_children,
'include': _include_children,
'import_playbook': _include_children,
'roles': _roles_children,
'dependencies': _roles_children,
'handlers': _taskshandlers_children,
'include_tasks': _include_children,
'import_tasks': _include_children,
}
(k, v) = item
play_library = os.path.join(os.path.abspath(basedir), 'library')
_load_library_if_exists(play_library)
if k in delegate_map:
if v:
v = template(os.path.abspath(basedir),
v,
dict(playbook_dir=os.path.abspath(basedir)),
fail_on_undefined=False)
return delegate_map[k](basedir, k, v, parent_type)
return []
def _include_children(basedir, k, v, parent_type):
# handle include: filename.yml tags=blah
(command, args, kwargs) = tokenize("{0}: {1}".format(k, v))
result = path_dwim(basedir, args[0])
if not os.path.exists(result) and not basedir.endswith('tasks'):
result = path_dwim(os.path.join(basedir, '..', 'tasks'), v)
return [{'path': result, 'type': parent_type}]
def _taskshandlers_children(basedir, k, v, parent_type):
results = []
for th in v:
if 'include' in th:
append_children(th['include'], basedir, k, parent_type, results)
elif 'include_tasks' in th:
append_children(th['include_tasks'], basedir, k, parent_type, results)
elif 'import_playbook' in th:
append_children(th['import_playbook'], basedir, k, parent_type, results)
elif 'import_tasks' in th:
append_children(th['import_tasks'], basedir, k, parent_type, results)
elif 'import_role' in th:
th = normalize_task_v2(th)
results.extend(_roles_children(basedir, k, [th['action'].get('name')], parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'include_role' in th:
th = normalize_task_v2(th)
results.extend(_roles_children(basedir, k, [th['action'].get('name')],
parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'block' in th:
results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type))
if 'rescue' in th:
results.extend(_taskshandlers_children(basedir, k, th['rescue'], parent_type))
if 'always' in th:
results.extend(_taskshandlers_children(basedir, k, th['always'], parent_type))
return results
def append_children(taskhandler, basedir, k, parent_type, results):
# when taskshandlers_children is called for playbooks, the
# actual type of the included tasks is the section containing the
# include, e.g. tasks, pre_tasks, or handlers.
if parent_type == 'playbook':
playbook_section = k
else:
playbook_section = parent_type
results.append({
'path': path_dwim(basedir, taskhandler),
'type': playbook_section
})
def _roles_children(basedir, k, v, parent_type, main='main'):
results = []
for role in v:
if isinstance(role, dict):
if 'role' in role or 'name' in role:
if 'tags' not in role or 'skip_ansible_lint' not in role['tags']:
results.extend(_look_for_role_files(basedir,
role.get('role', role.get('name')),
main=main))
elif k != 'dependencies':
raise SystemExit('role dict {0} does not contain a "role" '
'or "name" key'.format(role))
else:
results.extend(_look_for_role_files(basedir, role, main=main))
return results
def _load_library_if_exists(path):
if os.path.exists(path):
module_loader.add_directory(path)
def _rolepath(basedir, role):
role_path = None
possible_paths = [
# if included from a playbook
path_dwim(basedir, os.path.join('roles', role)),
path_dwim(basedir, role),
# if included from roles/[role]/meta/main.yml
path_dwim(
basedir, os.path.join('..', '..', '..', 'roles', role)
),
path_dwim(basedir, os.path.join('..', '..', role)),
]
if constants.DEFAULT_ROLES_PATH:
search_locations = constants.DEFAULT_ROLES_PATH
if isinstance(search_locations, six.string_types):
search_locations = search_locations.split(os.pathsep)
for loc in search_locations:
loc = os.path.expanduser(loc)
possible_paths.append(path_dwim(loc, role))
possible_paths.append(path_dwim(basedir, ''))
for path_option in possible_paths:
if os.path.isdir(path_option):
role_path = path_option
break
if role_path:
_load_library_if_exists(os.path.join(role_path, 'library'))
return role_path
def _look_for_role_files(basedir, role, main='main'):
role_path = _rolepath(basedir, role)
if not role_path:
return []
results = []
for th in ['tasks', 'handlers', 'meta']:
current_path = os.path.join(role_path, th)
for dir, subdirs, files in os.walk(current_path):
for file in files:
file_ignorecase = file.lower()
if file_ignorecase.endswith(('.yml', '.yaml')):
thpath = os.path.join(dir, file)
results.append({'path': thpath, 'type': th})
return results
def rolename(filepath):
idx = filepath.find('roles/')
if idx < 0:
return ''
role = filepath[idx+6:]
role = role[:role.find('/')]
return role
def _kv_to_dict(v):
(command, args, kwargs) = tokenize(v)
return (dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs))
def normalize_task_v2(task):
result = dict()
mod_arg_parser = ModuleArgsParser(task)
try:
action, arguments, result['delegate_to'] = mod_arg_parser.parse()
except AnsibleParserError as e:
try:
task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
del task[FILENAME_KEY]
del task[LINE_NUMBER_KEY]
except KeyError:
task_info = "Unknown"
try:
import pprint
pp = pprint.PrettyPrinter(indent=2)
task_pprint = pp.pformat(task)
except ImportError:
task_pprint = task
raise SystemExit("Couldn't parse task at %s (%s)\n%s" % (task_info, e.message, task_pprint))
if '_uses_shell' in arguments:
action = 'shell'
del(arguments['_uses_shell'])
for (k, v) in list(task.items()):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
# determined by the ModuleArgsParser() above
continue
else:
result[k] = v
result['action'] = dict(__ansible_module__=action)
if '_raw_params' in arguments:
result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
del(arguments['_raw_params'])
else:
result['action']['__ansible_arguments__'] = list()
if 'argv' in arguments and not result['action']['__ansible_arguments__']:
result['action']['__ansible_arguments__'] = arguments['argv']
del(arguments['argv'])
result['action'].update(arguments)
return result
def normalize_task_v1(task):
result = dict()
for (k, v) in task.items():
if k in VALID_KEYS or k.startswith('with_'):
if k == 'local_action' or k == 'action':
if not isinstance(v, dict):
v = _kv_to_dict(v)
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
else:
result[k] = v
else:
if isinstance(v, six.string_types):
v = _kv_to_dict(k + ' ' + v)
elif not v:
v = dict(__ansible_module__=k)
else:
if isinstance(v, dict):
v.update(dict(__ansible_module__=k))
else:
if k == '__line__':
# Keep the line number stored
result[k] = v
continue
else:
# Tasks that include playbooks (rather than task files)
# can get here
# https://github.com/ansible/ansible-lint/issues/138
raise RuntimeError("Was not expecting value %s of type %s for key %s\n"
"Task: %s. Check the syntax of your playbook using "
"ansible-playbook --syntax-check" %
(str(v), type(v), k, str(task)))
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
if 'module' in result['action']:
# this happens when a task uses
# local_action:
# module: ec2
# etc...
result['action']['__ansible_module__'] = result['action']['module']
del(result['action']['module'])
if 'args' in result:
result['action'].update(result.get('args'))
del(result['args'])
return result
def normalize_task(task, filename):
ansible_action_type = task.get('__ansible_action_type__', 'task')
if '__ansible_action_type__' in task:
del(task['__ansible_action_type__'])
if ANSIBLE_VERSION < 2:
task = normalize_task_v1(task)
else:
task = normalize_task_v2(task)
task[FILENAME_KEY] = filename
task['__ansible_action_type__'] = ansible_action_type
return task
def task_to_str(task):
name = task.get("name")
if name:
return name
action = task.get("action")
args = " ".join([u"{0}={1}".format(k, v) for (k, v) in action.items()
if k not in ["__ansible_module__", "__ansible_arguments__"]] +
action.get("__ansible_arguments__"))
return u"{0} {1}".format(action["__ansible_module__"], args)
def extract_from_list(blocks, candidates):
results = list()
for block in blocks:
for candidate in candidates:
if isinstance(block, dict) and candidate in block:
if isinstance(block[candidate], list):
results.extend(add_action_type(block[candidate], candidate))
elif block[candidate] is not None:
raise RuntimeError(
"Key '%s' defined, but bad value: '%s'" %
(candidate, str(block[candidate])))
return results
def add_action_type(actions, action_type):
results = list()
for action in actions:
action['__ansible_action_type__'] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
results.append(action)
return results
def get_action_tasks(yaml, file):
tasks = list()
if file['type'] in ['tasks', 'handlers']:
tasks = add_action_type(yaml, file['type'])
else:
tasks.extend(extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks']))
# Add sub-elements of block/rescue/always to tasks list
tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always']))
# Remove block/rescue/always elements from tasks list
block_rescue_always = ('block', 'rescue', 'always')
tasks[:] = [task for task in tasks if all(k not in task for k in block_rescue_always)]
return [task for task in tasks if
set(['include', 'include_tasks',
'import_playbook', 'import_tasks']).isdisjoint(task.keys())]
def get_normalized_tasks(yaml, file):
tasks = get_action_tasks(yaml, file)
res = []
for task in tasks:
# An empty `tags` block causes `None` to be returned if
# the `or []` is not present - `task.get('tags', [])`
# does not suffice.
if 'skip_ansible_lint' in (task.get('tags') or []):
# No need to normalize_task is we are skipping it.
continue
res.append(normalize_task(task, file['path']))
return res
def parse_yaml_linenumbers(data, filename):
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
if ANSIBLE_VERSION < 2:
mapping = Constructor.construct_mapping(loader, node, deep=deep)
else:
mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
if hasattr(node, '__line__'):
mapping[LINE_NUMBER_KEY] = node.__line__
else:
mapping[LINE_NUMBER_KEY] = mapping._line_number
mapping[FILENAME_KEY] = filename
return mapping
try:
if ANSIBLE_VERSION < 2:
loader = yaml.Loader(data)
else:
import inspect
kwargs = {}
if 'vault_password' in inspect.getargspec(AnsibleLoader.__init__).args:
kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD
loader = AnsibleLoader(data, **kwargs)
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
return data
def get_first_cmd_arg(task):
try:
if 'cmd' in task['action']:
first_cmd_arg = task['action']['cmd'].split()[0]
else:
first_cmd_arg = task['action']['__ansible_arguments__'][0]
except IndexError:
return None
return first_cmd_arg
def append_skipped_rules(pyyaml_data, file_text, file_type):
try:
yaml_skip = _append_skipped_rules(pyyaml_data, file_text, file_type)
except RuntimeError as exc:
# Notify user of skip error, do not stop, do not change exit code
print('Error trying to append skipped rules: {!r}'.format(exc))
return pyyaml_data
return yaml_skip
def _append_skipped_rules(pyyaml_data, file_text, file_type):
# parse file text using 2nd parser library
yaml = ruamel.yaml.YAML()
ruamel_data = yaml.load(file_text)
if file_type == 'meta':
pyyaml_data[0]['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_data)
return pyyaml_data
# create list of blocks of tasks or nested tasks
if file_type in ('tasks', 'handlers'):
ruamel_task_blocks = ruamel_data
pyyaml_task_blocks = pyyaml_data
elif file_type == 'playbook':
try:
pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
except (AttributeError, TypeError):
# TODO(awcrosby): running ansible-lint on any .yml file will
# assume it is a playbook, check needs to be added higher in the
# call stack, and can remove this except
return pyyaml_data
else:
raise RuntimeError('Unexpected file type: {}'.format(file_type))
# get tasks from blocks of tasks
pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
# append skipped_rules for each task
for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
if pyyaml_task.get('name') != ruamel_task.get('name'):
raise RuntimeError('Error in matching skip comment to a task')
pyyaml_task['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_task)
return pyyaml_data
def _get_task_blocks_from_playbook(playbook):
PLAYBOOK_TASK_KEYWORDS = [
'tasks',
'pre_tasks',
'post_tasks',
'handlers',
]
task_blocks = []
for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS):
task_blocks.extend(play.get(key, []))
return task_blocks
def _get_tasks_from_blocks(task_blocks):
NESTED_TASK_KEYS = [
'block',
'always',
'rescue',
]
def get_nested_tasks(task):
return (
subtask
for k in NESTED_TASK_KEYS if k in task
for subtask in task[k]
)
for task in task_blocks:
for sub_task in get_nested_tasks(task):
yield sub_task
yield task
def _get_rule_skips_from_yaml(yaml_input):
def traverse_yaml(obj):
yaml_comment_obj_strs.append(str(obj.ca.items))
if isinstance(obj, dict):
for key, val in obj.items():
if isinstance(val, (dict, list)):
traverse_yaml(val)
elif isinstance(obj, list):
for e in obj:
if isinstance(e, (dict, list)):
traverse_yaml(e)
else:
return
yaml_comment_obj_strs = []
traverse_yaml(yaml_input)
rule_id_list = []
for comment_obj_str in yaml_comment_obj_strs:
for line in comment_obj_str.split('\\n'):
rule_id_list.extend(get_rule_skips_from_line(line))
return rule_id_list
def get_rule_skips_from_line(line):
    """Return the rule ids skipped on *line* via a '# noqa' comment.

    Everything after the '# noqa' marker is treated as a whitespace-separated
    list of rule ids, e.g. "- debug: var=x # noqa 204 206" -> ['204', '206'].
    Returns an empty list when the line carries no noqa marker.
    """
    # The string literals below were truncated in the stored copy (the
    # '# noqa' marker text was stripped); restored here so the function is
    # syntactically valid again.
    rule_id_list = []
    if '# noqa' in line:
        noqa_text = line.split('# noqa')[1]
        rule_id_list = noqa_text.split()
    return rule_id_list
| true | true |
f72b120e0e4865b2e5c26ca09713f83332de05bd | 43,459 | py | Python | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2016-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
import time
from collections import Counter, defaultdict
from copy import deepcopy
from six import iteritems
from datadog_checks.checks.openmetrics import OpenMetricsBaseCheck
from datadog_checks.config import is_affirmative
from datadog_checks.errors import CheckException
from datadog_checks.utils.common import to_string
try:
    # this module is only available in agent 6
    from datadog_agent import get_clustername
except ImportError:
    # Fallback for agent 5 / test environments: behave as "no cluster name".
    def get_clustername():
        return ""
# Metric families this check knows how to translate into gauges.
METRIC_TYPES = ['counter', 'gauge']
# As case can vary depending on Kubernetes versions, we match the lowercase string
WHITELISTED_WAITING_REASONS = ['errimagepull', 'imagepullbackoff', 'crashloopbackoff', 'containercreating']
WHITELISTED_TERMINATED_REASONS = ['oomkilled', 'containercannotrun', 'error']
# Translation table from kube-state-metrics label names to the tag names
# historically emitted by the Datadog Kubernetes integrations.
kube_labels_mapper = {
    'namespace': 'kube_namespace',
    'job': 'kube_job',
    'cronjob': 'kube_cronjob',
    'pod': 'pod_name',
    'phase': 'pod_phase',
    'daemonset': 'kube_daemon_set',
    'replicationcontroller': 'kube_replication_controller',
    'replicaset': 'kube_replica_set',
    # NOTE(review): key has a trailing space, so the 'statefulset' label is
    # likely never matched here — confirm against upstream before fixing.
    'statefulset ': 'kube_stateful_set',
    'deployment': 'kube_deployment',
    'container': 'kube_container_name',
    'container_id': 'container_id',
    'image': 'image_name',
}
class KubernetesState(OpenMetricsBaseCheck):
    """
    Collect kube-state-metrics metrics in the Prometheus format
    See https://github.com/kubernetes/kube-state-metrics
    """
    class CronJobCount:
        # Accumulates the number of failed/succeeded runs for one cron job
        # across check runs, using the job-name timestamp suffix to make sure
        # each run is only counted once.
        def __init__(self):
            # count: running total submitted as a monotonic count
            self.count = 0
            # newest job timestamp seen during the previous check run
            self.previous_run_max_ts = 0
            # newest job timestamp seen so far during the current check run
            self.current_run_max_ts = 0
        def set_previous_and_reset_current_ts(self):
            # Called at the end of a check run: roll the timestamp window
            # forward so the next run only counts jobs newer than it.
            if self.current_run_max_ts > 0:
                self.previous_run_max_ts = self.current_run_max_ts
                self.current_run_max_ts = 0
        def update_current_ts_and_add_count(self, job_ts, count):
            # Only add counts for jobs created after the newest job of the
            # previous run (older ones were already counted).
            if job_ts > self.previous_run_max_ts and count > 0:
                self.count += count
            self.current_run_max_ts = max(self.current_run_max_ts, job_ts)
    # 0 disables the OpenMetrics per-check metric limit for this check.
    DEFAULT_METRIC_LIMIT = 0
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Build the OpenMetrics scraper configuration for kube-state-metrics.

        Only the first entry of `instances` is used; per-metric transformers
        and cron-job/job accumulators are initialized here.
        """
        # We do not support more than one instance of kube-state-metrics
        instance = instances[0]
        kubernetes_state_instance = self._create_kubernetes_state_prometheus_instance(instance)
        # First deprecation phase: we keep ksm labels by default
        # Next iteration: remove ksm labels by default
        # Last iteration: remove this option
        self.keep_ksm_labels = is_affirmative(kubernetes_state_instance.get('keep_ksm_labels', True))
        generic_instances = [kubernetes_state_instance]
        super(KubernetesState, self).__init__(name, init_config, agentConfig, instances=generic_instances)
        # condition label value -> service check status, for conditions where
        # "true" is healthy (e.g. Ready) ...
        self.condition_to_status_positive = {'true': self.OK, 'false': self.CRITICAL, 'unknown': self.UNKNOWN}
        # ... and for conditions where "true" is unhealthy (e.g. DiskPressure).
        self.condition_to_status_negative = {'true': self.CRITICAL, 'false': self.OK, 'unknown': self.UNKNOWN}
        # Parameters for the count_objects_by_tags method
        self.object_count_params = {
            'kube_persistentvolume_status_phase': {
                'metric_name': 'persistentvolumes.by_phase',
                'allowed_labels': ['storageclass', 'phase'],
            },
            'kube_service_spec_type': {'metric_name': 'service.count', 'allowed_labels': ['namespace', 'type']},
        }
        # Per-metric handlers invoked by process() instead of the generic
        # gauge submission.
        self.METRIC_TRANSFORMERS = {
            'kube_pod_status_phase': self.kube_pod_status_phase,
            'kube_pod_container_status_waiting_reason': self.kube_pod_container_status_waiting_reason,
            'kube_pod_container_status_terminated_reason': self.kube_pod_container_status_terminated_reason,
            'kube_cronjob_next_schedule_time': self.kube_cronjob_next_schedule_time,
            'kube_job_complete': self.kube_job_complete,
            'kube_job_failed': self.kube_job_failed,
            'kube_job_status_failed': self.kube_job_status_failed,
            'kube_job_status_succeeded': self.kube_job_status_succeeded,
            'kube_node_status_condition': self.kube_node_status_condition,
            'kube_node_status_ready': self.kube_node_status_ready,
            'kube_node_status_out_of_disk': self.kube_node_status_out_of_disk,
            'kube_node_status_memory_pressure': self.kube_node_status_memory_pressure,
            'kube_node_status_disk_pressure': self.kube_node_status_disk_pressure,
            'kube_node_status_network_unavailable': self.kube_node_status_network_unavailable,
            'kube_node_spec_unschedulable': self.kube_node_spec_unschedulable,
            'kube_resourcequota': self.kube_resourcequota,
            'kube_limitrange': self.kube_limitrange,
            'kube_persistentvolume_status_phase': self.count_objects_by_tags,
            'kube_service_spec_type': self.count_objects_by_tags,
        }
        # Handling cron jobs succeeded/failed counts
        self.failed_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
        self.succeeded_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
        # Logic for Jobs
        self.job_succeeded_count = defaultdict(int)
        self.job_failed_count = defaultdict(int)
    def check(self, instance):
        """Scrape the configured endpoint, then flush job/cron-job counters.

        The transformers invoked by process() accumulate counts into
        self.*_cron_job_counts / self.job_*_count; they are submitted here as
        monotonic counts after the scrape completes.
        """
        endpoint = instance.get('kube_state_url')
        scraper_config = self.config_map[endpoint]
        self.process(scraper_config, metric_transformers=self.METRIC_TRANSFORMERS)
        # Logic for Cron Jobs
        for job_tags, job in iteritems(self.failed_cron_job_counts):
            self.monotonic_count(scraper_config['namespace'] + '.job.failed', job.count, list(job_tags))
            # roll the per-job timestamp window forward for the next run
            job.set_previous_and_reset_current_ts()
        for job_tags, job in iteritems(self.succeeded_cron_job_counts):
            self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job.count, list(job_tags))
            job.set_previous_and_reset_current_ts()
        # Logic for Jobs
        for job_tags, job_count in iteritems(self.job_succeeded_count):
            self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job_count, list(job_tags))
        for job_tags, job_count in iteritems(self.job_failed_count):
            self.monotonic_count(scraper_config['namespace'] + '.job.failed', job_count, list(job_tags))
    def _filter_metric(self, metric, scraper_config):
        """Telemetry hook called for every scraped metric family.

        When 'telemetry' is enabled, counts the scraped samples per resource
        family (and namespace when present). Always returns False: this check
        never filters metrics here.
        """
        if scraper_config['telemetry']:
            # name is like "kube_pod_execution_duration"
            name_part = metric.name.split("_", 3)
            if len(name_part) < 2:
                return False
            family = name_part[1]
            tags = ["resource_name:" + family]
            # use the namespace of the first sample that carries one
            for sample in metric.samples:
                if "namespace" in sample[self.SAMPLE_LABELS]:
                    ns = sample[self.SAMPLE_LABELS]["namespace"]
                    tags.append("resource_namespace:" + ns)
                    break
            self._send_telemetry_counter(
                'collector.metrics.count', len(metric.samples), scraper_config, extra_tags=tags
            )
        # do not filter
        return False
    def _create_kubernetes_state_prometheus_instance(self, instance):
        """
        Set up the kubernetes_state instance so it can be used in OpenMetricsBaseCheck

        Raises CheckException when 'kube_state_url' is missing. The returned
        instance dict carries the metric mapping, ignore list, label joins and
        the defaults this check relied on before the OpenMetrics migration.
        """
        ksm_instance = deepcopy(instance)
        endpoint = instance.get('kube_state_url')
        if endpoint is None:
            raise CheckException("Unable to find kube_state_url in config file.")
        extra_labels = ksm_instance.get('label_joins', {})
        hostname_override = is_affirmative(ksm_instance.get('hostname_override', True))
        ksm_instance.update(
            {
                'namespace': 'kubernetes_state',
                # kube-state-metrics name -> submitted metric name (namespaced)
                'metrics': [
                    {
                        'kube_daemonset_status_current_number_scheduled': 'daemonset.scheduled',
                        'kube_daemonset_status_desired_number_scheduled': 'daemonset.desired',
                        'kube_daemonset_status_number_misscheduled': 'daemonset.misscheduled',
                        'kube_daemonset_status_number_ready': 'daemonset.ready',
                        'kube_daemonset_updated_number_scheduled': 'daemonset.updated',
                        'kube_deployment_spec_paused': 'deployment.paused',
                        'kube_deployment_spec_replicas': 'deployment.replicas_desired',
                        'kube_deployment_spec_strategy_rollingupdate_max_unavailable': 'deployment.rollingupdate.max_unavailable',  # noqa: E501
                        'kube_deployment_status_replicas': 'deployment.replicas',
                        'kube_deployment_status_replicas_available': 'deployment.replicas_available',
                        'kube_deployment_status_replicas_unavailable': 'deployment.replicas_unavailable',
                        'kube_deployment_status_replicas_updated': 'deployment.replicas_updated',
                        'kube_endpoint_address_available': 'endpoint.address_available',
                        'kube_endpoint_address_not_ready': 'endpoint.address_not_ready',
                        'kube_endpoint_created': 'endpoint.created',
                        'kube_hpa_spec_min_replicas': 'hpa.min_replicas',
                        'kube_hpa_spec_max_replicas': 'hpa.max_replicas',
                        'kube_hpa_status_desired_replicas': 'hpa.desired_replicas',
                        'kube_hpa_status_current_replicas': 'hpa.current_replicas',
                        'kube_hpa_status_condition': 'hpa.condition',
                        'kube_node_info': 'node.count',
                        'kube_node_status_allocatable_cpu_cores': 'node.cpu_allocatable',
                        'kube_node_status_allocatable_memory_bytes': 'node.memory_allocatable',
                        'kube_node_status_allocatable_pods': 'node.pods_allocatable',
                        'kube_node_status_capacity_cpu_cores': 'node.cpu_capacity',
                        'kube_node_status_capacity_memory_bytes': 'node.memory_capacity',
                        'kube_node_status_capacity_pods': 'node.pods_capacity',
                        'kube_node_status_allocatable_nvidia_gpu_cards': 'node.gpu.cards_allocatable',
                        'kube_node_status_capacity_nvidia_gpu_cards': 'node.gpu.cards_capacity',
                        'kube_pod_container_status_terminated': 'container.terminated',
                        'kube_pod_container_status_waiting': 'container.waiting',
                        'kube_persistentvolumeclaim_status_phase': 'persistentvolumeclaim.status',
                        'kube_persistentvolumeclaim_resource_requests_storage_bytes': 'persistentvolumeclaim.request_storage',  # noqa: E501
                        'kube_pod_container_resource_limits_cpu_cores': 'container.cpu_limit',
                        'kube_pod_container_resource_limits_memory_bytes': 'container.memory_limit',
                        'kube_pod_container_resource_requests_cpu_cores': 'container.cpu_requested',
                        'kube_pod_container_resource_requests_memory_bytes': 'container.memory_requested',
                        'kube_pod_container_status_ready': 'container.ready',
                        'kube_pod_container_status_restarts': 'container.restarts',  # up to kube-state-metrics 1.1.x
                        'kube_pod_container_status_restarts_total': 'container.restarts',  # noqa: E501, from kube-state-metrics 1.2.0
                        'kube_pod_container_status_running': 'container.running',
                        'kube_pod_container_resource_requests_nvidia_gpu_devices': 'container.gpu.request',
                        'kube_pod_container_resource_limits_nvidia_gpu_devices': 'container.gpu.limit',
                        'kube_pod_status_ready': 'pod.ready',
                        'kube_pod_status_scheduled': 'pod.scheduled',
                        'kube_poddisruptionbudget_status_current_healthy': 'pdb.pods_healthy',
                        'kube_poddisruptionbudget_status_desired_healthy': 'pdb.pods_desired',
                        'kube_poddisruptionbudget_status_pod_disruptions_allowed': 'pdb.disruptions_allowed',
                        'kube_poddisruptionbudget_status_expected_pods': 'pdb.pods_total',
                        'kube_replicaset_spec_replicas': 'replicaset.replicas_desired',
                        'kube_replicaset_status_fully_labeled_replicas': 'replicaset.fully_labeled_replicas',
                        'kube_replicaset_status_ready_replicas': 'replicaset.replicas_ready',
                        'kube_replicaset_status_replicas': 'replicaset.replicas',
                        'kube_replicationcontroller_spec_replicas': 'replicationcontroller.replicas_desired',
                        'kube_replicationcontroller_status_available_replicas': 'replicationcontroller.replicas_available',  # noqa: E501
                        'kube_replicationcontroller_status_fully_labeled_replicas': 'replicationcontroller.fully_labeled_replicas',  # noqa: E501
                        'kube_replicationcontroller_status_ready_replicas': 'replicationcontroller.replicas_ready',
                        'kube_replicationcontroller_status_replicas': 'replicationcontroller.replicas',
                        'kube_statefulset_replicas': 'statefulset.replicas_desired',
                        'kube_statefulset_status_replicas': 'statefulset.replicas',
                        'kube_statefulset_status_replicas_current': 'statefulset.replicas_current',
                        'kube_statefulset_status_replicas_ready': 'statefulset.replicas_ready',
                        'kube_statefulset_status_replicas_updated': 'statefulset.replicas_updated',
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_lowerbound': (
                            'vpa.lower_bound'
                        ),
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_target': (
                            'vpa.target'
                        ),
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_uncappedtarget': (
                            'vpa.uncapped_target'
                        ),
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_upperbound': (
                            'vpa.upperbound'
                        ),
                        'kube_verticalpodautoscaler_spec_updatepolicy_updatemode': 'vpa.update_mode',
                    }
                ],
                # Metrics we deliberately do not submit (see inline reasons).
                'ignore_metrics': [
                    # _info, _labels and _created don't convey any metric
                    'kube_cronjob_info',
                    'kube_cronjob_created',
                    'kube_daemonset_created',
                    'kube_deployment_created',
                    'kube_deployment_labels',
                    'kube_job_created',
                    'kube_job_info',
                    'kube_limitrange_created',
                    'kube_namespace_created',
                    'kube_namespace_labels',
                    'kube_node_created',
                    'kube_node_labels',
                    'kube_pod_created',
                    'kube_pod_container_info',
                    'kube_pod_info',
                    'kube_pod_owner',
                    'kube_pod_start_time',
                    'kube_pod_labels',
                    'kube_poddisruptionbudget_created',
                    'kube_replicaset_created',
                    'kube_replicationcontroller_created',
                    'kube_resourcequota_created',
                    'kube_replicaset_owner',
                    'kube_service_created',
                    'kube_service_info',
                    'kube_service_labels',
                    'kube_service_spec_external_ip',
                    'kube_service_status_load_balancer_ingress',
                    'kube_statefulset_labels',
                    'kube_statefulset_created',
                    'kube_statefulset_status_current_revision',
                    'kube_statefulset_status_update_revision',
                    # Already provided by the kubelet integration
                    'kube_pod_container_status_last_terminated_reason',
                    # _generation metrics are more metadata than metrics, no real use case for now
                    'kube_daemonset_metadata_generation',
                    'kube_deployment_metadata_generation',
                    'kube_deployment_status_observed_generation',
                    'kube_replicaset_metadata_generation',
                    'kube_replicaset_status_observed_generation',
                    'kube_replicationcontroller_metadata_generation',
                    'kube_replicationcontroller_status_observed_generation',
                    'kube_statefulset_metadata_generation',
                    'kube_statefulset_status_observed_generation',
                    'kube_hpa_metadata_generation',
                    # kube_node_status_phase and kube_namespace_status_phase have no use case as a service check
                    'kube_namespace_status_phase',
                    'kube_node_status_phase',
                    # These CronJob and Job metrics need use cases to determine how do implement
                    'kube_cronjob_status_active',
                    'kube_cronjob_status_last_schedule_time',
                    'kube_cronjob_spec_suspend',
                    'kube_cronjob_spec_starting_deadline_seconds',
                    'kube_job_spec_active_dealine_seconds',
                    'kube_job_spec_completions',
                    'kube_job_spec_parallelism',
                    'kube_job_status_active',
                    'kube_job_status_completion_time',  # We could compute the duration=completion-start as a gauge
                    'kube_job_status_start_time',
                    'kube_verticalpodautoscaler_labels',
                ],
                # Enrich samples with labels coming from other metric families.
                'label_joins': {
                    'kube_pod_info': {'label_to_match': 'pod', 'labels_to_get': ['node']},
                    'kube_pod_status_phase': {'label_to_match': 'pod', 'labels_to_get': ['phase']},
                    'kube_persistentvolume_info': {
                        'label_to_match': 'persistentvolume',
                        'labels_to_get': ['storageclass'],
                    },
                    'kube_persistentvolumeclaim_info': {
                        'label_to_match': 'persistentvolumeclaim',
                        'labels_to_get': ['storageclass'],
                    },
                },
                # Defaults that were set when kubernetes_state was based on PrometheusCheck
                'send_monotonic_counter': ksm_instance.get('send_monotonic_counter', False),
                'health_service_check': ksm_instance.get('health_service_check', False),
            }
        )
        ksm_instance['prometheus_url'] = endpoint
        # user-supplied label joins take precedence over the defaults above
        ksm_instance['label_joins'].update(extra_labels)
        if hostname_override:
            ksm_instance['label_to_hostname'] = 'node'
            clustername = get_clustername()
            if clustername != "":
                ksm_instance['label_to_hostname_suffix'] = "-" + clustername
        if 'labels_mapper' in ksm_instance and not isinstance(ksm_instance['labels_mapper'], dict):
            self.log.warning("Option labels_mapper should be a dictionary for %s", endpoint)
        return ksm_instance
    def _condition_to_service_check(self, sample, sc_name, mapping, tags=None):
        """
        Some metrics contains conditions, labels that have "condition" as name and "true", "false", or "unknown"
        as value. The metric value is expected to be a gauge equal to 0 or 1 in this case.
        For example:
        metric {
          label { name: "condition", value: "true"
          }
          # other labels here
          gauge { value: 1.0 }
        }
        This function evaluates metrics containing conditions and sends a service check
        based on a provided condition->check mapping dict
        """
        if bool(sample[self.SAMPLE_VALUE]) is False:
            return  # Ignore if gauge is not 1
        condition = sample[self.SAMPLE_LABELS].get('condition')
        if condition:
            if condition in mapping:
                self.service_check(sc_name, mapping[condition], tags=tags)
            else:
                # condition values outside the mapping are dropped silently
                self.log.debug("Unable to handle %s - unknown condition %s", sc_name, condition)
    def _condition_to_tag_check(self, sample, base_sc_name, mapping, scraper_config, tags=None):
        """
        Metrics from kube-state-metrics have changed
        For example:
        kube_node_status_condition{condition="Ready",node="ip-172-33-39-189.eu-west-1.compute",status="true"} 1
        kube_node_status_condition{condition="OutOfDisk",node="ip-172-33-57-130.eu-west-1.compute",status="false"} 1
        metric {
          label { name: "condition", value: "true"
          }
          # other labels here
          gauge { value: 1.0 }
        }
        This function evaluates metrics containing conditions and sends a service check
        based on a provided condition->check mapping dict
        """
        if bool(sample[self.SAMPLE_VALUE]) is False:
            return  # Ignore if gauge is not 1 and we are not processing the pod phase check
        # NOTE(review): the `mapping` parameter is immediately shadowed below by
        # the mapping resolved from the condition — confirm it can be dropped.
        label_value, condition_map = self._get_metric_condition_map(base_sc_name, sample[self.SAMPLE_LABELS])
        service_check_name = condition_map['service_check_name']
        mapping = condition_map['mapping']
        node = self._label_to_tag('node', sample[self.SAMPLE_LABELS], scraper_config)
        condition = self._label_to_tag('condition', sample[self.SAMPLE_LABELS], scraper_config)
        message = "{} is currently reporting {} = {}".format(node, condition, label_value)
        if condition_map['service_check_name'] is None:
            self.log.debug("Unable to handle %s - unknown condition %s", service_check_name, label_value)
        else:
            self.service_check(service_check_name, mapping[label_value], tags=tags, message=message)
    def _get_metric_condition_map(self, base_sc_name, labels):
        """Resolve a condition label to its service check name and status mapping.

        Returns a (status_label_value, {'service_check_name', 'mapping'}) pair;
        'service_check_name' is None for conditions not in the switch table.
        NOTE(review): only 'kubernetes_state.node' is handled — any other
        base_sc_name returns None implicitly, which callers must not unpack.
        """
        if base_sc_name == 'kubernetes_state.node':
            switch = {
                'Ready': {'service_check_name': base_sc_name + '.ready', 'mapping': self.condition_to_status_positive},
                'OutOfDisk': {
                    'service_check_name': base_sc_name + '.out_of_disk',
                    'mapping': self.condition_to_status_negative,
                },
                'DiskPressure': {
                    'service_check_name': base_sc_name + '.disk_pressure',
                    'mapping': self.condition_to_status_negative,
                },
                'NetworkUnavailable': {
                    'service_check_name': base_sc_name + '.network_unavailable',
                    'mapping': self.condition_to_status_negative,
                },
                'MemoryPressure': {
                    'service_check_name': base_sc_name + '.memory_pressure',
                    'mapping': self.condition_to_status_negative,
                },
            }
            return (
                labels.get('status'),
                switch.get(labels.get('condition'), {'service_check_name': None, 'mapping': None}),
            )
def _format_tag(self, name, value, scraper_config):
"""
Lookups the labels_mapper table to see if replacing the tag name is
necessary, then returns a "name:value" tag string
"""
return '%s:%s' % (scraper_config['labels_mapper'].get(name, name), to_string(value).lower())
def _label_to_tag(self, name, labels, scraper_config, tag_name=None):
"""
Search for `name` in labels name and returns corresponding tag string.
Tag name is label name if not specified.
Returns None if name was not found.
"""
value = labels.get(name)
if value:
return self._format_tag(tag_name or name, value, scraper_config)
else:
return None
def _label_to_tags(self, name, labels, scraper_config, tag_name=None):
"""
Search for `name` in labels name and returns corresponding tags string.
Tag name is label name if not specified.
Returns an empty list if name was not found.
"""
value = labels.get(name)
tags = []
if value:
tags += self._build_tags(tag_name or name, value, scraper_config)
return tags
def _trim_job_tag(self, name):
"""
Trims suffix of job names if they match -(\\d{4,10}$)
"""
pattern = r"(-\d{4,10}$)"
return re.sub(pattern, '', name)
def _extract_job_timestamp(self, name):
"""
Extract timestamp of job names
"""
ts = name.split('-')[-1]
if ts.isdigit():
return int(ts)
else:
msg = 'Cannot extract ts from job name {}'
self.log.debug(msg, name)
return None
    # Labels attached: namespace, pod
    # As a message the phase=Pending|Running|Succeeded|Failed|Unknown
    # From the phase the check will update its status
    # Also submits as an aggregated count with minimal tags so it is
    # visualisable over time per namespace and phase
    def kube_pod_status_phase(self, metric, scraper_config):
        """ Phase a pod is in. """
        metric_name = scraper_config['namespace'] + '.pod.status_phase'
        # (namespace, phase) -> number of pods, aggregated over all samples
        status_phase_counter = Counter()
        for sample in metric.samples:
            # Counts aggregated cluster-wide to avoid no-data issues on pod churn,
            # pod granularity available in the service checks
            tags = (
                self._label_to_tags('namespace', sample[self.SAMPLE_LABELS], scraper_config)
                + self._label_to_tags('phase', sample[self.SAMPLE_LABELS], scraper_config)
                + scraper_config['custom_tags']
            )
            # sorted tuple makes the tag set usable as a Counter key
            status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
        for tags, count in iteritems(status_phase_counter):
            self.gauge(metric_name, count, tags=list(tags))
    def _submit_metric_kube_pod_container_status_reason(
        self, metric, metric_suffix, whitelisted_status_reasons, scraper_config
    ):
        """Submit per-container waiting/terminated gauges, keeping only samples
        whose 'reason' label is in `whitelisted_status_reasons` (lowercased)."""
        metric_name = scraper_config['namespace'] + metric_suffix
        for sample in metric.samples:
            tags = []
            reason = sample[self.SAMPLE_LABELS].get('reason')
            if reason:
                # Filtering according to the reason here is paramount to limit cardinality
                if reason.lower() in whitelisted_status_reasons:
                    tags += self._build_tags('reason', reason, scraper_config)
                else:
                    continue
            if 'container' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('kube_container_name', sample[self.SAMPLE_LABELS]['container'], scraper_config)
            if 'namespace' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('namespace', sample[self.SAMPLE_LABELS]['namespace'], scraper_config)
            if 'pod' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('pod', sample[self.SAMPLE_LABELS]['pod'], scraper_config)
            self.gauge(
                metric_name,
                sample[self.SAMPLE_VALUE],
                tags + scraper_config['custom_tags'],
                hostname=self.get_hostname_for_sample(sample, scraper_config),
            )
    def kube_pod_container_status_waiting_reason(self, metric, scraper_config):
        """Containers in a waiting state, filtered to whitelisted reasons."""
        self._submit_metric_kube_pod_container_status_reason(
            metric, '.container.status_report.count.waiting', WHITELISTED_WAITING_REASONS, scraper_config
        )
    def kube_pod_container_status_terminated_reason(self, metric, scraper_config):
        """Containers in a terminated state, filtered to whitelisted reasons."""
        self._submit_metric_kube_pod_container_status_reason(
            metric, '.container.status_report.count.terminated', WHITELISTED_TERMINATED_REASONS, scraper_config
        )
    def kube_cronjob_next_schedule_time(self, metric, scraper_config):
        """ Time until the next schedule """
        # Used as a service check so that one can be alerted if the cronjob's next schedule is in the past
        check_basename = scraper_config['namespace'] + '.cronjob.on_schedule_check'
        curr_time = int(time.time())
        for sample in metric.samples:
            # positive when the next schedule is in the future, negative when late
            on_schedule = int(sample[self.SAMPLE_VALUE]) - curr_time
            tags = []
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                tags += self._build_tags(label_name, label_value, scraper_config)
            tags += scraper_config['custom_tags']
            if on_schedule < 0:
                # NOTE(review): on_schedule is negative here, so the message
                # reports a negative lateness value — confirm intent.
                message = "The service check scheduled at {} is {} seconds late".format(
                    time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(int(sample[self.SAMPLE_VALUE]))), on_schedule
                )
                self.service_check(check_basename, self.CRITICAL, tags=tags, message=message)
            else:
                self.service_check(check_basename, self.OK, tags=tags)
    def kube_job_complete(self, metric, scraper_config):
        """Send an OK service check for each completed job (timestamp suffix
        trimmed from the job name so retries share the same tag set)."""
        service_check_name = scraper_config['namespace'] + '.job.complete'
        for sample in metric.samples:
            tags = []
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    trimmed_job = self._trim_job_tag(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            self.service_check(service_check_name, self.OK, tags=tags + scraper_config['custom_tags'])
    def kube_job_failed(self, metric, scraper_config):
        """Send a CRITICAL service check for each failed job (same service
        check name as kube_job_complete, so status reflects the latest state)."""
        service_check_name = scraper_config['namespace'] + '.job.complete'
        for sample in metric.samples:
            tags = []
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    trimmed_job = self._trim_job_tag(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            self.service_check(service_check_name, self.CRITICAL, tags=tags + scraper_config['custom_tags'])
    def kube_job_status_failed(self, metric, scraper_config):
        """Accumulate failed-run counts; cron jobs (detected by a numeric
        name suffix) are deduplicated by timestamp, plain jobs summed as-is.
        Counts are flushed by check()."""
        for sample in metric.samples:
            job_ts = None
            tags = [] + scraper_config['custom_tags']
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    trimmed_job = self._trim_job_tag(label_value)
                    job_ts = self._extract_job_timestamp(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            if job_ts is not None:  # if there is a timestamp, this is a Cron Job
                self.failed_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
                    job_ts, sample[self.SAMPLE_VALUE]
                )
            else:
                self.job_failed_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
    def kube_job_status_succeeded(self, metric, scraper_config):
        """Accumulate succeeded-run counts; mirrors kube_job_status_failed.
        Counts are flushed by check()."""
        for sample in metric.samples:
            job_ts = None
            tags = [] + scraper_config['custom_tags']
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    trimmed_job = self._trim_job_tag(label_value)
                    job_ts = self._extract_job_timestamp(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            if job_ts is not None:  # if there is a timestamp, this is a Cron Job
                self.succeeded_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
                    job_ts, sample[self.SAMPLE_VALUE]
                )
            else:
                self.job_succeeded_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
    def kube_node_status_condition(self, metric, scraper_config):
        """ The ready status of a cluster node. v1.0+"""
        base_check_name = scraper_config['namespace'] + '.node'
        metric_name = scraper_config['namespace'] + '.nodes.by_condition'
        by_condition_counter = Counter()
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            # per-node service check (ready/disk_pressure/... depending on condition)
            self._condition_to_tag_check(
                sample,
                base_check_name,
                self.condition_to_status_positive,
                scraper_config,
                tags=node_tags + scraper_config['custom_tags'],
            )
            # Counts aggregated cluster-wide to avoid no-data issues on node churn,
            # node granularity available in the service checks
            tags = (
                self._label_to_tags("condition", sample[self.SAMPLE_LABELS], scraper_config)
                + self._label_to_tags("status", sample[self.SAMPLE_LABELS], scraper_config)
                + scraper_config['custom_tags']
            )
            by_condition_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
        for tags, count in iteritems(by_condition_counter):
            self.gauge(metric_name, count, tags=list(tags))
    def kube_node_status_ready(self, metric, scraper_config):
        """ The ready status of a cluster node (legacy)"""
        # "true" maps to OK (positive condition).
        service_check_name = scraper_config['namespace'] + '.node.ready'
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_service_check(
                sample,
                service_check_name,
                self.condition_to_status_positive,
                tags=node_tags + scraper_config['custom_tags'],
            )
    def kube_node_status_out_of_disk(self, metric, scraper_config):
        """ Whether the node is out of disk space (legacy)"""
        # "true" maps to CRITICAL (negative condition).
        service_check_name = scraper_config['namespace'] + '.node.out_of_disk'
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_service_check(
                sample,
                service_check_name,
                self.condition_to_status_negative,
                tags=node_tags + scraper_config['custom_tags'],
            )
    def kube_node_status_memory_pressure(self, metric, scraper_config):
        """ Whether the node is in a memory pressure state (legacy)"""
        # "true" maps to CRITICAL (negative condition).
        service_check_name = scraper_config['namespace'] + '.node.memory_pressure'
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_service_check(
                sample,
                service_check_name,
                self.condition_to_status_negative,
                tags=node_tags + scraper_config['custom_tags'],
            )
    def kube_node_status_disk_pressure(self, metric, scraper_config):
        """ Whether the node is in a disk pressure state (legacy)"""
        # "true" maps to CRITICAL (negative condition).
        service_check_name = scraper_config['namespace'] + '.node.disk_pressure'
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_service_check(
                sample,
                service_check_name,
                self.condition_to_status_negative,
                tags=node_tags + scraper_config['custom_tags'],
            )
    def kube_node_status_network_unavailable(self, metric, scraper_config):
        """ Whether the node is in a network unavailable state (legacy)"""
        # "true" maps to CRITICAL (negative condition).
        service_check_name = scraper_config['namespace'] + '.node.network_unavailable'
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_service_check(
                sample,
                service_check_name,
                self.condition_to_status_negative,
                tags=node_tags + scraper_config['custom_tags'],
            )
    def kube_node_spec_unschedulable(self, metric, scraper_config):
        """ Whether a node can schedule new pods. """
        metric_name = scraper_config['namespace'] + '.node.status'
        statuses = ('schedulable', 'unschedulable')
        if metric.type in METRIC_TYPES:
            for sample in metric.samples:
                tags = []
                for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                    tags += self._build_tags(label_name, label_value, scraper_config)
                tags += scraper_config['custom_tags']
                status = statuses[int(sample[self.SAMPLE_VALUE])]  # value can be 0 or 1
                tags += self._build_tags('status', status, scraper_config)
                self.gauge(metric_name, 1, tags)  # metric value is always one, value is on the tags
        else:
            self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
    def kube_resourcequota(self, metric, scraper_config):
        """ Quota and current usage by resource type. """
        metric_base_name = scraper_config['namespace'] + '.resourcequota.{}.{}'
        # kube-state-metrics "type" label -> metric name suffix
        suffixes = {'used': 'used', 'hard': 'limit'}
        if metric.type in METRIC_TYPES:
            for sample in metric.samples:
                mtype = sample[self.SAMPLE_LABELS].get("type")
                resource = sample[self.SAMPLE_LABELS].get("resource")
                tags = (
                    self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
                    + self._label_to_tags("resourcequota", sample[self.SAMPLE_LABELS], scraper_config)
                    + scraper_config['custom_tags']
                )
                # NOTE(review): an unexpected 'type' label would raise KeyError
                # here — confirm kube-state-metrics only emits used/hard.
                self.gauge(metric_base_name.format(resource, suffixes[mtype]), sample[self.SAMPLE_VALUE], tags)
        else:
            self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_limitrange(self, metric, scraper_config):
""" Resource limits by consumer type. """
# type's cardinality's low: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3872-L3879
# idem for resource: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3342-L3352
# idem for constraint: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3882-L3901
metric_base_name = scraper_config['namespace'] + '.limitrange.{}.{}'
constraints = {
'min': 'min',
'max': 'max',
'default': 'default',
'defaultRequest': 'default_request',
'maxLimitRequestRatio': 'max_limit_request_ratio',
}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
constraint = sample[self.SAMPLE_LABELS].get("constraint")
if constraint in constraints:
constraint = constraints[constraint]
else:
self.log.error("Constraint %s unsupported for metric %s", constraint, metric.name)
continue
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("type", sample[self.SAMPLE_LABELS], scraper_config, tag_name="consumer_type")
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, constraint), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def count_objects_by_tags(self, metric, scraper_config):
""" Count objects by whitelisted tags and submit counts as gauges. """
config = self.object_count_params[metric.name]
metric_name = "{}.{}".format(scraper_config['namespace'], config['metric_name'])
object_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config) for l in config['allowed_labels']
] + scraper_config['custom_tags']
object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(object_counter):
self.gauge(metric_name, count, tags=list(tags))
def _build_tags(self, label_name, label_value, scraper_config, hostname=None):
"""
Build a list of formated tags from `label_name` parameter. It also depend of the
check configuration ('keep_ksm_labels' parameter)
"""
tags = []
# first use the labels_mapper
tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
# then try to use the kube_labels_mapper
kube_tag_name = kube_labels_mapper.get(tag_name, tag_name)
label_value = to_string(label_value).lower()
tags.append('{}:{}'.format(to_string(kube_tag_name), label_value))
if self.keep_ksm_labels and (kube_tag_name != tag_name):
tags.append('{}:{}'.format(to_string(tag_name), label_value))
return tags
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
"""
Redefine this method to allow labels duplication, during migration phase
"""
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags += scraper_config['_metric_tags']
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
_tags += self._build_tags(label_name, label_value, scraper_config)
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
| 52.046707 | 145 | 0.625463 |
import re
import time
from collections import Counter, defaultdict
from copy import deepcopy
from six import iteritems
from datadog_checks.checks.openmetrics import OpenMetricsBaseCheck
from datadog_checks.config import is_affirmative
from datadog_checks.errors import CheckException
from datadog_checks.utils.common import to_string
# get_clustername is provided by the Datadog Agent at runtime; fall back to an
# empty cluster name when the check runs outside the Agent (tests, standalone).
try:
    from datadog_agent import get_clustername
except ImportError:
    def get_clustername():
        return ""
# Prometheus metric types this check knows how to translate into gauges.
METRIC_TYPES = ['counter', 'gauge']
# Only these container status reasons are allowed through as tags, to keep
# the `reason` tag cardinality bounded.
WHITELISTED_WAITING_REASONS = ['errimagepull', 'imagepullbackoff', 'crashloopbackoff', 'containercreating']
WHITELISTED_TERMINATED_REASONS = ['oomkilled', 'containercannotrun', 'error']
# Map kube-state-metrics label names to the tag names Datadog conventions use.
kube_labels_mapper = {
    'namespace': 'kube_namespace',
    'job': 'kube_job',
    'cronjob': 'kube_cronjob',
    'pod': 'pod_name',
    'phase': 'pod_phase',
    'daemonset': 'kube_daemon_set',
    'replicationcontroller': 'kube_replication_controller',
    'replicaset': 'kube_replica_set',
    # BUGFIX: the key used to be 'statefulset ' (trailing space), so the
    # statefulset label was never remapped to kube_stateful_set.
    'statefulset': 'kube_stateful_set',
    'deployment': 'kube_deployment',
    'container': 'kube_container_name',
    'container_id': 'container_id',
    'image': 'image_name',
}
class KubernetesState(OpenMetricsBaseCheck):
class CronJobCount:
def __init__(self):
self.count = 0
self.previous_run_max_ts = 0
self.current_run_max_ts = 0
def set_previous_and_reset_current_ts(self):
if self.current_run_max_ts > 0:
self.previous_run_max_ts = self.current_run_max_ts
self.current_run_max_ts = 0
def update_current_ts_and_add_count(self, job_ts, count):
if job_ts > self.previous_run_max_ts and count > 0:
self.count += count
self.current_run_max_ts = max(self.current_run_max_ts, job_ts)
DEFAULT_METRIC_LIMIT = 0
def __init__(self, name, init_config, agentConfig, instances=None):
instance = instances[0]
kubernetes_state_instance = self._create_kubernetes_state_prometheus_instance(instance)
self.keep_ksm_labels = is_affirmative(kubernetes_state_instance.get('keep_ksm_labels', True))
generic_instances = [kubernetes_state_instance]
super(KubernetesState, self).__init__(name, init_config, agentConfig, instances=generic_instances)
self.condition_to_status_positive = {'true': self.OK, 'false': self.CRITICAL, 'unknown': self.UNKNOWN}
self.condition_to_status_negative = {'true': self.CRITICAL, 'false': self.OK, 'unknown': self.UNKNOWN}
self.object_count_params = {
'kube_persistentvolume_status_phase': {
'metric_name': 'persistentvolumes.by_phase',
'allowed_labels': ['storageclass', 'phase'],
},
'kube_service_spec_type': {'metric_name': 'service.count', 'allowed_labels': ['namespace', 'type']},
}
self.METRIC_TRANSFORMERS = {
'kube_pod_status_phase': self.kube_pod_status_phase,
'kube_pod_container_status_waiting_reason': self.kube_pod_container_status_waiting_reason,
'kube_pod_container_status_terminated_reason': self.kube_pod_container_status_terminated_reason,
'kube_cronjob_next_schedule_time': self.kube_cronjob_next_schedule_time,
'kube_job_complete': self.kube_job_complete,
'kube_job_failed': self.kube_job_failed,
'kube_job_status_failed': self.kube_job_status_failed,
'kube_job_status_succeeded': self.kube_job_status_succeeded,
'kube_node_status_condition': self.kube_node_status_condition,
'kube_node_status_ready': self.kube_node_status_ready,
'kube_node_status_out_of_disk': self.kube_node_status_out_of_disk,
'kube_node_status_memory_pressure': self.kube_node_status_memory_pressure,
'kube_node_status_disk_pressure': self.kube_node_status_disk_pressure,
'kube_node_status_network_unavailable': self.kube_node_status_network_unavailable,
'kube_node_spec_unschedulable': self.kube_node_spec_unschedulable,
'kube_resourcequota': self.kube_resourcequota,
'kube_limitrange': self.kube_limitrange,
'kube_persistentvolume_status_phase': self.count_objects_by_tags,
'kube_service_spec_type': self.count_objects_by_tags,
}
self.failed_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
self.succeeded_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
self.job_succeeded_count = defaultdict(int)
self.job_failed_count = defaultdict(int)
def check(self, instance):
endpoint = instance.get('kube_state_url')
scraper_config = self.config_map[endpoint]
self.process(scraper_config, metric_transformers=self.METRIC_TRANSFORMERS)
for job_tags, job in iteritems(self.failed_cron_job_counts):
self.monotonic_count(scraper_config['namespace'] + '.job.failed', job.count, list(job_tags))
job.set_previous_and_reset_current_ts()
for job_tags, job in iteritems(self.succeeded_cron_job_counts):
self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job.count, list(job_tags))
job.set_previous_and_reset_current_ts()
for job_tags, job_count in iteritems(self.job_succeeded_count):
self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job_count, list(job_tags))
for job_tags, job_count in iteritems(self.job_failed_count):
self.monotonic_count(scraper_config['namespace'] + '.job.failed', job_count, list(job_tags))
def _filter_metric(self, metric, scraper_config):
if scraper_config['telemetry']:
name_part = metric.name.split("_", 3)
if len(name_part) < 2:
return False
family = name_part[1]
tags = ["resource_name:" + family]
for sample in metric.samples:
if "namespace" in sample[self.SAMPLE_LABELS]:
ns = sample[self.SAMPLE_LABELS]["namespace"]
tags.append("resource_namespace:" + ns)
break
self._send_telemetry_counter(
'collector.metrics.count', len(metric.samples), scraper_config, extra_tags=tags
)
return False
def _create_kubernetes_state_prometheus_instance(self, instance):
ksm_instance = deepcopy(instance)
endpoint = instance.get('kube_state_url')
if endpoint is None:
raise CheckException("Unable to find kube_state_url in config file.")
extra_labels = ksm_instance.get('label_joins', {})
hostname_override = is_affirmative(ksm_instance.get('hostname_override', True))
ksm_instance.update(
{
'namespace': 'kubernetes_state',
'metrics': [
{
'kube_daemonset_status_current_number_scheduled': 'daemonset.scheduled',
'kube_daemonset_status_desired_number_scheduled': 'daemonset.desired',
'kube_daemonset_status_number_misscheduled': 'daemonset.misscheduled',
'kube_daemonset_status_number_ready': 'daemonset.ready',
'kube_daemonset_updated_number_scheduled': 'daemonset.updated',
'kube_deployment_spec_paused': 'deployment.paused',
'kube_deployment_spec_replicas': 'deployment.replicas_desired',
'kube_deployment_spec_strategy_rollingupdate_max_unavailable': 'deployment.rollingupdate.max_unavailable',
'kube_deployment_status_replicas': 'deployment.replicas',
'kube_deployment_status_replicas_available': 'deployment.replicas_available',
'kube_deployment_status_replicas_unavailable': 'deployment.replicas_unavailable',
'kube_deployment_status_replicas_updated': 'deployment.replicas_updated',
'kube_endpoint_address_available': 'endpoint.address_available',
'kube_endpoint_address_not_ready': 'endpoint.address_not_ready',
'kube_endpoint_created': 'endpoint.created',
'kube_hpa_spec_min_replicas': 'hpa.min_replicas',
'kube_hpa_spec_max_replicas': 'hpa.max_replicas',
'kube_hpa_status_desired_replicas': 'hpa.desired_replicas',
'kube_hpa_status_current_replicas': 'hpa.current_replicas',
'kube_hpa_status_condition': 'hpa.condition',
'kube_node_info': 'node.count',
'kube_node_status_allocatable_cpu_cores': 'node.cpu_allocatable',
'kube_node_status_allocatable_memory_bytes': 'node.memory_allocatable',
'kube_node_status_allocatable_pods': 'node.pods_allocatable',
'kube_node_status_capacity_cpu_cores': 'node.cpu_capacity',
'kube_node_status_capacity_memory_bytes': 'node.memory_capacity',
'kube_node_status_capacity_pods': 'node.pods_capacity',
'kube_node_status_allocatable_nvidia_gpu_cards': 'node.gpu.cards_allocatable',
'kube_node_status_capacity_nvidia_gpu_cards': 'node.gpu.cards_capacity',
'kube_pod_container_status_terminated': 'container.terminated',
'kube_pod_container_status_waiting': 'container.waiting',
'kube_persistentvolumeclaim_status_phase': 'persistentvolumeclaim.status',
'kube_persistentvolumeclaim_resource_requests_storage_bytes': 'persistentvolumeclaim.request_storage',
'kube_pod_container_resource_limits_cpu_cores': 'container.cpu_limit',
'kube_pod_container_resource_limits_memory_bytes': 'container.memory_limit',
'kube_pod_container_resource_requests_cpu_cores': 'container.cpu_requested',
'kube_pod_container_resource_requests_memory_bytes': 'container.memory_requested',
'kube_pod_container_status_ready': 'container.ready',
'kube_pod_container_status_restarts': 'container.restarts',
'kube_pod_container_status_restarts_total': 'container.restarts',
'kube_pod_container_status_running': 'container.running',
'kube_pod_container_resource_requests_nvidia_gpu_devices': 'container.gpu.request',
'kube_pod_container_resource_limits_nvidia_gpu_devices': 'container.gpu.limit',
'kube_pod_status_ready': 'pod.ready',
'kube_pod_status_scheduled': 'pod.scheduled',
'kube_poddisruptionbudget_status_current_healthy': 'pdb.pods_healthy',
'kube_poddisruptionbudget_status_desired_healthy': 'pdb.pods_desired',
'kube_poddisruptionbudget_status_pod_disruptions_allowed': 'pdb.disruptions_allowed',
'kube_poddisruptionbudget_status_expected_pods': 'pdb.pods_total',
'kube_replicaset_spec_replicas': 'replicaset.replicas_desired',
'kube_replicaset_status_fully_labeled_replicas': 'replicaset.fully_labeled_replicas',
'kube_replicaset_status_ready_replicas': 'replicaset.replicas_ready',
'kube_replicaset_status_replicas': 'replicaset.replicas',
'kube_replicationcontroller_spec_replicas': 'replicationcontroller.replicas_desired',
'kube_replicationcontroller_status_available_replicas': 'replicationcontroller.replicas_available',
'kube_replicationcontroller_status_fully_labeled_replicas': 'replicationcontroller.fully_labeled_replicas',
'kube_replicationcontroller_status_ready_replicas': 'replicationcontroller.replicas_ready',
'kube_replicationcontroller_status_replicas': 'replicationcontroller.replicas',
'kube_statefulset_replicas': 'statefulset.replicas_desired',
'kube_statefulset_status_replicas': 'statefulset.replicas',
'kube_statefulset_status_replicas_current': 'statefulset.replicas_current',
'kube_statefulset_status_replicas_ready': 'statefulset.replicas_ready',
'kube_statefulset_status_replicas_updated': 'statefulset.replicas_updated',
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_lowerbound': (
'vpa.lower_bound'
),
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_target': (
'vpa.target'
),
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_uncappedtarget': (
'vpa.uncapped_target'
),
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_upperbound': (
'vpa.upperbound'
),
'kube_verticalpodautoscaler_spec_updatepolicy_updatemode': 'vpa.update_mode',
}
],
'ignore_metrics': [
'kube_cronjob_info',
'kube_cronjob_created',
'kube_daemonset_created',
'kube_deployment_created',
'kube_deployment_labels',
'kube_job_created',
'kube_job_info',
'kube_limitrange_created',
'kube_namespace_created',
'kube_namespace_labels',
'kube_node_created',
'kube_node_labels',
'kube_pod_created',
'kube_pod_container_info',
'kube_pod_info',
'kube_pod_owner',
'kube_pod_start_time',
'kube_pod_labels',
'kube_poddisruptionbudget_created',
'kube_replicaset_created',
'kube_replicationcontroller_created',
'kube_resourcequota_created',
'kube_replicaset_owner',
'kube_service_created',
'kube_service_info',
'kube_service_labels',
'kube_service_spec_external_ip',
'kube_service_status_load_balancer_ingress',
'kube_statefulset_labels',
'kube_statefulset_created',
'kube_statefulset_status_current_revision',
'kube_statefulset_status_update_revision',
# Already provided by the kubelet integration
'kube_pod_container_status_last_terminated_reason',
# _generation metrics are more metadata than metrics, no real use case for now
'kube_daemonset_metadata_generation',
'kube_deployment_metadata_generation',
'kube_deployment_status_observed_generation',
'kube_replicaset_metadata_generation',
'kube_replicaset_status_observed_generation',
'kube_replicationcontroller_metadata_generation',
'kube_replicationcontroller_status_observed_generation',
'kube_statefulset_metadata_generation',
'kube_statefulset_status_observed_generation',
'kube_hpa_metadata_generation',
# kube_node_status_phase and kube_namespace_status_phase have no use case as a service check
'kube_namespace_status_phase',
'kube_node_status_phase',
# These CronJob and Job metrics need use cases to determine how do implement
'kube_cronjob_status_active',
'kube_cronjob_status_last_schedule_time',
'kube_cronjob_spec_suspend',
'kube_cronjob_spec_starting_deadline_seconds',
'kube_job_spec_active_dealine_seconds',
'kube_job_spec_completions',
'kube_job_spec_parallelism',
'kube_job_status_active',
'kube_job_status_completion_time', # We could compute the duration=completion-start as a gauge
'kube_job_status_start_time',
'kube_verticalpodautoscaler_labels',
],
'label_joins': {
'kube_pod_info': {'label_to_match': 'pod', 'labels_to_get': ['node']},
'kube_pod_status_phase': {'label_to_match': 'pod', 'labels_to_get': ['phase']},
'kube_persistentvolume_info': {
'label_to_match': 'persistentvolume',
'labels_to_get': ['storageclass'],
},
'kube_persistentvolumeclaim_info': {
'label_to_match': 'persistentvolumeclaim',
'labels_to_get': ['storageclass'],
},
},
# Defaults that were set when kubernetes_state was based on PrometheusCheck
'send_monotonic_counter': ksm_instance.get('send_monotonic_counter', False),
'health_service_check': ksm_instance.get('health_service_check', False),
}
)
ksm_instance['prometheus_url'] = endpoint
ksm_instance['label_joins'].update(extra_labels)
if hostname_override:
ksm_instance['label_to_hostname'] = 'node'
clustername = get_clustername()
if clustername != "":
ksm_instance['label_to_hostname_suffix'] = "-" + clustername
if 'labels_mapper' in ksm_instance and not isinstance(ksm_instance['labels_mapper'], dict):
self.log.warning("Option labels_mapper should be a dictionary for %s", endpoint)
return ksm_instance
def _condition_to_service_check(self, sample, sc_name, mapping, tags=None):
if bool(sample[self.SAMPLE_VALUE]) is False:
return # Ignore if gauge is not 1
condition = sample[self.SAMPLE_LABELS].get('condition')
if condition:
if condition in mapping:
self.service_check(sc_name, mapping[condition], tags=tags)
else:
self.log.debug("Unable to handle %s - unknown condition %s", sc_name, condition)
def _condition_to_tag_check(self, sample, base_sc_name, mapping, scraper_config, tags=None):
if bool(sample[self.SAMPLE_VALUE]) is False:
return # Ignore if gauge is not 1 and we are not processing the pod phase check
label_value, condition_map = self._get_metric_condition_map(base_sc_name, sample[self.SAMPLE_LABELS])
service_check_name = condition_map['service_check_name']
mapping = condition_map['mapping']
node = self._label_to_tag('node', sample[self.SAMPLE_LABELS], scraper_config)
condition = self._label_to_tag('condition', sample[self.SAMPLE_LABELS], scraper_config)
message = "{} is currently reporting {} = {}".format(node, condition, label_value)
if condition_map['service_check_name'] is None:
self.log.debug("Unable to handle %s - unknown condition %s", service_check_name, label_value)
else:
self.service_check(service_check_name, mapping[label_value], tags=tags, message=message)
def _get_metric_condition_map(self, base_sc_name, labels):
if base_sc_name == 'kubernetes_state.node':
switch = {
'Ready': {'service_check_name': base_sc_name + '.ready', 'mapping': self.condition_to_status_positive},
'OutOfDisk': {
'service_check_name': base_sc_name + '.out_of_disk',
'mapping': self.condition_to_status_negative,
},
'DiskPressure': {
'service_check_name': base_sc_name + '.disk_pressure',
'mapping': self.condition_to_status_negative,
},
'NetworkUnavailable': {
'service_check_name': base_sc_name + '.network_unavailable',
'mapping': self.condition_to_status_negative,
},
'MemoryPressure': {
'service_check_name': base_sc_name + '.memory_pressure',
'mapping': self.condition_to_status_negative,
},
}
return (
labels.get('status'),
switch.get(labels.get('condition'), {'service_check_name': None, 'mapping': None}),
)
def _format_tag(self, name, value, scraper_config):
return '%s:%s' % (scraper_config['labels_mapper'].get(name, name), to_string(value).lower())
def _label_to_tag(self, name, labels, scraper_config, tag_name=None):
value = labels.get(name)
if value:
return self._format_tag(tag_name or name, value, scraper_config)
else:
return None
def _label_to_tags(self, name, labels, scraper_config, tag_name=None):
value = labels.get(name)
tags = []
if value:
tags += self._build_tags(tag_name or name, value, scraper_config)
return tags
def _trim_job_tag(self, name):
pattern = r"(-\d{4,10}$)"
return re.sub(pattern, '', name)
def _extract_job_timestamp(self, name):
ts = name.split('-')[-1]
if ts.isdigit():
return int(ts)
else:
msg = 'Cannot extract ts from job name {}'
self.log.debug(msg, name)
return None
# Labels attached: namespace, pod
# As a message the phase=Pending|Running|Succeeded|Failed|Unknown
# From the phase the check will update its status
# Also submits as an aggregated count with minimal tags so it is
# visualisable over time per namespace and phase
def kube_pod_status_phase(self, metric, scraper_config):
metric_name = scraper_config['namespace'] + '.pod.status_phase'
status_phase_counter = Counter()
for sample in metric.samples:
# Counts aggregated cluster-wide to avoid no-data issues on pod churn,
# pod granularity available in the service checks
tags = (
self._label_to_tags('namespace', sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags('phase', sample[self.SAMPLE_LABELS], scraper_config)
+ scraper_config['custom_tags']
)
status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(status_phase_counter):
self.gauge(metric_name, count, tags=list(tags))
def _submit_metric_kube_pod_container_status_reason(
self, metric, metric_suffix, whitelisted_status_reasons, scraper_config
):
metric_name = scraper_config['namespace'] + metric_suffix
for sample in metric.samples:
tags = []
reason = sample[self.SAMPLE_LABELS].get('reason')
if reason:
# Filtering according to the reason here is paramount to limit cardinality
if reason.lower() in whitelisted_status_reasons:
tags += self._build_tags('reason', reason, scraper_config)
else:
continue
if 'container' in sample[self.SAMPLE_LABELS]:
tags += self._build_tags('kube_container_name', sample[self.SAMPLE_LABELS]['container'], scraper_config)
if 'namespace' in sample[self.SAMPLE_LABELS]:
tags += self._build_tags('namespace', sample[self.SAMPLE_LABELS]['namespace'], scraper_config)
if 'pod' in sample[self.SAMPLE_LABELS]:
tags += self._build_tags('pod', sample[self.SAMPLE_LABELS]['pod'], scraper_config)
self.gauge(
metric_name,
sample[self.SAMPLE_VALUE],
tags + scraper_config['custom_tags'],
hostname=self.get_hostname_for_sample(sample, scraper_config),
)
def kube_pod_container_status_waiting_reason(self, metric, scraper_config):
self._submit_metric_kube_pod_container_status_reason(
metric, '.container.status_report.count.waiting', WHITELISTED_WAITING_REASONS, scraper_config
)
def kube_pod_container_status_terminated_reason(self, metric, scraper_config):
self._submit_metric_kube_pod_container_status_reason(
metric, '.container.status_report.count.terminated', WHITELISTED_TERMINATED_REASONS, scraper_config
)
def kube_cronjob_next_schedule_time(self, metric, scraper_config):
# Used as a service check so that one can be alerted if the cronjob's next schedule is in the past
check_basename = scraper_config['namespace'] + '.cronjob.on_schedule_check'
curr_time = int(time.time())
for sample in metric.samples:
on_schedule = int(sample[self.SAMPLE_VALUE]) - curr_time
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
tags += self._build_tags(label_name, label_value, scraper_config)
tags += scraper_config['custom_tags']
if on_schedule < 0:
message = "The service check scheduled at {} is {} seconds late".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(int(sample[self.SAMPLE_VALUE]))), on_schedule
)
self.service_check(check_basename, self.CRITICAL, tags=tags, message=message)
else:
self.service_check(check_basename, self.OK, tags=tags)
def kube_job_complete(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.job.complete'
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
self.service_check(service_check_name, self.OK, tags=tags + scraper_config['custom_tags'])
def kube_job_failed(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.job.complete'
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
self.service_check(service_check_name, self.CRITICAL, tags=tags + scraper_config['custom_tags'])
def kube_job_status_failed(self, metric, scraper_config):
for sample in metric.samples:
job_ts = None
tags = [] + scraper_config['custom_tags']
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
job_ts = self._extract_job_timestamp(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
if job_ts is not None:
self.failed_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
job_ts, sample[self.SAMPLE_VALUE]
)
else:
self.job_failed_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
def kube_job_status_succeeded(self, metric, scraper_config):
for sample in metric.samples:
job_ts = None
tags = [] + scraper_config['custom_tags']
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
job_ts = self._extract_job_timestamp(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
if job_ts is not None:
self.succeeded_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
job_ts, sample[self.SAMPLE_VALUE]
)
else:
self.job_succeeded_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
def kube_node_status_condition(self, metric, scraper_config):
base_check_name = scraper_config['namespace'] + '.node'
metric_name = scraper_config['namespace'] + '.nodes.by_condition'
by_condition_counter = Counter()
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_tag_check(
sample,
base_check_name,
self.condition_to_status_positive,
scraper_config,
tags=node_tags + scraper_config['custom_tags'],
)
tags = (
self._label_to_tags("condition", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("status", sample[self.SAMPLE_LABELS], scraper_config)
+ scraper_config['custom_tags']
)
by_condition_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(by_condition_counter):
self.gauge(metric_name, count, tags=list(tags))
def kube_node_status_ready(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.ready'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_positive,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_out_of_disk(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.out_of_disk'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_memory_pressure(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.memory_pressure'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_disk_pressure(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.disk_pressure'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_network_unavailable(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.network_unavailable'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_spec_unschedulable(self, metric, scraper_config):
metric_name = scraper_config['namespace'] + '.node.status'
statuses = ('schedulable', 'unschedulable')
if metric.type in METRIC_TYPES:
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
tags += self._build_tags(label_name, label_value, scraper_config)
tags += scraper_config['custom_tags']
status = statuses[int(sample[self.SAMPLE_VALUE])]
tags += self._build_tags('status', status, scraper_config)
self.gauge(metric_name, 1, tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_resourcequota(self, metric, scraper_config):
metric_base_name = scraper_config['namespace'] + '.resourcequota.{}.{}'
suffixes = {'used': 'used', 'hard': 'limit'}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
mtype = sample[self.SAMPLE_LABELS].get("type")
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("resourcequota", sample[self.SAMPLE_LABELS], scraper_config)
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, suffixes[mtype]), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_limitrange(self, metric, scraper_config):
se_name = scraper_config['namespace'] + '.limitrange.{}.{}'
constraints = {
'min': 'min',
'max': 'max',
'default': 'default',
'defaultRequest': 'default_request',
'maxLimitRequestRatio': 'max_limit_request_ratio',
}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
constraint = sample[self.SAMPLE_LABELS].get("constraint")
if constraint in constraints:
constraint = constraints[constraint]
else:
self.log.error("Constraint %s unsupported for metric %s", constraint, metric.name)
continue
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("type", sample[self.SAMPLE_LABELS], scraper_config, tag_name="consumer_type")
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, constraint), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def count_objects_by_tags(self, metric, scraper_config):
config = self.object_count_params[metric.name]
metric_name = "{}.{}".format(scraper_config['namespace'], config['metric_name'])
object_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config) for l in config['allowed_labels']
] + scraper_config['custom_tags']
object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(object_counter):
self.gauge(metric_name, count, tags=list(tags))
def _build_tags(self, label_name, label_value, scraper_config, hostname=None):
tags = []
tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
kube_tag_name = kube_labels_mapper.get(tag_name, tag_name)
label_value = to_string(label_value).lower()
tags.append('{}:{}'.format(to_string(kube_tag_name), label_value))
if self.keep_ksm_labels and (kube_tag_name != tag_name):
tags.append('{}:{}'.format(to_string(tag_name), label_value))
return tags
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags += scraper_config['_metric_tags']
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
_tags += self._build_tags(label_name, label_value, scraper_config)
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
| true | true |
f72b12c2828be5260fdd70ad443c19b16f2923f0 | 3,003 | py | Python | bin/stock_price_scraper.py | Samanvay96/asx_scraper | 4b80ff97bc3d1005aef005c82bd0a6c20d8733dc | [
"MIT"
] | null | null | null | bin/stock_price_scraper.py | Samanvay96/asx_scraper | 4b80ff97bc3d1005aef005c82bd0a6c20d8733dc | [
"MIT"
] | null | null | null | bin/stock_price_scraper.py | Samanvay96/asx_scraper | 4b80ff97bc3d1005aef005c82bd0a6c20d8733dc | [
"MIT"
] | null | null | null | import urllib.request
from datetime import datetime
import string
from argparse import ArgumentParser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict
class StockPriceScraper:
    """Scrapes stock prices and writes them into a Google Sheet.

    Each stock occupies one column: row 1 holds the stock code, row 2 the
    scraped price string.
    """

    def __init__(self, base_url, stock_codes, google_sheet, client_secret, test):
        self.stock_codes = stock_codes
        self.base_url = base_url
        # In test mode, skip Google Sheets authentication entirely.
        if not test:
            self.sheet = client(client_secret).open(google_sheet)

    def insert_prices(self):
        """Create a worksheet named after today's date and fill in prices."""
        worksheet = self.sheet.add_worksheet(title=f'{datetime.today().strftime("%Y-%m-%d")}', rows='2', cols=f'{len(self.stock_codes)}')
        for i, (stock_code, stock_price) in enumerate(self.stock_prices().items()):
            self.update_sheet(worksheet, i, [stock_code, stock_price])

    def stock_prices(self):
        """Return a SortedDict mapping stock code -> scraped price string."""
        stock_prices = {}
        for stock_code in self.stock_codes:
            stock_prices[stock_code] = price(url(self.base_url, stock_code))
        return SortedDict(stock_prices)

    def update_sheet(self, worksheet, i, contents):
        """Write *contents* down column ``i`` starting at row 1.

        Bug fix: spreadsheet cells are 1-indexed ('A1', not 'A0'), so the
        row counter must be offset by one; the previous code produced
        invalid labels like 'A0' which gspread rejects.
        NOTE(review): columns are limited to 26 stocks (A-Z) -- confirm.
        """
        for j, content in enumerate(contents):
            update_cell(worksheet, cell(string.ascii_uppercase[i], j + 1), content)
def cell(letter, number):
    """Compose an A1-style cell label, e.g. cell('A', 1) -> 'A1'."""
    return '{}{}'.format(letter, number)
def update_cell(worksheet, cell, info):
    """Write *info* into the given A1-notation *cell* of *worksheet*."""
    worksheet.update_acell(cell, info)
def client(client_secret):
    """Return an authorized gspread client built from a service-account
    JSON keyfile path.

    NOTE(review): opening a spreadsheet by title may also require the
    Google Drive scope ('https://www.googleapis.com/auth/drive') -- confirm.
    """
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
    return gspread.authorize(creds)
def price(url):
    """Fetch *url* and scrape the price text from the page."""
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
    # presumably the target site renders the price inside this h2 -- the
    # selector is site-specific and will break if the markup changes.
    return soup.find('h2', attrs={'class':'page-content entry-content'}).text.strip()
def url(base_url, stock_code):
    """Join the base URL with the upper-cased stock code."""
    return base_url + stock_code.upper()
if __name__ == '__main__':
    parser = ArgumentParser(description='Takes stock codes, scrapes prices from website and inserts into a given google sheet')
    # Bug fix: the base-url option previously reused '-c'/'--client-secret'
    # (a copy-paste slip), which makes argparse raise a conflicting-option
    # error at startup before any argument can be parsed.
    parser.add_argument('-b', '--base-url', action='store', help='the base url to scrape prices from', type=str, dest='base_url', required=True)
    parser.add_argument('-c', '--client-secret', action='store', help='the client', type=str, dest='client_secret', required=True)
    parser.add_argument('-g', '--google-sheet', action='store', help='the google sheet to insert prices into', type=str, dest='google_sheet', required=True)
    parser.add_argument('-s', '--stock-codes', action='store', help='the stock codes to get price for', type=str, dest='stock_codes', nargs='+', required=True)
    parser.add_argument('-t', '--test', action='store_true', help='Perform test', dest='test')
    args = parser.parse_args().__dict__
    StockPriceScraper(**args).insert_prices()
from datetime import datetime
import string
from argparse import ArgumentParser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict
class StockPriceScraper:
    """Scrapes stock prices and writes them into a Google Sheet (one column
    per stock: row 1 code, row 2 price)."""

    def __init__(self, base_url, stock_codes, google_sheet, client_secret, test):
        self.stock_codes = stock_codes
        self.base_url = base_url
        # Test mode skips Google Sheets authentication.
        if not test:
            self.sheet = client(client_secret).open(google_sheet)

    def insert_prices(self):
        worksheet = self.sheet.add_worksheet(title=f'{datetime.today().strftime("%Y-%m-%d")}', rows='2', cols=f'{len(self.stock_codes)}')
        for i, (stock_code, stock_price) in enumerate(self.stock_prices().items()):
            self.update_sheet(worksheet, i, [stock_code, stock_price])

    def stock_prices(self):
        stock_prices = {}
        for stock_code in self.stock_codes:
            stock_prices[stock_code] = price(url(self.base_url, stock_code))
        return SortedDict(stock_prices)

    def update_sheet(self, worksheet, i, contents):
        # Bug fix: cell labels are 1-indexed ('A1', not 'A0'); offset rows.
        for j, content in enumerate(contents):
            update_cell(worksheet, cell(string.ascii_uppercase[i], j + 1), content)
def cell(letter, number):
return f'{letter}{number}'
def update_cell(worksheet, cell, info):
worksheet.update_acell(cell, info)
def client(client_secret):
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
return gspread.authorize(creds)
def price(url):
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
return soup.find('h2', attrs={'class':'page-content entry-content'}).text.strip()
def url(base_url, stock_code):
return f'{base_url}{stock_code.upper()}'
if __name__ == '__main__':
    parser = ArgumentParser(description='Takes stock codes, scrapes prices from website and inserts into a given google sheet')
    # Bug fix: '-c'/'--client-secret' was registered twice (the first copy
    # wrongly carried dest='base_url'); argparse raises a conflicting-option
    # error on the duplicate. Give the base url its own option.
    parser.add_argument('-b', '--base-url', action='store', help='the base url to scrape prices from', type=str, dest='base_url', required=True)
    parser.add_argument('-c', '--client-secret', action='store', help='the client', type=str, dest='client_secret', required=True)
    parser.add_argument('-g', '--google-sheet', action='store', help='the google sheet to insert prices into', type=str, dest='google_sheet', required=True)
    parser.add_argument('-s', '--stock-codes', action='store', help='the stock codes to get price for', type=str, dest='stock_codes', nargs='+', required=True)
    parser.add_argument('-t', '--test', action='store_true', help='Perform test', dest='test')
    args = parser.parse_args().__dict__
    StockPriceScraper(**args).insert_prices()
f72b1364a37162fb740d304ac9506ad71a4279ec | 17,209 | py | Python | bitshares/asset.py | ianco/python-bitshares | f9fb23bc32f7bf6ebabb295df8f4056d84f0e859 | [
"MIT"
] | null | null | null | bitshares/asset.py | ianco/python-bitshares | f9fb23bc32f7bf6ebabb295df8f4056d84f0e859 | [
"MIT"
] | null | null | null | bitshares/asset.py | ianco/python-bitshares | f9fb23bc32f7bf6ebabb295df8f4056d84f0e859 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from bitsharesbase import operations
from bitsharesbase.asset_permissions import (
asset_permissions,
force_flag,
test_permissions,
todict,
)
from .blockchainobject import BlockchainObject
from .exceptions import AssetDoesNotExistsException
from .instance import BlockchainInstance
from graphenecommon.asset import Asset as GrapheneAsset
@BlockchainInstance.inject
class Asset(GrapheneAsset):
""" Deals with Assets of the network.
:param str Asset: Symbol name or object id of an asset
:param bool lazy: Lazy loading
:param bool full: Also obtain bitasset-data and dynamic asset data
:param bitshares.bitshares.BitShares blockchain_instance: BitShares
instance
:returns: All data of an asset
:rtype: dict
.. note:: This class comes with its own caching function to reduce the
load on the API server. Instances of this class can be
refreshed with ``Asset.refresh()``.
"""
    def define_classes(self):
        # Graphene object type id for assets (object ids look like 1.3.x).
        self.type_id = 3

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Permissions and flags
        self["permissions"] = todict(self["options"].get("issuer_permissions"))
        self["flags"] = todict(self["options"].get("flags"))
        try:
            # The on-chain description is often JSON; fall back to raw text.
            self["description"] = json.loads(self["options"]["description"])
        except Exception:
            self["description"] = self["options"]["description"]
    @property
    def market_fee_percent(self):
        """Market fee as a fraction (on-chain unit is 1/100 of a percent)."""
        return self["options"]["market_fee_percent"] / 100 / 100

    @property
    def max_market_fee(self):
        """Maximum market fee as an Amount denominated in this asset."""
        from .amount import Amount

        return Amount(
            {"amount": self["options"]["max_market_fee"], "asset_id": self["id"]}
        )
    @property
    def feeds(self):
        """Return all published price feeds, or ``None`` for non-bitassets."""
        from .price import PriceFeed

        self.ensure_full()
        if not self.is_bitasset:
            return
        r = []
        for feed in self["bitasset_data"]["feeds"]:
            r.append(PriceFeed(feed, blockchain_instance=self.blockchain))
        return r

    @property
    def feed(self):
        """Return the current (consolidated) price feed.

        Asset must be a bitasset; asserts otherwise.
        """
        from .price import PriceFeed

        assert self.is_bitasset
        self.ensure_full()
        return PriceFeed(
            self["bitasset_data"]["current_feed"], blockchain_instance=self.blockchain
        )
    @property
    def calls(self):
        """Shortcut for the first 10 call orders of this bitasset."""
        return self.get_call_orders(10)

    def get_call_orders(self, limit=100):
        """Return up to ``limit`` (max 100) call positions of this bitasset.

        Each entry contains the borrower account, collateral and debt
        amounts, the call price, the settlement price and the collateral
        ratio.
        """
        from .price import Price
        from .account import Account
        from .amount import Amount

        assert limit <= 100
        assert self.is_bitasset
        self.ensure_full()
        r = list()
        bitasset = self["bitasset_data"]
        settlement_price = Price(
            bitasset["current_feed"]["settlement_price"],
            blockchain_instance=self.blockchain,
        )
        ret = self.blockchain.rpc.get_call_orders(self["id"], limit)
        for call in ret[:limit]:
            call_price = Price(call["call_price"], blockchain_instance=self.blockchain)
            collateral_amount = Amount(
                {
                    "amount": call["collateral"],
                    "asset_id": call["call_price"]["base"]["asset_id"],
                },
                blockchain_instance=self.blockchain,
            )
            debt_amount = Amount(
                {
                    "amount": call["debt"],
                    "asset_id": call["call_price"]["quote"]["asset_id"],
                },
                blockchain_instance=self.blockchain,
            )
            r.append(
                {
                    "account": Account(
                        call["borrower"], lazy=True, blockchain_instance=self.blockchain
                    ),
                    "collateral": collateral_amount,
                    "debt": debt_amount,
                    "call_price": call_price,
                    "settlement_price": settlement_price,
                    # collateral value expressed in debt units, relative to
                    # the settlement price
                    "ratio": (
                        float(collateral_amount)
                        / float(debt_amount)
                        * float(settlement_price)
                    ),
                }
            )
        return r
    @property
    def settlements(self):
        """Shortcut for the first 10 settle orders of this bitasset."""
        return self.get_settle_orders(10)

    def get_settle_orders(self, limit=100):
        """Return up to ``limit`` (max 100) pending settle orders.

        Each entry contains the owning account, the settling amount and the
        scheduled settlement date.
        """
        from .account import Account
        from .amount import Amount
        from .utils import formatTimeString

        assert limit <= 100
        assert self.is_bitasset
        r = list()
        ret = self.blockchain.rpc.get_settle_orders(self["id"], limit)
        for settle in ret[:limit]:
            r.append(
                {
                    "account": Account(
                        settle["owner"], lazy=True, blockchain_instance=self.blockchain
                    ),
                    "amount": Amount(
                        settle["balance"], blockchain_instance=self.blockchain
                    ),
                    "date": formatTimeString(settle["settlement_date"]),
                }
            )
        return r
    def halt(self):
        """ Halt this asset from being moved or traded

            Enables the ``white_list`` and ``transfer_restricted`` flags and
            whitelists only the null-account, which effectively freezes
            transfers and trading of this asset.
        """
        from .account import Account

        nullaccount = Account(
            "null-account",  # We set the null-account
            blockchain_instance=self.blockchain,
        )
        flags = {"white_list": True, "transfer_restricted": True}
        options = self["options"]
        # The issuer permissions must allow setting these flags.
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update(
            {
                "flags": flags_int,
                "whitelist_authorities": [nullaccount["id"]],
                "blacklist_authorities": [],
                "whitelist_markets": [self["id"]],
                "blacklist_markets": [],
            }
        )
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")
def release(
self,
whitelist_authorities=[],
blacklist_authorities=[],
whitelist_markets=[],
blacklist_markets=[],
):
""" Release this asset and allow unrestricted transfer, trading,
etc.
:param list whitelist_authorities: List of accounts that
serve as whitelist authorities
:param list blacklist_authorities: List of accounts that
serve as blacklist authorities
:param list whitelist_markets: List of assets to allow
trading with
:param list blacklist_markets: List of assets to prevent
trading with
"""
from .account import Account
flags = {"white_list": False, "transfer_restricted": False}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"whitelist_authorities": [
Account(a)["id"] for a in whitelist_authorities
],
"blacklist_authorities": [
Account(a)["id"] for a in blacklist_authorities
],
"whitelist_markets": [Asset(a)["id"] for a in whitelist_markets],
"blacklist_markets": [Asset(a)["id"] for a in blacklist_markets],
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
    def setoptions(self, flags):
        """ Enable a certain flag.

            Flags:

             * charge_market_fee
             * white_list
             * override_authority
             * transfer_restricted
             * disable_force_settle
             * global_settle
             * disable_confidential
             * witness_fed_asset
             * committee_fed_asset

            :param dict flags: dictionary of flags and boolean
        """
        assert set(flags.keys()).issubset(asset_permissions.keys()), "unknown flag"
        options = self["options"]
        # Issuer permissions must allow changing each requested flag.
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update({"flags": flags_int})
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def enableflag(self, flag):
        """ Enable a certain flag.

            :param str flag: Flag name
        """
        return self.setoptions({flag: True})

    def disableflag(self, flag):
        """ Disable a certain flag.

            :param str flag: Flag name
        """
        return self.setoptions({flag: False})
    def seize(self, from_account, to_account, amount):
        """ Seize amount from an account and send to another

            ... note:: This requires the ``override_authority`` to be
                set for this asset!

            :param bitshares.account.Account from_account: From this account
            :param bitshares.account.Account to_account: To this account
            :param bitshares.amount.Amount amount: Amount to seize
        """
        options = self["options"]
        # Seizure is only possible when the override_authority flag is set.
        if not (options["flags"] & asset_permissions["override_authority"]):
            raise Exception("Insufficient Permissions/flags for seizure!")
        op = operations.Override_transfer(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "from": from_account["id"],
                "to": to_account["id"],
                "amount": amount.json(),
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")
def add_authorities(self, type, authorities=[]):
""" Add authorities to an assets white/black list
:param str type: ``blacklist`` or ``whitelist``
:param list authorities: List of authorities (Accounts)
"""
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
from .account import Account
options = self["options"]
if type == "whitelist":
options["whitelist_authorities"].extend(
[Account(a)["id"] for a in authorities]
)
if type == "blacklist":
options["blacklist_authorities"].extend(
[Account(a)["id"] for a in authorities]
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def remove_authorities(self, type, authorities=[]):
""" Remove authorities from an assets white/black list
:param str type: ``blacklist`` or ``whitelist``
:param list authorities: List of authorities (Accounts)
"""
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
from .account import Account
options = self["options"]
if type == "whitelist":
for a in authorities:
options["whitelist_authorities"].remove(Account(a)["id"])
if type == "blacklist":
for a in authorities:
options["blacklist_authorities"].remove(Account(a)["id"])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def add_markets(self, type, authorities=[], force_enable=True):
""" Add markets to an assets white/black list
:param str type: ``blacklist`` or ``whitelist``
:param list markets: List of markets (assets)
:param bool force_enable: Force enable ``white_list`` flag
"""
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
options = self["options"]
if force_enable:
test_permissions(options["issuer_permissions"], {"white_list": True})
flags_int = force_flag(options["flags"], {"white_list": True})
options.update({"flags": flags_int})
else:
assert test_permissions(
options["flags"], ["white_list"]
), "whitelist feature not enabled"
if type == "whitelist":
options["whitelist_markets"].extend([Asset(a)["id"] for a in authorities])
if type == "blacklist":
options["blacklist_markets"].extend([Asset(a)["id"] for a in authorities])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def remove_markets(self, type, authorities=[]):
""" Remove markets from an assets white/black list
:param str type: ``blacklist`` or ``whitelist``
:param list markets: List of markets (assets)
"""
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
options = self["options"]
if type == "whitelist":
for a in authorities:
options["whitelist_markets"].remove(Asset(a)["id"])
if type == "blacklist":
for a in authorities:
options["blacklist_markets"].remove(Asset(a)["id"])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
    def set_market_fee(self, percentage_fee, max_market_fee):
        """ Set trading percentage fee

            :param float percentage_fee: Percentage of fee (0 < fee <= 100)
            :param bitshares.amount.Amount max_market_fee: Max Fee
        """
        # NOTE: assert-based validation is stripped when running with -O.
        assert percentage_fee <= 100 and percentage_fee > 0
        flags = {"charge_market_fee": percentage_fee > 0}
        options = self["options"]
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update(
            {
                "flags": flags_int,
                # on-chain unit is 1/100 of a percent
                "market_fee_percent": percentage_fee * 100,
                "max_market_fee": int(max_market_fee),
            }
        )
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")
    def update_feed_producers(self, producers):
        """ Update bitasset feed producers

            :param list producers: List of accounts that are allowed to produce
                a feed
        """
        assert self.is_bitasset, "Asset needs to be a bitasset/market pegged asset"
        from .account import Account

        op = operations.Asset_update_feed_producers(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_feed_producers": [Account(a)["id"] for a in producers],
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")
| 35.192229 | 88 | 0.534895 |
import json
from bitsharesbase import operations
from bitsharesbase.asset_permissions import (
asset_permissions,
force_flag,
test_permissions,
todict,
)
from .blockchainobject import BlockchainObject
from .exceptions import AssetDoesNotExistsException
from .instance import BlockchainInstance
from graphenecommon.asset import Asset as GrapheneAsset
@BlockchainInstance.inject
class Asset(GrapheneAsset):
def define_classes(self):
self.type_id = 3
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self["permissions"] = todict(self["options"].get("issuer_permissions"))
self["flags"] = todict(self["options"].get("flags"))
try:
self["description"] = json.loads(self["options"]["description"])
except Exception:
self["description"] = self["options"]["description"]
@property
def market_fee_percent(self):
return self["options"]["market_fee_percent"] / 100 / 100
@property
def max_market_fee(self):
from .amount import Amount
return Amount(
{"amount": self["options"]["max_market_fee"], "asset_id": self["id"]}
)
@property
def feeds(self):
from .price import PriceFeed
self.ensure_full()
if not self.is_bitasset:
return
r = []
for feed in self["bitasset_data"]["feeds"]:
r.append(PriceFeed(feed, blockchain_instance=self.blockchain))
return r
@property
def feed(self):
from .price import PriceFeed
assert self.is_bitasset
self.ensure_full()
return PriceFeed(
self["bitasset_data"]["current_feed"], blockchain_instance=self.blockchain
)
@property
def calls(self):
return self.get_call_orders(10)
def get_call_orders(self, limit=100):
from .price import Price
from .account import Account
from .amount import Amount
assert limit <= 100
assert self.is_bitasset
self.ensure_full()
r = list()
bitasset = self["bitasset_data"]
settlement_price = Price(
bitasset["current_feed"]["settlement_price"],
blockchain_instance=self.blockchain,
)
ret = self.blockchain.rpc.get_call_orders(self["id"], limit)
for call in ret[:limit]:
call_price = Price(call["call_price"], blockchain_instance=self.blockchain)
collateral_amount = Amount(
{
"amount": call["collateral"],
"asset_id": call["call_price"]["base"]["asset_id"],
},
blockchain_instance=self.blockchain,
)
debt_amount = Amount(
{
"amount": call["debt"],
"asset_id": call["call_price"]["quote"]["asset_id"],
},
blockchain_instance=self.blockchain,
)
r.append(
{
"account": Account(
call["borrower"], lazy=True, blockchain_instance=self.blockchain
),
"collateral": collateral_amount,
"debt": debt_amount,
"call_price": call_price,
"settlement_price": settlement_price,
"ratio": (
float(collateral_amount)
/ float(debt_amount)
* float(settlement_price)
),
}
)
return r
@property
def settlements(self):
return self.get_settle_orders(10)
def get_settle_orders(self, limit=100):
from .account import Account
from .amount import Amount
from .utils import formatTimeString
assert limit <= 100
assert self.is_bitasset
r = list()
ret = self.blockchain.rpc.get_settle_orders(self["id"], limit)
for settle in ret[:limit]:
r.append(
{
"account": Account(
settle["owner"], lazy=True, blockchain_instance=self.blockchain
),
"amount": Amount(
settle["balance"], blockchain_instance=self.blockchain
),
"date": formatTimeString(settle["settlement_date"]),
}
)
return r
def halt(self):
from .account import Account
nullaccount = Account(
"null-account",
blockchain_instance=self.blockchain,
)
flags = {"white_list": True, "transfer_restricted": True}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"whitelist_authorities": [nullaccount["id"]],
"blacklist_authorities": [],
"whitelist_markets": [self["id"]],
"blacklist_markets": [],
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def release(
self,
whitelist_authorities=[],
blacklist_authorities=[],
whitelist_markets=[],
blacklist_markets=[],
):
from .account import Account
flags = {"white_list": False, "transfer_restricted": False}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"whitelist_authorities": [
Account(a)["id"] for a in whitelist_authorities
],
"blacklist_authorities": [
Account(a)["id"] for a in blacklist_authorities
],
"whitelist_markets": [Asset(a)["id"] for a in whitelist_markets],
"blacklist_markets": [Asset(a)["id"] for a in blacklist_markets],
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def setoptions(self, flags):
assert set(flags.keys()).issubset(asset_permissions.keys()), "unknown flag"
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update({"flags": flags_int})
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def enableflag(self, flag):
return self.setoptions({flag: True})
def disableflag(self, flag):
return self.setoptions({flag: False})
def seize(self, from_account, to_account, amount):
options = self["options"]
if not (options["flags"] & asset_permissions["override_authority"]):
raise Exception("Insufficient Permissions/flags for seizure!")
op = operations.Override_transfer(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"from": from_account["id"],
"to": to_account["id"],
"amount": amount.json(),
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def add_authorities(self, type, authorities=[]):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
from .account import Account
options = self["options"]
if type == "whitelist":
options["whitelist_authorities"].extend(
[Account(a)["id"] for a in authorities]
)
if type == "blacklist":
options["blacklist_authorities"].extend(
[Account(a)["id"] for a in authorities]
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def remove_authorities(self, type, authorities=[]):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
from .account import Account
options = self["options"]
if type == "whitelist":
for a in authorities:
options["whitelist_authorities"].remove(Account(a)["id"])
if type == "blacklist":
for a in authorities:
options["blacklist_authorities"].remove(Account(a)["id"])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def add_markets(self, type, authorities=[], force_enable=True):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
options = self["options"]
if force_enable:
test_permissions(options["issuer_permissions"], {"white_list": True})
flags_int = force_flag(options["flags"], {"white_list": True})
options.update({"flags": flags_int})
else:
assert test_permissions(
options["flags"], ["white_list"]
), "whitelist feature not enabled"
if type == "whitelist":
options["whitelist_markets"].extend([Asset(a)["id"] for a in authorities])
if type == "blacklist":
options["blacklist_markets"].extend([Asset(a)["id"] for a in authorities])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def remove_markets(self, type, authorities=[]):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
options = self["options"]
if type == "whitelist":
for a in authorities:
options["whitelist_markets"].remove(Asset(a)["id"])
if type == "blacklist":
for a in authorities:
options["blacklist_markets"].remove(Asset(a)["id"])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def set_market_fee(self, percentage_fee, max_market_fee):
assert percentage_fee <= 100 and percentage_fee > 0
flags = {"charge_market_fee": percentage_fee > 0}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"market_fee_percent": percentage_fee * 100,
"max_market_fee": int(max_market_fee),
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def update_feed_producers(self, producers):
assert self.is_bitasset, "Asset needs to be a bitasset/market pegged asset"
from .account import Account
op = operations.Asset_update_feed_producers(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_feed_producers": [Account(a)["id"] for a in producers],
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
| true | true |
f72b17196b95f01f3e9c02c59d337099f3b510e2 | 18,401 | py | Python | fedlearner/trainer/estimator.py | bruinxiong/fedlearner | 9cdeaf44b279acedd5bc88bbffd4a390697b06aa | [
"Apache-2.0"
] | 1 | 2020-12-02T09:51:29.000Z | 2020-12-02T09:51:29.000Z | fedlearner/trainer/estimator.py | bruinxiong/fedlearner | 9cdeaf44b279acedd5bc88bbffd4a390697b06aa | [
"Apache-2.0"
] | null | null | null | fedlearner/trainer/estimator.py | bruinxiong/fedlearner | 9cdeaf44b279acedd5bc88bbffd4a390697b06aa | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import os
import logging
import time
import tensorflow.compat.v1 as tf
from tensorflow.compat import as_str_any
from tensorflow.compat.v1.train import Optimizer
from tensorflow.compat.v1.estimator import ModeKeys
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from fedlearner.common.mysql_client import DBClient
from fedlearner.common.summary_hook import SummaryHook
from fedlearner.trainer import patch # pylint: disable=unused-import
from fedlearner.common import metrics
from fedlearner.data_join.common import get_kvstore_config
SYNC_PATH = '/sync/'
DATA_CHECKPOINT_INIT_VALUE = "_init_value"
class DataCheckpointSaverListener(tf.estimator.CheckpointSaverListener):
    """Persists the trainer master's data-block checkpoint in a TF string
    variable so it is saved alongside regular model checkpoints."""

    def __init__(self, tm, appid):
        # tm: trainer master client; appid: application/job identifier.
        self._trainer_master = tm
        self._application_id = appid

    def begin(self):
        # Create a string variable fed through a placeholder; running the
        # returned assign op stores the serialized data checkpoint.
        ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
        var_tmp = tf.Variable(DATA_CHECKPOINT_INIT_VALUE, \
            name="data_checkpoint")
        self._ckpt_tensor = var_tmp.assign(ckpt)

    def before_save(self, session, global_step_value):
        logging.info('About to write a checkpoint at step %d', \
            global_step_value)
        data_checkpoint = self._trainer_master.get_data_block_checkpoint(
            self._application_id)
        #if empty block from checkpoint fetched due to exception or
        # master not ready, no need to save.
        if len(data_checkpoint) == 0:
            return
        # Store the data blocks as a single comma-joined string.
        res = session.run(self._ckpt_tensor, {"data_checkpoint_plhd:0":
            ",".join(data_checkpoint)})
        logging.info("data checkpoint saved result: %s", res)
class FLModel(object):
    """Helper handed to the user's model_fn for federated training.

    Collects cross-party send/receive ops over the bridge and the extra
    train ops required to execute them, and assembles the final
    EstimatorSpec via :meth:`make_spec`.
    """

    def __init__(self, role, bridge, example_ids, exporting=False):
        # role: 'leader' or 'follower' (see verify_example_ids).
        self._role = role
        self._bridge = bridge
        self._example_ids = example_ids
        self._exporting = exporting

        self._train_ops = []
        # Each entry is a (name, tensor, require_grad) triple.
        self._recvs = []
        self._sends = []
        self._outputs = []

    @property
    def train_ops(self):
        return self._train_ops

    @property
    def sends(self):
        # Expose only (name, tensor); the require_grad flag stays internal.
        return [(n, t) for n, t, _ in self._sends]

    @property
    def recvs(self):
        return [(n, t) for n, t, _ in self._recvs]

    def verify_example_ids(self):
        """Cross-check that both parties iterate the same example ids.

        The leader sends a hash of its example ids; the other party
        receives it and adds an assert-equal op to the train ops.
        """
        tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1)
        if self._role == 'leader':
            self.send('_verify_example_ids', tensor)
        else:
            recv_tensor = self.recv('_verify_example_ids', tensor.dtype)
            op = tf.assert_equal(tensor, recv_tensor)
            self._train_ops.append(op)

    def send(self, name, tensor, require_grad=False):
        """Send *tensor* to the peer under *name*.

        If ``require_grad`` is set, also receive and return the peer's
        gradient for this tensor; otherwise return None.
        """
        with tf.control_dependencies([self._example_ids]):
            op = self._bridge.send_op(name, tensor)
        self._train_ops.append(op)
        self._sends.append((name, tensor, require_grad))
        if require_grad:
            return self.recv(name + '_grad', tensor.dtype)
        return None

    def recv(self, name, dtype=tf.float32, require_grad=False):
        """Receive a tensor named *name* of *dtype* from the peer."""
        with tf.control_dependencies([self._example_ids]):
            tensor = self._bridge.receive_op(name, dtype)
        self._recvs.append((name, tensor, require_grad))
        return tensor

    def minimize(self,
                 optimizer,
                 loss,
                 global_step=None,
                 var_list=None,
                 gate_gradients=Optimizer.GATE_OP,
                 aggregation_method=None,
                 colocate_gradients_with_ops=False,
                 name=None,
                 grad_loss=None):
        """Like Optimizer.minimize, but first sends back gradients for every
        received tensor flagged require_grad, then applies the remaining
        gradients to the local variables."""
        recv_grads = [i for i in self._recvs if i[2]]

        if var_list is None:
            var_list = \
                tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \
                tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
        # Prepend received tensors so their gradients come first in
        # grads_and_vars and can be split off below.
        var_list = [v for _, v, _ in recv_grads] + var_list

        grads_and_vars = optimizer.compute_gradients(
            loss,
            var_list=var_list,
            gate_gradients=gate_gradients,
            aggregation_method=aggregation_method,
            colocate_gradients_with_ops=colocate_gradients_with_ops,
            grad_loss=grad_loss)

        send_grads = grads_and_vars[:len(recv_grads)]
        for (n, _, _), (grad, _) in zip(recv_grads, send_grads):
            if grad is not None:
                self.send(n + '_grad', grad)

        if grads_and_vars[len(recv_grads):]:
            train_op = optimizer.apply_gradients(
                grads_and_vars[len(recv_grads):],
                global_step=global_step,
                name=name)
        else:
            # Nothing to train locally; gradients were only sent back.
            train_op = tf.no_op()

        return train_op

    def _append_summary_hook(self, training_hooks):
        # Attach the configured summary hook, if any, to the hook list.
        if not training_hooks:
            training_hooks = []
        summary_hook = SummaryHook.get_hook()
        if summary_hook:
            training_hooks.append(summary_hook)
        return training_hooks

    def make_spec(self,
                  mode,
                  predictions=None,
                  loss=None,
                  train_op=None,
                  eval_metric_ops=None,
                  training_chief_hooks=None,
                  training_hooks=None,
                  evaluation_hooks=None,
                  prediction_hooks=None):
        """Build an EstimatorSpec; in TRAIN mode the collected bridge
        send/recv ops are grouped into the train op."""
        if isinstance(predictions, tf.Tensor):
            predictions = {'output': predictions}
        if mode == ModeKeys.TRAIN:
            train_op = tf.group([train_op] + self._train_ops)
            training_hooks = self._append_summary_hook(training_hooks)
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            training_chief_hooks=training_chief_hooks,
            training_hooks=training_hooks,
            evaluation_hooks=evaluation_hooks,
            prediction_hooks=prediction_hooks)
class FLEstimator(object):
    """Estimator-style driver for federated training.

    Builds the model through the user-supplied model_fn (wrapped in an
    FLModel), and runs train / evaluate / export loops in which every
    iteration is coordinated with the peer party via the bridge
    (start -> run -> commit).
    """
    def __init__(self,
                 model_fn,
                 bridge,
                 trainer_master,
                 role,
                 worker_rank=0,
                 application_id=None,
                 cluster_spec=None):
        self._model_fn = model_fn
        self._bridge = bridge
        self._trainer_master = trainer_master
        self._role = role
        self._worker_rank = worker_rank
        self._cluster_spec = cluster_spec
        self._application_id = application_id
    def _get_features_and_labels_from_input_fn(self, input_fn, mode):
        """Build the input pipeline and return its (features, labels)."""
        dataset = input_fn(self._bridge, self._trainer_master)
        features, labels = dataset.make_one_shot_iterator().get_next()
        return features, labels
    def _get_model_spec(self, features, labels, mode):
        """Run the user model_fn through an FLModel facade.

        Returns (EstimatorSpec, FLModel).
        """
        model = FLModel(self._role, self._bridge,
                        features.get('example_id', None),
                        exporting=(mode == ModeKeys.PREDICT))
        spec = self._model_fn(model, features, labels, mode)
        return spec, model
    def _restore_datablock(self, blk_ids):
        """Ask the trainer master to restore the data-block checkpoint.

        Returns True on success, or when there is nothing to restore.
        """
        # only chief worker restores from checkpoint.
        if self._worker_rank != 0 or blk_ids is None:
            return True
        block_id_str = as_str_any(blk_ids)
        block_ids = []
        if block_id_str != DATA_CHECKPOINT_INIT_VALUE:
            block_ids = block_id_str.split(",")
        logging.info("restore: %s", block_id_str)
        return self._trainer_master.restore_data_block_checkpoint(
            self._application_id, block_ids)
    def _cheif_barriar(self, is_chief=False, sync_times=300):
        """Block the chief until every worker has raised its sync flag.

        NOTE: the (misspelled) name is kept for backward compatibility.
        Non-chief workers only publish their flag and return.
        """
        # BUGFIX: os.environ values are strings; without int() the
        # `len(sync_list) < worker_replicas` comparison below raises
        # TypeError on Python 3 whenever REPLICA_NUM is set.
        worker_replicas = int(os.environ.get('REPLICA_NUM', 0))
        kvstore_type = os.environ.get('KVSTORE_TYPE', 'etcd')
        db_database, db_addr, db_username, db_password, _ = \
            get_kvstore_config(kvstore_type)
        kvstore_client = DBClient(db_database,
                                  db_addr,
                                  db_username,
                                  db_password,
                                  SYNC_PATH)
        sync_path = '%s/%s' % (os.environ['APPLICATION_ID'],
                               os.environ['WORKER_RANK'])
        logging.info('Creating a sync flag at %s', sync_path)
        kvstore_client.set_data(sync_path, "1")
        if is_chief:
            # Poll until all replicas have registered, or give up after
            # sync_times rounds (~6s each).
            for _ in range(sync_times):
                sync_list = kvstore_client.get_prefix_kvs(
                    os.environ['APPLICATION_ID'])
                logging.info('Sync file pattern is: %s', sync_list)
                if len(sync_list) < worker_replicas:
                    logging.info('Count of ready workers is %d',
                                 len(sync_list))
                    time.sleep(6)
                else:
                    break
    def train(self,
              input_fn,
              checkpoint_path=None,
              save_checkpoint_steps=None,
              save_checkpoint_secs=None):
        """Run the training loop until the input pipeline is exhausted.

        Each iteration is bracketed by bridge.start / bridge.commit so
        both parties advance in lockstep; the bridge is terminated on
        exit even when an error occurs.
        """
        if self._cluster_spec is not None:
            device_fn = tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % self._worker_rank,
                merge_devices=True,
                cluster=self._cluster_spec)
            cluster_def = self._cluster_spec.as_cluster_def()
            local_address = self._cluster_spec.job_tasks('worker')[
                self._worker_rank]
            # Keep a single-task in-process server alive for this worker.
            server = tf.train.Server(tf.train.ClusterSpec(
                {'local': {
                    0: local_address
                }}),
                job_name='local',
                task_index=0)
            target = 'grpc://' + local_address
        else:
            device_fn = None
            cluster_def = None
            target = None
        config = tf.ConfigProto(cluster_def=cluster_def)
        config.inter_op_parallelism_threads = 4
        config.intra_op_parallelism_threads = 4
        config.experimental.share_session_state_in_clusterspec_propagation \
            = True
        tf.config.set_soft_device_placement(False)
        with tf.Graph().as_default() as g:
            with tf.device(device_fn):
                features, labels = self._get_features_and_labels_from_input_fn(
                    input_fn, ModeKeys.TRAIN)
                spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN)
                # Explicitly add a Saver so the checkpoint hook can
                # find one in the SAVERS collection.
                if not tf.get_collection(tf.GraphKeys.SAVERS):
                    saver = tf.train.Saver(
                        sharded=True,
                        defer_build=True,
                        save_relative_paths=True)  # Must set for portability
                    tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
                listener = DataCheckpointSaverListener(self._trainer_master,
                                                       self._application_id)
                saver_hook = tf.estimator.CheckpointSaverHook(
                    checkpoint_path, save_secs=save_checkpoint_secs,
                    save_steps=save_checkpoint_steps, listeners=[listener])
            self._bridge.connect()
            try:
                with tf.train.MonitoredTrainingSession(
                        master=target,
                        config=config,
                        is_chief=(self._worker_rank == 0),
                        chief_only_hooks=[saver_hook],
                        checkpoint_dir=checkpoint_path,
                        save_checkpoint_steps=save_checkpoint_steps,
                        save_checkpoint_secs=save_checkpoint_secs,
                        hooks=spec.training_hooks) as sess:
                    iter_id = 0
                    data_checkpoint_value = None
                    if hasattr(saver_hook, "data_checkpoint"):
                        data_checkpoint_value = saver_hook.data_checkpoint
                    if not self._restore_datablock(data_checkpoint_value):
                        raise ValueError("Restore data checkpoint error")
                    while not sess.should_stop():
                        self._bridge.start(iter_id)
                        logging.debug('after bridge start.')
                        start_time = time.time()
                        sess.run(spec.train_op, feed_dict={})
                        end_time = time.time()
                        metrics.emit_timer(
                            name="iter_timer",
                            value=end_time-start_time,
                            tags={})
                        logging.debug('after session run.')
                        self._bridge.commit()
                        logging.debug('after bridge commit.')
                        iter_id += 1
            finally:
                self._bridge.terminate()
        return self
    def evaluate(self,
                 input_fn,
                 checkpoint_path=None):
        """Evaluate the latest checkpoint over the whole input pipeline.

        Returns the final metric values ({name: value}) collected by a
        FinalOpsHook. Raises ValueError when no checkpoint exists.
        """
        if not tf.train.latest_checkpoint(checkpoint_path):
            raise ValueError(
                "Could not find trained model at %s" % checkpoint_path)
        with tf.Graph().as_default():
            features, labels = self._get_features_and_labels_from_input_fn(
                input_fn, ModeKeys.EVAL)
            spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL)
            # Track the average loss by default.
            eval_metric_ops = spec.eval_metric_ops or {}
            if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops:
                loss_metric = tf.metrics.mean(spec.loss)
                eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric
            # Create the real eval op (metric updates + bridge ops).
            update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops)
            update_ops.extend(model._train_ops)
            eval_op = tf.group(*update_ops)
            # Also track the global step.
            if tf.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError(
                    'Metric with name `global_step` is not allowed, because '
                    'Estimator already defines a default metric with the '
                    'same name.')
            eval_dict[tf.GraphKeys.GLOBAL_STEP] = \
                tf.train.get_or_create_global_step()
            # Prepare the session creator.
            scaffold = tf.train.Scaffold()
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=scaffold,
                checkpoint_dir=checkpoint_path)
            # Prepare hooks.
            all_hooks = list(spec.evaluation_hooks) or []
            final_ops_hook = tf.train.FinalOpsHook(eval_dict)
            all_hooks.append(final_ops_hook)
            # Evaluate over the dataset.
            self._bridge.connect()
            try:
                with tf.train.MonitoredSession(
                        session_creator=session_creator, hooks=all_hooks) as sess:
                    if not self._restore_datablock(DATA_CHECKPOINT_INIT_VALUE):
                        raise ValueError("Restore data checkpoint error")
                    iter_id = 0
                    while not sess.should_stop():
                        self._bridge.start(iter_id)
                        logging.debug('after bridge start.')
                        start_time = time.time()
                        sess.run(eval_op)
                        end_time = time.time()
                        metrics.emit_timer(
                            name="iter_timer",
                            value=end_time-start_time,
                            tags={})
                        logging.debug('after session run.')
                        self._bridge.commit()
                        logging.debug('after bridge commit.')
                        iter_id += 1
            finally:
                self._bridge.terminate()
        # Print result.
        logging.info('Metrics for iteration %d: %s',
                     iter_id, _dict_to_str(final_ops_hook.final_ops_values))
        return final_ops_hook.final_ops_values
    def export_saved_model(self,
                           export_dir_base,
                           serving_input_receiver_fn,
                           checkpoint_path=None):
        """Export the latest checkpoint as a SavedModel for serving.

        The exported graph must be self-contained, so any bridge
        send/recv in the model is rejected with an assertion.
        """
        with tf.Graph().as_default():
            receiver = serving_input_receiver_fn()
            spec, model = self._get_model_spec(receiver.features, None,
                                               ModeKeys.PREDICT)
            assert not model.sends, "Exported model cannot send"
            assert not model.recvs, "Exported model cannot receive"
            with tf.Session() as sess:
                saver_for_restore = tf.train.Saver(sharded=True)
                saver_for_restore.restore(
                    sess, tf.train.latest_checkpoint(checkpoint_path))
                tf.saved_model.simple_save(sess, export_dir_base,
                                           receiver.receiver_tensors,
                                           spec.predictions, None)
        return export_dir_base
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name in sorted(eval_dict.keys()):
metric_tensor, update_op = eval_dict[name]
value_ops[name] = metric_tensor
update_ops.append(update_op)
return update_ops, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(dictionary.items())
if not isinstance(v, bytes))
| 39.915401 | 80 | 0.577469 |
import os
import logging
import time
import tensorflow.compat.v1 as tf
from tensorflow.compat import as_str_any
from tensorflow.compat.v1.train import Optimizer
from tensorflow.compat.v1.estimator import ModeKeys
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from fedlearner.common.mysql_client import DBClient
from fedlearner.common.summary_hook import SummaryHook
from fedlearner.trainer import patch
from fedlearner.common import metrics
from fedlearner.data_join.common import get_kvstore_config
SYNC_PATH = '/sync/'
DATA_CHECKPOINT_INIT_VALUE = "_init_value"
class DataCheckpointSaverListener(tf.estimator.CheckpointSaverListener):
def __init__(self, tm, appid):
self._trainer_master = tm
self._application_id = appid
def begin(self):
ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
var_tmp = tf.Variable(DATA_CHECKPOINT_INIT_VALUE, \
name="data_checkpoint")
self._ckpt_tensor = var_tmp.assign(ckpt)
def before_save(self, session, global_step_value):
logging.info('About to write a checkpoint at step %d', \
global_step_value)
data_checkpoint = self._trainer_master.get_data_block_checkpoint(
self._application_id)
if len(data_checkpoint) == 0:
return
res = session.run(self._ckpt_tensor, {"data_checkpoint_plhd:0":
",".join(data_checkpoint)})
logging.info("data checkpoint saved result: %s", res)
class FLModel(object):
def __init__(self, role, bridge, example_ids, exporting=False):
self._role = role
self._bridge = bridge
self._example_ids = example_ids
self._exporting = exporting
self._train_ops = []
self._recvs = []
self._sends = []
self._outputs = []
@property
def train_ops(self):
return self._train_ops
@property
def sends(self):
return [(n, t) for n, t, _ in self._sends]
@property
def recvs(self):
return [(n, t) for n, t, _ in self._recvs]
def verify_example_ids(self):
tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1)
if self._role == 'leader':
self.send('_verify_example_ids', tensor)
else:
recv_tensor = self.recv('_verify_example_ids', tensor.dtype)
op = tf.assert_equal(tensor, recv_tensor)
self._train_ops.append(op)
def send(self, name, tensor, require_grad=False):
with tf.control_dependencies([self._example_ids]):
op = self._bridge.send_op(name, tensor)
self._train_ops.append(op)
self._sends.append((name, tensor, require_grad))
if require_grad:
return self.recv(name + '_grad', tensor.dtype)
return None
def recv(self, name, dtype=tf.float32, require_grad=False):
with tf.control_dependencies([self._example_ids]):
tensor = self._bridge.receive_op(name, dtype)
self._recvs.append((name, tensor, require_grad))
return tensor
def minimize(self,
optimizer,
loss,
global_step=None,
var_list=None,
gate_gradients=Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
recv_grads = [i for i in self._recvs if i[2]]
if var_list is None:
var_list = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \
tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
var_list = [v for _, v, _ in recv_grads] + var_list
grads_and_vars = optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
send_grads = grads_and_vars[:len(recv_grads)]
for (n, _, _), (grad, _) in zip(recv_grads, send_grads):
if grad is not None:
self.send(n + '_grad', grad)
if grads_and_vars[len(recv_grads):]:
train_op = optimizer.apply_gradients(
grads_and_vars[len(recv_grads):],
global_step=global_step,
name=name)
else:
train_op = tf.no_op()
return train_op
def _append_summary_hook(self, training_hooks):
if not training_hooks:
training_hooks = []
summary_hook = SummaryHook.get_hook()
if summary_hook:
training_hooks.append(summary_hook)
return training_hooks
def make_spec(self,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metric_ops=None,
training_chief_hooks=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
if isinstance(predictions, tf.Tensor):
predictions = {'output': predictions}
if mode == ModeKeys.TRAIN:
train_op = tf.group([train_op] + self._train_ops)
training_hooks = self._append_summary_hook(training_hooks)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
training_chief_hooks=training_chief_hooks,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
class FLEstimator(object):
def __init__(self,
model_fn,
bridge,
trainer_master,
role,
worker_rank=0,
application_id=None,
cluster_spec=None):
self._model_fn = model_fn
self._bridge = bridge
self._trainer_master = trainer_master
self._role = role
self._worker_rank = worker_rank
self._cluster_spec = cluster_spec
self._application_id = application_id
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
dataset = input_fn(self._bridge, self._trainer_master)
features, labels = dataset.make_one_shot_iterator().get_next()
return features, labels
def _get_model_spec(self, features, labels, mode):
model = FLModel(self._role, self._bridge,
features.get('example_id', None),
exporting=(mode == ModeKeys.PREDICT))
spec = self._model_fn(model, features, labels, mode)
return spec, model
def _restore_datablock(self, blk_ids):
if self._worker_rank != 0 or blk_ids is None:
return True
block_id_str = as_str_any(blk_ids)
block_ids = []
if block_id_str != DATA_CHECKPOINT_INIT_VALUE:
block_ids = block_id_str.split(",")
logging.info("restore: %s", block_id_str)
return self._trainer_master.restore_data_block_checkpoint(
self._application_id, block_ids)
def _cheif_barriar(self, is_chief=False, sync_times=300):
worker_replicas = os.environ.get('REPLICA_NUM', 0)
kvstore_type = os.environ.get('KVSTORE_TYPE', 'etcd')
db_database, db_addr, db_username, db_password, _ = \
get_kvstore_config(kvstore_type)
kvstore_client = DBClient(db_database,
db_addr,
db_username,
db_password,
SYNC_PATH)
sync_path = '%s/%s' % (os.environ['APPLICATION_ID'],
os.environ['WORKER_RANK'])
logging.info('Creating a sync flag at %s', sync_path)
kvstore_client.set_data(sync_path, "1")
if is_chief:
for _ in range(sync_times):
sync_list = kvstore_client.get_prefix_kvs(
os.environ['APPLICATION_ID'])
logging.info('Sync file pattern is: %s', sync_list)
if len(sync_list) < worker_replicas:
logging.info('Count of ready workers is %d',
len(sync_list))
time.sleep(6)
else:
break
def train(self,
input_fn,
checkpoint_path=None,
save_checkpoint_steps=None,
save_checkpoint_secs=None):
if self._cluster_spec is not None:
device_fn = tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % self._worker_rank,
merge_devices=True,
cluster=self._cluster_spec)
cluster_def = self._cluster_spec.as_cluster_def()
local_address = self._cluster_spec.job_tasks('worker')[
self._worker_rank]
server = tf.train.Server(tf.train.ClusterSpec(
{'local': {
0: local_address
}}),
job_name='local',
task_index=0)
target = 'grpc://' + local_address
else:
device_fn = None
cluster_def = None
target = None
config = tf.ConfigProto(cluster_def=cluster_def)
config.inter_op_parallelism_threads = 4
config.intra_op_parallelism_threads = 4
config.experimental.share_session_state_in_clusterspec_propagation \
= True
tf.config.set_soft_device_placement(False)
with tf.Graph().as_default() as g:
with tf.device(device_fn):
features, labels = self._get_features_and_labels_from_input_fn(
input_fn, ModeKeys.TRAIN)
spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN)
if not tf.get_collection(tf.GraphKeys.SAVERS):
saver = tf.train.Saver(
sharded=True,
defer_build=True,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
listener = DataCheckpointSaverListener(self._trainer_master,
self._application_id)
saver_hook = tf.estimator.CheckpointSaverHook(
checkpoint_path, save_secs=save_checkpoint_secs,
save_steps=save_checkpoint_steps, listeners=[listener])
self._bridge.connect()
try:
with tf.train.MonitoredTrainingSession(
master=target,
config=config,
is_chief=(self._worker_rank == 0),
chief_only_hooks=[saver_hook],
checkpoint_dir=checkpoint_path,
save_checkpoint_steps=save_checkpoint_steps,
save_checkpoint_secs=save_checkpoint_secs,
hooks=spec.training_hooks) as sess:
iter_id = 0
data_checkpoint_value = None
if hasattr(saver_hook, "data_checkpoint"):
data_checkpoint_value = saver_hook.data_checkpoint
if not self._restore_datablock(data_checkpoint_value):
raise ValueError("Restore data checkpoint error")
while not sess.should_stop():
self._bridge.start(iter_id)
logging.debug('after bridge start.')
start_time = time.time()
sess.run(spec.train_op, feed_dict={})
end_time = time.time()
metrics.emit_timer(
name="iter_timer",
value=end_time-start_time,
tags={})
logging.debug('after session run.')
self._bridge.commit()
logging.debug('after bridge commit.')
iter_id += 1
finally:
self._bridge.terminate()
return self
def evaluate(self,
input_fn,
checkpoint_path=None):
if not tf.train.latest_checkpoint(checkpoint_path):
raise ValueError(
"Could not find trained model at %s" % checkpoint_path)
with tf.Graph().as_default():
features, labels = self._get_features_and_labels_from_input_fn(
input_fn, ModeKeys.EVAL)
spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL)
eval_metric_ops = spec.eval_metric_ops or {}
if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops:
loss_metric = tf.metrics.mean(spec.loss)
eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric
update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops)
update_ops.extend(model._train_ops)
eval_op = tf.group(*update_ops)
if tf.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because '
'Estimator already defines a default metric with the '
'same name.')
eval_dict[tf.GraphKeys.GLOBAL_STEP] = \
tf.train.get_or_create_global_step()
scaffold = tf.train.Scaffold()
session_creator = tf.train.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_path)
all_hooks = list(spec.evaluation_hooks) or []
final_ops_hook = tf.train.FinalOpsHook(eval_dict)
all_hooks.append(final_ops_hook)
self._bridge.connect()
try:
with tf.train.MonitoredSession(
session_creator=session_creator, hooks=all_hooks) as sess:
if not self._restore_datablock(DATA_CHECKPOINT_INIT_VALUE):
raise ValueError("Restore data checkpoint error")
iter_id = 0
while not sess.should_stop():
self._bridge.start(iter_id)
logging.debug('after bridge start.')
start_time = time.time()
sess.run(eval_op)
end_time = time.time()
metrics.emit_timer(
name="iter_timer",
value=end_time-start_time,
tags={})
logging.debug('after session run.')
self._bridge.commit()
logging.debug('after bridge commit.')
iter_id += 1
finally:
self._bridge.terminate()
logging.info('Metrics for iteration %d: %s',
iter_id, _dict_to_str(final_ops_hook.final_ops_values))
return final_ops_hook.final_ops_values
def export_saved_model(self,
export_dir_base,
serving_input_receiver_fn,
checkpoint_path=None):
with tf.Graph().as_default():
receiver = serving_input_receiver_fn()
spec, model = self._get_model_spec(receiver.features, None,
ModeKeys.PREDICT)
assert not model.sends, "Exported model cannot send"
assert not model.recvs, "Exported model cannot receive"
with tf.Session() as sess:
saver_for_restore = tf.train.Saver(sharded=True)
saver_for_restore.restore(
sess, tf.train.latest_checkpoint(checkpoint_path))
tf.saved_model.simple_save(sess, export_dir_base,
receiver.receiver_tensors,
spec.predictions, None)
return export_dir_base
def _extract_metric_update_ops(eval_dict):
update_ops = []
value_ops = {}
for name in sorted(eval_dict.keys()):
metric_tensor, update_op = eval_dict[name]
value_ops[name] = metric_tensor
update_ops.append(update_op)
return update_ops, value_ops
def _dict_to_str(dictionary):
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(dictionary.items())
if not isinstance(v, bytes))
| true | true |
f72b173c37bf64ae1456501212bb02ffe852962a | 2,398 | py | Python | sdk/python/pulumi_azure_native/azurestack/v20200601preview/get_registration_activation_key.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/azurestack/v20200601preview/get_registration_activation_key.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/azurestack/v20200601preview/get_registration_activation_key.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetRegistrationActivationKeyResult',
'AwaitableGetRegistrationActivationKeyResult',
'get_registration_activation_key',
]
@pulumi.output_type
class GetRegistrationActivationKeyResult:
"""
The resource containing the Azure Stack activation key.
"""
def __init__(__self__, activation_key=None):
if activation_key and not isinstance(activation_key, str):
raise TypeError("Expected argument 'activation_key' to be a str")
pulumi.set(__self__, "activation_key", activation_key)
@property
@pulumi.getter(name="activationKey")
def activation_key(self) -> Optional[str]:
"""
Azure Stack activation key.
"""
return pulumi.get(self, "activation_key")
class AwaitableGetRegistrationActivationKeyResult(GetRegistrationActivationKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegistrationActivationKeyResult(
activation_key=self.activation_key)
def get_registration_activation_key(registration_name: Optional[str] = None,
resource_group: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistrationActivationKeyResult:
"""
The resource containing the Azure Stack activation key.
:param str registration_name: Name of the Azure Stack registration.
:param str resource_group: Name of the resource group.
"""
__args__ = dict()
__args__['registrationName'] = registration_name
__args__['resourceGroup'] = resource_group
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:azurestack/v20200601preview:getRegistrationActivationKey', __args__, opts=opts, typ=GetRegistrationActivationKeyResult).value
return AwaitableGetRegistrationActivationKeyResult(
activation_key=__ret__.activation_key)
| 36.333333 | 175 | 0.710592 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetRegistrationActivationKeyResult',
'AwaitableGetRegistrationActivationKeyResult',
'get_registration_activation_key',
]
@pulumi.output_type
class GetRegistrationActivationKeyResult:
def __init__(__self__, activation_key=None):
if activation_key and not isinstance(activation_key, str):
raise TypeError("Expected argument 'activation_key' to be a str")
pulumi.set(__self__, "activation_key", activation_key)
@property
@pulumi.getter(name="activationKey")
def activation_key(self) -> Optional[str]:
return pulumi.get(self, "activation_key")
class AwaitableGetRegistrationActivationKeyResult(GetRegistrationActivationKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegistrationActivationKeyResult(
activation_key=self.activation_key)
def get_registration_activation_key(registration_name: Optional[str] = None,
resource_group: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistrationActivationKeyResult:
__args__ = dict()
__args__['registrationName'] = registration_name
__args__['resourceGroup'] = resource_group
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:azurestack/v20200601preview:getRegistrationActivationKey', __args__, opts=opts, typ=GetRegistrationActivationKeyResult).value
return AwaitableGetRegistrationActivationKeyResult(
activation_key=__ret__.activation_key)
| true | true |
f72b176a16e94f285b596a275b3c38e265d42aba | 11,711 | py | Python | onmt/model_builder.py | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 | [
"MIT"
] | 11 | 2019-11-22T16:46:36.000Z | 2021-07-17T04:06:14.000Z | onmt/model_builder.py | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 | [
"MIT"
] | 3 | 2019-11-11T05:40:10.000Z | 2020-03-05T14:04:38.000Z | onmt/model_builder.py | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 | [
"MIT"
] | 3 | 2020-04-04T12:21:52.000Z | 2022-02-27T13:29:45.000Z | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder
from onmt.decoders.transformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.modules import Embeddings, CopyGenerator
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
    """
    Build an Embeddings instance.

    Args:
        opt: the option in current environment.
        word_dict(Vocab): words dictionary.
        feature_dicts([Vocab], optional): a list of feature dictionary.
        for_encoder(bool): build Embeddings for encoder or decoder?
    """
    # Encoder and decoder may use different word-vector sizes.
    emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size

    pad_idx = word_dict.stoi[inputters.PAD_WORD]
    feat_pad_indices = [fd.stoi[inputters.PAD_WORD] for fd in feature_dicts]
    feat_vocab_sizes = [len(fd) for fd in feature_dicts]

    return Embeddings(word_vec_size=emb_dim,
                      position_encoding=opt.position_encoding,
                      feat_merge=opt.feat_merge,
                      feat_vec_exponent=opt.feat_vec_exponent,
                      feat_vec_size=opt.feat_vec_size,
                      dropout=opt.dropout,
                      word_padding_idx=pad_idx,
                      feat_padding_idx=feat_pad_indices,
                      word_vocab_size=len(word_dict),
                      feat_vocab_sizes=feat_vocab_sizes,
                      sparse=opt.optim == "sparseadam")
def build_encoder(opt, embeddings):
    """
    Various encoder dispatcher function.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    enc_type = opt.encoder_type
    if enc_type == "transformer":
        return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,
                                  opt.heads, opt.transformer_ff,
                                  opt.dropout, embeddings)
    if enc_type == "cnn":
        return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,
                          opt.cnn_kernel_width,
                          opt.dropout, embeddings)
    if enc_type == "mean":
        return MeanEncoder(opt.enc_layers, embeddings)
    # Default: "rnn" or "brnn".
    return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
                      opt.enc_rnn_size, opt.dropout, embeddings,
                      opt.bridge)
def build_decoder(opt, embeddings):
    """
    Various decoder dispatcher function.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    dec_type = opt.decoder_type
    if dec_type == "transformer":
        return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,
                                  opt.heads, opt.transformer_ff,
                                  opt.global_attention, opt.copy_attn,
                                  opt.self_attn_type,
                                  opt.dropout, embeddings)
    if dec_type == "cnn":
        return CNNDecoder(opt.dec_layers, opt.dec_rnn_size,
                          opt.global_attention, opt.copy_attn,
                          opt.cnn_kernel_width, opt.dropout,
                          embeddings)
    # Both RNN decoder variants take identical arguments; pick the class
    # by whether input feeding is enabled.
    rnn_decoder = InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder
    return rnn_decoder(opt.rnn_type, opt.brnn,
                       opt.dec_layers, opt.dec_rnn_size,
                       opt.global_attention,
                       opt.global_attention_function,
                       opt.coverage_attn,
                       opt.context_gate,
                       opt.copy_attn,
                       opt.dropout,
                       embeddings,
                       opt.reuse_copy_attn)
def load_test_model(opt, dummy_opt, model_path=None):
    """Load a trained checkpoint for translation.

    Returns (fields, model, model_opt) with the model (and its
    generator) switched to eval mode.
    """
    if model_path is None:
        model_path = opt.models[0]
    # map_location keeps every storage where it was deserialized, i.e.
    # the checkpoint is loaded onto CPU regardless of where it was saved.
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    fields = inputters.load_fields_from_vocab(
        checkpoint['vocab'], data_type=opt.data_type)
    model_opt = checkpoint['opt']
    # Backfill options missing from older checkpoints with the defaults.
    for key, value in dummy_opt.items():
        model_opt.__dict__.setdefault(key, value)
    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
checkpoint: the model gnerated by train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], \
("Unsupported model type %s" % (model_opt.model_type))
# for backward compatibility
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
if model_opt.model_type == 'text' and \
model_opt.enc_rnn_size != model_opt.dec_rnn_size:
raise AssertionError("""We do not support different encoder and
decoder rnn sizes for translation now.""")
# Build encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
encoder = build_encoder(model_opt, src_embeddings)
elif model_opt.model_type == "img":
if ("image_channel_size" not in model_opt.__dict__):
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size)
# Build decoder.
tgt_dict = fields["tgt"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = build_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
if src_dict != tgt_dict:
raise AssertionError('The `-share_vocab` should be set during '
'preprocess if you use share_embeddings!')
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = build_decoder(model_opt, tgt_embeddings)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt.dec_rnn_size,
fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
# This preserves backward-compat for models using customed layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = \
{fix_key(k): v for (k, v) in checkpoint['model'].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
model.to(device)
return model
def build_model(model_opt, opt, fields, checkpoint):
""" Build the Model """
logger.info('Building model...')
model = build_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
logger.info(model)
return model
| 40.663194 | 79 | 0.59252 | import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder
from onmt.decoders.transformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.modules import Embeddings, CopyGenerator
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[inputters.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam")
def build_encoder(opt, embeddings):
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,
opt.heads, opt.transformer_ff,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.enc_rnn_size, opt.dropout, embeddings,
opt.bridge)
def build_decoder(opt, embeddings):
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,
opt.heads, opt.transformer_ff,
opt.global_attention, opt.copy_attn,
opt.self_attn_type,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.dec_rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
def load_test_model(opt, dummy_opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
fields = inputters.load_fields_from_vocab(
checkpoint['vocab'], data_type=opt.data_type)
model_opt = checkpoint['opt']
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
model.eval()
model.generator.eval()
return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None):
assert model_opt.model_type in ["text", "img", "audio"], \
("Unsupported model type %s" % (model_opt.model_type))
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
if model_opt.model_type == 'text' and \
model_opt.enc_rnn_size != model_opt.dec_rnn_size:
raise AssertionError("""We do not support different encoder and
decoder rnn sizes for translation now.""")
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
encoder = build_encoder(model_opt, src_embeddings)
elif model_opt.model_type == "img":
if ("image_channel_size" not in model_opt.__dict__):
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size)
tgt_dict = fields["tgt"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = build_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
if model_opt.share_embeddings:
if src_dict != tgt_dict:
raise AssertionError('The `-share_vocab` should be set during '
'preprocess if you use share_embeddings!')
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = build_decoder(model_opt, tgt_embeddings)
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt.dec_rnn_size,
fields["tgt"].vocab)
if checkpoint is not None:
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = \
{fix_key(k): v for (k, v) in checkpoint['model'].items()}
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
model.generator = generator
model.to(device)
return model
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
logger.info(model)
return model
| true | true |
f72b18ac7bf95dbe78dbadf8c1485e348aca0705 | 870 | py | Python | code/extractWAVdata.py | eepsmedia/ping-pong-bounce | 8e06363032da88976f14146704af26d9312d195a | [
"MIT"
] | null | null | null | code/extractWAVdata.py | eepsmedia/ping-pong-bounce | 8e06363032da88976f14146704af26d9312d195a | [
"MIT"
] | null | null | null | code/extractWAVdata.py | eepsmedia/ping-pong-bounce | 8e06363032da88976f14146704af26d9312d195a | [
"MIT"
] | null | null | null | """Convert a .wav file to .csv
Uses the `wave` package to convert a .wav file to a .csv.
Assumes that the file is monoaural (one channel).
Be sure to edit the code to point to correct values of `inFileName` and `outFileName`
"""
import wave
import numpy
inFileName = "../data/pingpong.wav"
outFileName = '../data/pingpong raw redux.csv'
f = wave.open(inFileName, 'rb')
params = f.getparams()
print("There are {} frames.".format(params.nframes))
bytesData = f.readframes(params.nframes)
f.close()
a = numpy.frombuffer(bytesData, dtype=numpy.dtype('i2')) # answer is an ndarray
i = 0
with open(outFileName, 'w') as out:
out.write('time, sound\n')
for val in a:
time = 1000 * i / params.framerate # milliseconds
theLine = '{:g}, {:g}\n'.format(time, val)
out.write(theLine)
i += 1
print("Wrote {} frames.".format(i))
| 22.894737 | 85 | 0.658621 |
import wave
import numpy
inFileName = "../data/pingpong.wav"
outFileName = '../data/pingpong raw redux.csv'
f = wave.open(inFileName, 'rb')
params = f.getparams()
print("There are {} frames.".format(params.nframes))
bytesData = f.readframes(params.nframes)
f.close()
a = numpy.frombuffer(bytesData, dtype=numpy.dtype('i2'))
i = 0
with open(outFileName, 'w') as out:
out.write('time, sound\n')
for val in a:
time = 1000 * i / params.framerate
theLine = '{:g}, {:g}\n'.format(time, val)
out.write(theLine)
i += 1
print("Wrote {} frames.".format(i))
| true | true |
f72b18b4de5b0fdf2cba2aac9ddd50531ba9f7c0 | 2,145 | py | Python | setup.py | hindman/short-con | 45242757ab50a3b8b8b9826704a58006f918955d | [
"MIT"
] | null | null | null | setup.py | hindman/short-con | 45242757ab50a3b8b8b9826704a58006f918955d | [
"MIT"
] | null | null | null | setup.py | hindman/short-con | 45242757ab50a3b8b8b9826704a58006f918955d | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from os.path import dirname, realpath, join
from setuptools import setup, find_packages
import sys
####
# Basic project info.
####
project_name = 'short-con'
package_name = project_name.replace('-', '_')
repo_name = project_name
description = 'Constants collections without boilerplate'
url = 'https://github.com/hindman/' + repo_name
author = 'Monty Hindman'
author_email = 'mhindman@gmail.com'
license = 'MIT'
src_subdir = 'src'
project_dir = dirname(realpath(__file__))
####
# Requirements.
####
reqs = [
'attrs',
'six',
]
extras = {
'test' : [
'pytest',
'pytest-cov',
'tox',
],
'dev' : [
'invoke',
'ipython' if sys.version_info.major > 2 else 'ipython<6.0',
'pycodestyle',
'twine',
'virtualenv',
'virtualenvwrapper',
],
}
####
# Set __version__, long description, and classifiers.
####
version_file = join(project_dir, src_subdir, package_name, 'version.py')
exec(open(version_file).read())
readme_file = join(project_dir, 'README.md')
long_desc = open(readme_file).read()
long_desc_type = 'text/markdown'
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
]
####
# Packages and scripts.
####
packages = find_packages(where = src_subdir)
package_data = {
package_name: [],
}
####
# Install.
####
setup(
name = project_name,
version = __version__,
author = author,
author_email = author_email,
url = url,
description = description,
zip_safe = False,
packages = packages,
package_dir = {'': src_subdir},
package_data = package_data,
install_requires = reqs,
tests_require = extras['test'],
extras_require = extras,
license = license,
long_description = long_desc,
long_description_content_type = long_desc_type,
classifiers = classifiers,
)
| 21.029412 | 72 | 0.635897 |
from os.path import dirname, realpath, join
from setuptools import setup, find_packages
import sys
name = 'short-con'
package_name = project_name.replace('-', '_')
repo_name = project_name
description = 'Constants collections without boilerplate'
url = 'https://github.com/hindman/' + repo_name
author = 'Monty Hindman'
author_email = 'mhindman@gmail.com'
license = 'MIT'
src_subdir = 'src'
project_dir = dirname(realpath(__file__))
'attrs',
'six',
]
extras = {
'test' : [
'pytest',
'pytest-cov',
'tox',
],
'dev' : [
'invoke',
'ipython' if sys.version_info.major > 2 else 'ipython<6.0',
'pycodestyle',
'twine',
'virtualenv',
'virtualenvwrapper',
],
}
file = join(project_dir, src_subdir, package_name, 'version.py')
exec(open(version_file).read())
readme_file = join(project_dir, 'README.md')
long_desc = open(readme_file).read()
long_desc_type = 'text/markdown'
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
]
= find_packages(where = src_subdir)
package_data = {
package_name: [],
}
name = project_name,
version = __version__,
author = author,
author_email = author_email,
url = url,
description = description,
zip_safe = False,
packages = packages,
package_dir = {'': src_subdir},
package_data = package_data,
install_requires = reqs,
tests_require = extras['test'],
extras_require = extras,
license = license,
long_description = long_desc,
long_description_content_type = long_desc_type,
classifiers = classifiers,
)
| true | true |
f72b19bcddea7c052af0ab512ac1b3f2f93a86bf | 112,844 | py | Python | tensorflow/python/ops/variables.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | 1 | 2019-01-14T07:11:06.000Z | 2019-01-14T07:11:06.000Z | tensorflow/python/ops/variables.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/variables.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | 2 | 2019-02-26T16:21:15.000Z | 2020-12-04T17:48:17.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import functools
import os
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def default_variable_creator(_, **kwds):
  """Placeholder v1 creator; replaced when `variable_scope` is imported."""
  del kwds  # Unused: this stub never creates a variable.
  raise NotImplementedError("variable_scope needs to be imported")
def default_variable_creator_v2(_, **kwds):
  """Placeholder v2 creator; replaced when `variable_scope` is imported."""
  del kwds  # Unused: this stub never creates a variable.
  raise NotImplementedError("variable_scope needs to be imported")
def _make_getter(captured_getter, captured_previous):
"""To avoid capturing loop variables."""
def getter(**kwargs):
return captured_getter(captured_previous, **kwargs)
return getter
def _has_cycle(op, path):
"""Detect cycles in the dependencies of `initial_value`."""
if op.name in path:
return True
path.add(op.name)
for op_input in op.inputs:
if _has_cycle(op_input.op, path):
return True
for op_control_input in op.control_inputs:
if _has_cycle(op_control_input, path):
return True
path.remove(op.name)
return False
@tf_export("VariableSynchronization")
class VariableSynchronization(enum.Enum):
  """Indicates when a distributed variable will be synced.
  * `AUTO`: Indicates that the synchronization will be determined by the current
    `DistributionStrategy` (eg. With `MirroredStrategy` this would be
    `ON_WRITE`).
  * `NONE`: Indicates that there will only be one copy of the variable, so
    there is no need to sync.
  * `ON_WRITE`: Indicates that the variable will be updated across devices
    every time it is written.
  * `ON_READ`: Indicates that the variable will be aggregated across devices
    when it is read (eg. when checkpointing or when evaluating an op that uses
    the variable).
  """
  AUTO = 0  # Synchronization chosen by the current `DistributionStrategy`.
  NONE = 1  # Single copy of the variable; nothing to synchronize.
  ON_WRITE = 2  # Synced across devices on every write.
  ON_READ = 3  # Aggregated across devices when the value is read.
@tf_export("VariableAggregation", v1=[])
class VariableAggregationV2(enum.Enum):
  """Indicates how a distributed variable will be aggregated.
  `tf.contrib.distribute.DistributionStrategy` distributes a model by making
  multiple copies (called "replicas") acting data-parallel on different elements
  of the input batch. When performing some variable-update operation, say
  `var.assign_add(x)`, in a model, we need to resolve how to combine the
  different values for `x` computed in the different replicas.
  * `NONE`: This is the default, giving an error if you use a
    variable-update operation with multiple replicas.
  * `SUM`: Add the updates across replicas.
  * `MEAN`: Take the arithmetic mean ("average") of the updates across replicas.
  * `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same
    update, but we only want to perform the update once. Used, e.g., for the
    global step counter.
  """
  NONE = 0  # Default: variable updates with multiple replicas are an error.
  SUM = 1  # Add the updates across replicas.
  MEAN = 2  # Arithmetic mean of the updates across replicas.
  ONLY_FIRST_REPLICA = 3  # Apply the (identical) update from one replica only.
@tf_export(v1=["VariableAggregation"])
class VariableAggregation(enum.Enum):
  # TF1 counterpart of `VariableAggregationV2`; its docstring is attached
  # via the `__doc__` assignment below the class.
  NONE = 0  # Default: variable updates with multiple replicas are an error.
  SUM = 1  # Add the updates across replicas.
  MEAN = 2  # Arithmetic mean of the updates across replicas.
  ONLY_FIRST_REPLICA = 3  # Apply the (identical) update from one replica only.
  ONLY_FIRST_TOWER = 3  # DEPRECATED: enum alias for ONLY_FIRST_REPLICA.
# Reuse the V2 docstring and document the extra deprecated alias above.
VariableAggregation.__doc__ = (
    VariableAggregationV2.__doc__ +
    "* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\n  ")
class VariableMetaclass(type):
  """Metaclass that lets construction of `tf.Variable` be intercepted.

  Calling `Variable(...)` or `VariableV1(...)` is routed through the default
  graph's stack of variable creators instead of instantiating the class
  directly; subclasses construct normally.
  """

  def _variable_v1_call(cls,
                        initial_value=None,
                        trainable=None,
                        collections=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        variable_def=None,
                        dtype=None,
                        expected_shape=None,
                        import_scope=None,
                        constraint=None,
                        use_resource=None,
                        synchronization=VariableSynchronization.AUTO,
                        aggregation=VariableAggregation.NONE):
    """Routes a `VariableV1(...)` call through registered variable creators."""
    # Start from the default creator, then wrap every creator currently on
    # the graph's stack around it.
    chained = lambda **kwargs: default_variable_creator(None, **kwargs)
    for creator in ops.get_default_graph()._variable_creator_stack:  # pylint: disable=protected-access
      chained = _make_getter(creator, chained)
    # An explicitly passed `None` means "no aggregation": map it to the enum.
    if aggregation is None:
      aggregation = VariableAggregation.NONE
    return chained(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        variable_def=variable_def,
        dtype=dtype,
        expected_shape=expected_shape,
        import_scope=import_scope,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)

  def _variable_v2_call(cls,
                        initial_value=None,
                        trainable=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        variable_def=None,
                        dtype=None,
                        import_scope=None,
                        constraint=None,
                        synchronization=VariableSynchronization.AUTO,
                        aggregation=VariableAggregation.NONE):
    """Routes a `Variable(...)` call through registered variable creators."""
    # Same chaining as the v1 path, with the v2 default creator and the
    # reduced v2 argument set.
    chained = lambda **kws: default_variable_creator_v2(None, **kws)
    for creator in ops.get_default_graph()._variable_creator_stack:  # pylint: disable=protected-access
      chained = _make_getter(creator, chained)
    # An explicitly passed `None` means "no aggregation": map it to the enum.
    if aggregation is None:
      aggregation = VariableAggregation.NONE
    return chained(
        initial_value=initial_value,
        trainable=trainable,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        variable_def=variable_def,
        dtype=dtype,
        import_scope=import_scope,
        constraint=constraint,
        synchronization=synchronization,
        aggregation=aggregation)

  def __call__(cls, *args, **kwargs):
    # Only the two public entry points are intercepted; any other class using
    # this metaclass constructs normally via `type.__call__`.
    if cls is VariableV1:
      return cls._variable_v1_call(*args, **kwargs)
    if cls is Variable:
      return cls._variable_v2_call(*args, **kwargs)
    return super(VariableMetaclass, cls).__call__(*args, **kwargs)
@tf_export("Variable", v1=[])
class Variable(six.with_metaclass(VariableMetaclass,
checkpointable.CheckpointableBase)):
"""See the [Variables Guide](https://tensorflow.org/guide/variables).
A variable maintains state in the graph across calls to `run()`. You add a
variable to the graph by constructing an instance of the class `Variable`.
The `Variable()` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
If you want to change the shape of a variable later you have to use an
`assign` Op with `validate_shape=False`.
Just like any `Tensor`, variables created with `Variable()` can be used as
inputs for other Ops in the graph. Additionally, all the operators
overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
```python
import tensorflow as tf
# Create a variable.
w = tf.Variable(<initial-value>, name=<optional-name>)
# Use the variable in the graph like any Tensor.
y = tf.matmul(w, ...another variable or tensor...)
# The overloaded operators are available too.
z = tf.sigmoid(w + y)
# Assign a new value to the variable with `assign()` or a related method.
w.assign(w + 1.0)
w.assign_add(1.0)
```
When you launch the graph, variables have to be explicitly initialized before
you can run Ops that use their value. You can initialize a variable by
running its *initializer op*, restoring the variable from a save file, or
simply running an `assign` Op that assigns a value to the variable. In fact,
the variable *initializer op* is just an `assign` Op that assigns the
variable's initial value to the variable itself.
```python
# Launch the graph in a session.
with tf.Session() as sess:
# Run the variable initializer.
sess.run(w.initializer)
# ...you now can run ops that use the value of 'w'...
```
The most common initialization pattern is to use the convenience function
`global_variables_initializer()` to add an Op to the graph that initializes
all the variables. You then run that Op after launching the graph.
```python
# Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
# Launch the graph in a session.
with tf.Session() as sess:
# Run the Op that initializes global variables.
sess.run(init_op)
# ...you can now run any Op that uses variable values...
```
If you need to create a variable with an initial value dependent on another
variable, use the other variable's `initialized_value()`. This ensures that
variables are initialized in the right order.
All variables are automatically collected in the graph where they are
created. By default, the constructor adds the new variable to the graph
collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
`global_variables()` returns the contents of that collection.
When building a machine learning model it is often convenient to distinguish
between variables holding the trainable model parameters and other variables
such as a `global step` variable used to count training steps. To make this
easier, the variable constructor supports a `trainable=<bool>` parameter. If
`True`, the new variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. The convenience function
`trainable_variables()` returns the contents of this collection. The
various `Optimizer` classes use this collection as the default list of
variables to optimize.
WARNING: tf.Variable objects by default have a non-intuitive memory model. A
Variable is represented internally as a mutable Tensor which can
non-deterministically alias other Tensors in a graph. The set of operations
which consume a Variable and can lead to aliasing is undetermined and can
change across TensorFlow versions. Avoid writing code which relies on the
value of a Variable either changing or not changing as other operations
happen. For example, using Variable objects or simple functions thereof as
predicates in a `tf.cond` is dangerous and error-prone:
```
v = tf.Variable(True)
tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.
```
Here replacing adding `use_resource=True` when constructing the variable will
fix any nondeterminism issues:
```
v = tf.Variable(True, use_resource=True)
tf.cond(v, lambda: v.assign(False), my_false_fn)
```
To use the replacement for variables which does
not have these issues:
* Add `use_resource=True` when constructing `tf.Variable`;
* Call `tf.get_variable_scope().set_use_resource(True)` inside a
`tf.variable_scope` before the `tf.get_variable()` call.
"""
  def __init__(self,
               initial_value=None,
               trainable=True,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               import_scope=None,
               constraint=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    """Creates a new variable with value `initial_value`.
    The new variable is added to the graph collections listed in `collections`,
    which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    If `trainable` is `True` the variable is also added to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES`.
    This constructor creates both a `variable` Op and an `assign` Op to set the
    variable to its initial value.
    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
      trainable: If `True`, the default, GradientTapes automatically watch uses
        of this variable.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string describing where the Variable
        should be cached for reading. Defaults to the Variable's device.
        If not `None`, caches on another device. Typical use is to cache
        on the device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      variable_def: `VariableDef` protocol buffer. If not `None`, recreates
        the Variable object with its contents, referencing the variable's nodes
        in the graph, which must already exist. The graph is not changed.
        `variable_def` and the other arguments are mutually exclusive.
      dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
      import_scope: Optional `string`. Name scope to add to the
        `Variable.` Only used when initializing from protocol buffer.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      synchronization: Indicates when a distributed variable will be
        synchronized. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
    Raises:
      ValueError: If both `variable_def` and initial_value are specified.
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If eager execution is enabled.
    """
    # Abstract: construction is routed by `VariableMetaclass.__call__` to a
    # concrete implementation, so this stub is never reached directly.
    raise NotImplementedError
def __repr__(self):
  # Abstract: concrete subclasses supply the actual string representation.
  raise NotImplementedError
def value(self):
  """Returns the last snapshot of this variable.

  You usually do not need to call this method, because every op that needs
  the variable's value fetches it automatically via `convert_to_tensor()`.

  The returned `Tensor` holds the value of the variable; it is not a
  reference to the variable, so a new value cannot be assigned through it.
  To avoid copies, when the consumer of the returned value is on the same
  device as the variable, this actually returns the live value of the
  variable (so updates are visible to the consumer); a consumer on a
  different device receives a copy.

  Returns:
    A `Tensor` containing the value of the variable.
  """
  raise NotImplementedError
def read_value(self):
  """Returns the value of this variable, read in the current context.

  May differ from `value()` when the variable lives on another device,
  under control dependencies, etc.

  Returns:
    A `Tensor` containing the value of the variable.
  """
  raise NotImplementedError
def set_shape(self, shape):
  """Overrides this variable's shape.

  Args:
    shape: a `TensorShape` giving the shape to use in place of the
      inferred one.
  """
  raise NotImplementedError
@property
def trainable(self):
  # Abstract: whether GradientTapes automatically watch uses of this
  # variable (see the `trainable` constructor argument).
  raise NotImplementedError
def eval(self, session=None):
  """In a session, computes and returns the value of this variable.

  This is not a graph construction method; it does not add ops to the
  graph.

  This convenience method requires a session in which this variable's
  graph was launched.  If no session is passed, the default session is
  used.  See `tf.Session` for more information on launching a graph and on
  sessions.

  ```python
  v = tf.Variable([1, 2])
  init = tf.global_variables_initializer()

  with tf.Session() as sess:
      sess.run(init)
      # Usage passing the session explicitly.
      print(v.eval(sess))
      # Usage with the default session.  The 'with' block
      # above makes 'sess' the default session.
      print(v.eval())
  ```

  Args:
    session: The session to use to evaluate this variable. If
      none, the default session is used.

  Returns:
    A numpy `ndarray` with a copy of the value of this variable.
  """
  raise NotImplementedError
def initialized_value(self):
  """Returns the value of the initialized variable.

  Use this, rather than the variable itself, to initialize another
  variable whose initial value depends on the value of this one; it
  guarantees the variables are initialized in the right order.

  ```python
  # Initialize 'v' with a random tensor.
  v = tf.Variable(tf.truncated_normal([10, 40]))
  # Use `initialized_value` to guarantee that `v` has been
  # initialized before its value is used to initialize `w`.
  # The random values are picked only once.
  w = tf.Variable(v.initialized_value() * 2.0)
  ```

  Returns:
    A `Tensor` holding the value of this variable after its initializer
    has run.
  """
  raise NotImplementedError
@property
def initial_value(self):
  """Returns the Tensor used as the initial value for the variable.

  Unlike `initialized_value()`, which runs the op that initializes the
  variable before returning its value, this property simply returns the
  tensor that the initializer op consumes.

  Returns:
    A `Tensor`.
  """
  raise NotImplementedError
@property
def constraint(self):
  """Returns the constraint function associated with this variable.

  Returns:
    The constraint function passed to the variable's constructor, or
    `None` if no constraint was supplied.
  """
  raise NotImplementedError
def assign(self, value, use_locking=False, name=None, read_value=True):
  """Assigns a new value to the variable.

  Essentially a shortcut for `assign(self, value)`.

  Args:
    value: A `Tensor`; the variable's new value.
    use_locking: If `True`, use locking during the assignment.
    name: The name of the operation to be created.
    read_value: if True, will return something which evaluates to the
      new value of the variable; if False will return the assign op.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the assignment has completed.
  """
  raise NotImplementedError
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
  """Adds a value to this variable.

  Essentially a shortcut for `assign_add(self, delta)`.

  Args:
    delta: A `Tensor`; the value to add to this variable.
    use_locking: If `True`, use locking during the operation.
    name: The name of the operation to be created.
    read_value: if True, will return something which evaluates to the
      new value of the variable; if False will return the assign op.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the addition has completed.
  """
  raise NotImplementedError
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
  """Subtracts a value from this variable.

  Essentially a shortcut for `assign_sub(self, delta)`.

  Args:
    delta: A `Tensor`; the value to subtract from this variable.
    use_locking: If `True`, use locking during the operation.
    name: The name of the operation to be created.
    read_value: if True, will return something which evaluates to the
      new value of the variable; if False will return the assign op.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the subtraction has completed.
  """
  raise NotImplementedError
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
  """Subtracts `IndexedSlices` from this variable.

  Args:
    sparse_delta: `IndexedSlices` holding the values to subtract from this
      variable.
    use_locking: If `True`, use locking during the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered subtraction has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  raise NotImplementedError
def scatter_add(self, sparse_delta, use_locking=False, name=None):
  """Adds `IndexedSlices` to this variable.

  Args:
    sparse_delta: `IndexedSlices` to be added to this variable.
    use_locking: If `True`, use locking during the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered addition has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  raise NotImplementedError
def scatter_update(self, sparse_delta, use_locking=False, name=None):
  """Assigns `IndexedSlices` to this variable.

  Args:
    sparse_delta: `IndexedSlices` to be assigned to this variable.
    use_locking: If `True`, use locking during the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered assignment has completed.

  Raises:
    ValueError: if `sparse_delta` is not an `IndexedSlices`.
  """
  raise NotImplementedError
def scatter_nd_sub(self, indices, updates, name=None):
  """Applies sparse subtraction to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1
  tensor with 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1] ,[7]])
  updates = tf.constant([9, 10, 11, 12])
  op = ref.scatter_nd_sub(indices, updates)
  with tf.Session() as sess:
    print sess.run(op)
  ```

  The resulting update to ref would look like this:

      [1, -9, 3, -6, -6, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered subtraction has completed.
  """
  raise NotImplementedError
def scatter_nd_add(self, indices, updates, name=None):
  """Applies sparse addition to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to add 4 scattered elements to a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1] ,[7]])
  updates = tf.constant([9, 10, 11, 12])
  add = ref.scatter_nd_add(indices, updates)
  with tf.Session() as sess:
    print sess.run(add)
  ```

  The resulting update to ref would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered addition has completed.
  """
  raise NotImplementedError
def scatter_nd_update(self, indices, updates, name=None):
  """Applies sparse assignment to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to assign 4 scattered elements of a rank-1 tensor
  with 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1] ,[7]])
  updates = tf.constant([9, 10, 11, 12])
  op = ref.scatter_nd_update(indices, updates)
  with tf.Session() as sess:
    print sess.run(op)
  ```

  The resulting update to ref would look like this:

      [1, 11, 3, 10, 9, 6, 7, 12]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    indices: The indices to be used in the operation.
    updates: The values to be used in the operation.
    name: the name of the operation.

  Returns:
    A `Tensor` that will hold the new value of this variable after
    the scattered assignment has completed.
  """
  raise NotImplementedError
def count_up_to(self, limit):
  """Increments this variable until it reaches `limit`.

  Running the returned Op attempts to increment the variable by `1`.  If
  incrementing would push the variable above `limit`, the Op raises
  `OutOfRangeError` instead; otherwise it outputs the value the variable
  held before the increment.

  This is essentially a shortcut for `count_up_to(self, limit)`.

  Args:
    limit: value at which incrementing the variable raises an error.

  Returns:
    A `Tensor` that will hold the variable value before the increment. If no
    other Op modifies this variable, the values produced will all be
    distinct.
  """
  raise NotImplementedError
def load(self, value, session=None):
  """Load new value into this variable.

  Writes the new value directly into the variable's memory; no ops are
  added to the graph.

  This convenience method requires a session in which this variable's
  graph was launched.  If no session is passed, the default session is
  used.  See `tf.Session` for more information on launching a graph and on
  sessions.

  ```python
  v = tf.Variable([1, 2])
  init = tf.global_variables_initializer()

  with tf.Session() as sess:
      sess.run(init)
      # Usage passing the session explicitly.
      v.load([2, 3], sess)
      print(v.eval(sess)) # prints [2 3]
      # Usage with the default session.  The 'with' block
      # above makes 'sess' the default session.
      v.load([3, 4], sess)
      print(v.eval()) # prints [3 4]
  ```

  Args:
    value: New variable value.
    session: The session to use to evaluate this variable. If
      none, the default session is used.

  Raises:
    ValueError: Session is not passed and no default session.
  """
  raise NotImplementedError
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):  # pylint: disable=invalid-name
  """Utility function for converting a Variable to a Tensor."""
  _ = name  # Unused; kept to match the conversion-function signature.
  if dtype and not dtype.is_compatible_with(v.dtype):
    raise ValueError(
        "Incompatible type conversion requested to type '%s' for variable "
        "of type '%s'" % (dtype.name, v.dtype.name))
  if as_ref:
    return v._ref()  # pylint: disable=protected-access
  return v.value()
@classmethod
def _OverloadAllOperators(cls):  # pylint: disable=invalid-name
  """Register overloads for all operators."""
  for op_name in ops.Tensor.OVERLOADABLE_OPERATORS:
    cls._OverloadOperator(op_name)
  # Slicing needs the variable-aware helper (SliceHelperVar) rather than
  # the plain Tensor __getitem__.
  setattr(cls, "__getitem__", array_ops._SliceHelperVar)  # pylint: disable=protected-access
@classmethod
def _OverloadOperator(cls, operator):  # pylint: disable=invalid-name
  """Defer an operator overload to `ops.Tensor`.

  The operator is looked up on `ops.Tensor` dynamically, which avoids
  ordering issues during module initialization.

  Args:
    operator: string. The operator name.
  """
  base_op = getattr(ops.Tensor, operator)

  def _forward(a, *args, **kwargs):
    # Convert the variable to a tensor, then delegate to the Tensor op.
    return base_op(a._AsTensor(), *args, **kwargs)  # pylint: disable=protected-access

  # Make the wrapper look like the Tensor operator (name, docstring, ...).
  functools.update_wrapper(_forward, base_op)
  setattr(cls, operator, _forward)
def __iter__(self):
  """Dummy method that blocks iteration.  Do not call.

  NOTE(mrry): If we register __getitem__ as an overloaded operator,
  Python will valiantly attempt to iterate over the variable's Tensor from 0
  to infinity.  Declaring this method prevents this unintended behavior.

  Raises:
    TypeError: when invoked.
  """
  raise TypeError("'Variable' object is not iterable.")
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# (numpy consults __array_priority__ to decide which operand's operator
# implementation wins in mixed expressions.)
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Variables interact
# with ndarrays.
__array_priority__ = 100
# Read-only metadata about the variable; all of these are abstract here
# and implemented by concrete subclasses.

@property
def name(self):
  """The name of this variable."""
  raise NotImplementedError

@property
def initializer(self):
  """The initializer operation for this variable."""
  raise NotImplementedError

@property
def device(self):
  """The device of this variable."""
  raise NotImplementedError

@property
def dtype(self):
  """The `DType` of this variable."""
  raise NotImplementedError

@property
def op(self):
  """The `Operation` of this variable."""
  raise NotImplementedError

@property
def graph(self):
  """The `Graph` of this variable."""
  raise NotImplementedError

@property
def shape(self):
  """The `TensorShape` of this variable.

  Returns:
    A `TensorShape`.
  """
  raise NotImplementedError

def get_shape(self):
  """Alias of Variable.shape."""
  raise NotImplementedError
def to_proto(self, export_scope=None):
  """Serializes this `Variable` to a `VariableDef` protocol buffer.

  Args:
    export_scope: Optional `string`. Name scope to remove.

  Returns:
    A `VariableDef` protocol buffer, or `None` if the `Variable` is not
    in the specified name scope.
  """
  raise NotImplementedError
@staticmethod
def from_proto(variable_def, import_scope=None):
  """Returns a `Variable` object created from `variable_def`."""
  # Deserialization always yields the ref-based implementation.
  return RefVariable(variable_def=variable_def,
                     import_scope=import_scope)
class SaveSliceInfo(object):
  """Information on how to save this Variable as a slice.

  Provides internal support for saving variables as slices of a larger
  variable.  This API is not public and is subject to change.

  Available properties:

  * full_name
  * full_shape
  * var_offset
  * var_shape
  """

  def __init__(self,
               full_name=None,
               full_shape=None,
               var_offset=None,
               var_shape=None,
               save_slice_info_def=None,
               import_scope=None):
    """Create a `SaveSliceInfo`.

    Args:
      full_name: Name of the full variable of which this `Variable` is a
        slice.
      full_shape: Shape of the full variable, as a list of int.
      var_offset: Offset of this `Variable` into the full variable, as a
        list of int.
      var_shape: Shape of this `Variable`, as a list of int.
      save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
        recreates the SaveSliceInfo object from its contents.
        `save_slice_info_def` and the other arguments are mutually
        exclusive.
      import_scope: Optional `string`. Name scope to add. Only used
        when initializing from protocol buffer.
    """
    if save_slice_info_def:
      assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
      self.full_name = ops.prepend_name_scope(
          save_slice_info_def.full_name, import_scope=import_scope)
      # list(...) copies the proto's repeated fields into plain Python
      # lists (clearer and cheaper than a copying comprehension).
      self.full_shape = list(save_slice_info_def.full_shape)
      self.var_offset = list(save_slice_info_def.var_offset)
      self.var_shape = list(save_slice_info_def.var_shape)
    else:
      self.full_name = full_name
      self.full_shape = full_shape
      self.var_offset = var_offset
      self.var_shape = var_shape

  @property
  def spec(self):
    """Computes the spec string used for saving."""
    full_shape_str = " ".join("%d" % d for d in self.full_shape) + " "
    sl_spec = ":".join(
        "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape))
    return full_shape_str + sl_spec

  def to_proto(self, export_scope=None):
    """Returns a SaveSliceInfoDef() proto.

    Args:
      export_scope: Optional `string`. Name scope to remove.

    Returns:
      A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is
      not in the specified name scope.
    """
    if (export_scope is None or
        self.full_name.startswith(export_scope)):
      save_slice_info_def = variable_pb2.SaveSliceInfoDef()
      save_slice_info_def.full_name = ops.strip_name_scope(
          self.full_name, export_scope)
      # extend() copies each list into the proto's repeated field in one
      # call instead of an element-by-element append loop.
      save_slice_info_def.full_shape.extend(self.full_shape)
      save_slice_info_def.var_offset.extend(self.var_offset)
      save_slice_info_def.var_shape.extend(self.var_shape)
      return save_slice_info_def
    else:
      return None
# In-place arithmetic operators (`+=`, `-=`, `*=`, ...) are abstract here;
# concrete subclasses implement them.

def __iadd__(self, other):
  raise NotImplementedError

def __isub__(self, other):
  raise NotImplementedError

def __imul__(self, other):
  raise NotImplementedError

def __idiv__(self, other):
  raise NotImplementedError

def __itruediv__(self, other):
  raise NotImplementedError

def __irealdiv__(self, other):
  raise NotImplementedError

def __ipow__(self, other):
  raise NotImplementedError
@tf_export(v1=["Variable"])
class VariableV1(Variable):
  """See the [Variables Guide](https://tensorflow.org/guide/variables).

  A variable maintains state in the graph across calls to `run()`. You add a
  variable to the graph by constructing an instance of the class `Variable`.

  The `Variable()` constructor requires an initial value for the variable,
  which can be a `Tensor` of any type and shape. The initial value defines the
  type and shape of the variable. After construction, the type and shape of
  the variable are fixed. The value can be changed using one of the assign
  methods.

  If you want to change the shape of a variable later you have to use an
  `assign` Op with `validate_shape=False`.

  Just like any `Tensor`, variables created with `Variable()` can be used as
  inputs for other Ops in the graph. Additionally, all the operators
  overloaded for the `Tensor` class are carried over to variables, so you can
  also add nodes to the graph by just doing arithmetic on variables.

  ```python
  import tensorflow as tf

  # Create a variable.
  w = tf.Variable(<initial-value>, name=<optional-name>)

  # Use the variable in the graph like any Tensor.
  y = tf.matmul(w, ...another variable or tensor...)

  # The overloaded operators are available too.
  z = tf.sigmoid(w + y)

  # Assign a new value to the variable with `assign()` or a related method.
  w.assign(w + 1.0)
  w.assign_add(1.0)
  ```

  When you launch the graph, variables have to be explicitly initialized before
  you can run Ops that use their value. You can initialize a variable by
  running its *initializer op*, restoring the variable from a save file, or
  simply running an `assign` Op that assigns a value to the variable. In fact,
  the variable *initializer op* is just an `assign` Op that assigns the
  variable's initial value to the variable itself.

  ```python
  # Launch the graph in a session.
  with tf.Session() as sess:
      # Run the variable initializer.
      sess.run(w.initializer)
      # ...you now can run ops that use the value of 'w'...
  ```

  The most common initialization pattern is to use the convenience function
  `global_variables_initializer()` to add an Op to the graph that initializes
  all the variables. You then run that Op after launching the graph.

  ```python
  # Add an Op to initialize global variables.
  init_op = tf.global_variables_initializer()

  # Launch the graph in a session.
  with tf.Session() as sess:
      # Run the Op that initializes global variables.
      sess.run(init_op)
      # ...you can now run any Op that uses variable values...
  ```

  If you need to create a variable with an initial value dependent on another
  variable, use the other variable's `initialized_value()`. This ensures that
  variables are initialized in the right order.

  All variables are automatically collected in the graph where they are
  created. By default, the constructor adds the new variable to the graph
  collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
  `global_variables()` returns the contents of that collection.

  When building a machine learning model it is often convenient to distinguish
  between variables holding the trainable model parameters and other variables
  such as a `global step` variable used to count training steps. To make this
  easier, the variable constructor supports a `trainable=<bool>` parameter. If
  `True`, the new variable is also added to the graph collection
  `GraphKeys.TRAINABLE_VARIABLES`. The convenience function
  `trainable_variables()` returns the contents of this collection. The
  various `Optimizer` classes use this collection as the default list of
  variables to optimize.

  WARNING: tf.Variable objects by default have a non-intuitive memory model. A
  Variable is represented internally as a mutable Tensor which can
  non-deterministically alias other Tensors in a graph. The set of operations
  which consume a Variable and can lead to aliasing is undetermined and can
  change across TensorFlow versions. Avoid writing code which relies on the
  value of a Variable either changing or not changing as other operations
  happen. For example, using Variable objects or simple functions thereof as
  predicates in a `tf.cond` is dangerous and error-prone:

  ```
  v = tf.Variable(True)
  tf.cond(v, lambda: v.assign(False), my_false_fn)  # Note: this is broken.
  ```

  Here, adding `use_resource=True` when constructing the variable will fix
  any nondeterminism issues:

  ```
  v = tf.Variable(True, use_resource=True)
  tf.cond(v, lambda: v.assign(False), my_false_fn)
  ```

  To use the replacement for variables which does not have these issues:

  * Add `use_resource=True` when constructing `tf.Variable`;
  * Call `tf.get_variable_scope().set_use_resource(True)` inside a
    `tf.variable_scope` before the `tf.get_variable()` call.
  """

  def __init__(self,  # pylint: disable=super-init-not-called
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               expected_shape=None,
               import_scope=None,
               constraint=None,
               use_resource=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    """Creates a new variable with value `initial_value`.

    The new variable is added to the graph collections listed in
    `collections`, which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.

    If `trainable` is `True` the variable is also added to the graph
    collection `GraphKeys.TRAINABLE_VARIABLES`.

    This constructor creates both a `variable` Op and an `assign` Op to set
    the variable to its initial value.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must
        have a shape specified unless `validate_shape` is set to False. Can
        also be a callable with no argument that returns the initial value
        when called. In that case, `dtype` must be specified. (Note that
        initializer functions from init_ops.py must first be bound to a
        shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used
        as the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added
        to these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with
        a value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string describing where the Variable
        should be cached for reading. Defaults to the Variable's device.
        If not `None`, caches on another device. Typical use is to cache
        on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional
        statements.
      name: Optional name for the variable. Defaults to `'Variable'` and
        gets uniquified automatically.
      variable_def: `VariableDef` protocol buffer. If not `None`, recreates
        the Variable object with its contents, referencing the variable's
        nodes in the graph, which must already exist. The graph is not
        changed. `variable_def` and the other arguments are mutually
        exclusive.
      dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
      expected_shape: A TensorShape. If set, initial_value is expected
        to have this shape.
      import_scope: Optional `string`. Name scope to add to the
        `Variable.` Only used when initializing from protocol buffer.
      constraint: An optional projection function to be applied to the
        variable after being updated by an `Optimizer` (e.g. used to
        implement norm constraints or value constraints for layer weights).
        The function must take as input the unprojected Tensor representing
        the value of the variable and return the Tensor for the projected
        value (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      use_resource: whether to use resource variables.
      synchronization: unused
      aggregation: unused

    Raises:
      ValueError: If both `variable_def` and initial_value are specified.
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If eager execution is enabled.
    """
    # NOTE(review): the body is intentionally just this docstring -- no code
    # runs here.  Presumably construction is intercepted elsewhere (e.g. by
    # a metaclass on `Variable`); confirm before adding logic to this
    # method.

  # Reuse the slice-saving metadata class defined on `Variable`.
  SaveSliceInfo = Variable.SaveSliceInfo
# TODO(apassos): do not repeat all comments here
class RefVariable(VariableV1):
"""Ref-based implementation of variables."""
def __init__(self,  # pylint: disable=super-init-not-called
             initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             variable_def=None,
             dtype=None,
             expected_shape=None,
             import_scope=None,
             constraint=None):
  """Creates a new ref-based variable with value `initial_value`.

  The new variable is added to the graph collections listed in
  `collections` (defaulting to `[GraphKeys.GLOBAL_VARIABLES]`), and
  additionally to `GraphKeys.TRAINABLE_VARIABLES` when `trainable` is
  `True`.  The constructor creates both a `variable` Op and an `assign` Op
  that sets the variable to its initial value.

  Args:
    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable.  Must have a shape
      specified unless `validate_shape` is False.  May also be a
      no-argument callable returning the initial value, in which case
      `dtype` must be specified.  (Initializer functions from init_ops.py
      must first be bound to a shape before being used here.)
    trainable: If `True` (the default), also adds the variable to the
      `GraphKeys.TRAINABLE_VARIABLES` collection, used as the default list
      of variables by the `Optimizer` classes.
    collections: List of graph collection keys the new variable is added
      to.  Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    validate_shape: If `False`, allows initialization from a value of
      unknown shape.  If `True` (the default), the shape of
      `initial_value` must be known.
    caching_device: Optional device string describing where the Variable
      should be cached for reading.  Defaults to the Variable's device.
    name: Optional name for the variable.  Defaults to `'Variable'`, made
      unique automatically.
    variable_def: `VariableDef` protocol buffer.  If not `None`, recreates
      the Variable object from its contents, referencing the variable's
      nodes in the graph (which must already exist); mutually exclusive
      with the other arguments.
    dtype: If set, `initial_value` is converted to this type; otherwise
      the datatype is kept (if `initial_value` is a Tensor) or
      `convert_to_tensor` decides.
    expected_shape: A TensorShape; if set, `initial_value` is expected to
      have this shape.
    import_scope: Optional `string` name scope to add to the `Variable`.
      Only used when initializing from a protocol buffer.
    constraint: Optional projection function applied to the variable after
      `Optimizer` updates (e.g. for norm or value constraints).  Must map
      the unprojected value tensor to a projected tensor of the same
      shape.  Not safe to use with asynchronous distributed training.

  Raises:
    ValueError: If both `variable_def` and initial_value are specified,
      or if the initial value is not specified or lacks a shape while
      `validate_shape` is `True`.
    RuntimeError: If eager execution is enabled.
  """
  self._in_graph_mode = True
  if variable_def:
    # Rebuild the variable from its serialized form; no other construction
    # arguments may be combined with it.
    if initial_value:
      raise ValueError("variable_def and initial_value are mutually "
                       "exclusive.")
    self._init_from_proto(variable_def, import_scope=import_scope)
    return
  # Fresh variable: build the ops from the initial value.
  self._init_from_args(
      initial_value=initial_value,
      trainable=trainable,
      collections=collections,
      validate_shape=validate_shape,
      caching_device=caching_device,
      name=name,
      dtype=dtype,
      expected_shape=expected_shape,
      constraint=constraint)
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % (
self.name, self.get_shape(), self.dtype.name,
ops.numpy_text(self.read_value(), is_repr=True))
else:
return "<tf.Variable '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self.dtype.name)
  def _init_from_args(self,
                      initial_value=None,
                      trainable=True,
                      collections=None,
                      validate_shape=True,
                      caching_device=None,
                      name=None,
                      dtype=None,
                      expected_shape=None,
                      constraint=None):
    """Creates a new variable from arguments.
    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      expected_shape: Deprecated. Ignored.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If lifted into the eager context.
    """
    _ = expected_shape  # Deprecated argument; intentionally ignored.
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    # A callable initial value is invoked lazily, inside the Initializer scope
    # built below.
    init_from_fn = callable(initial_value)
    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if not isinstance(collections, (list, tuple, set)):
      raise ValueError(
          "collections argument to Variable constructor must be a list, tuple, "
          "or set. Got %s of type %s" % (collections, type(collections)))
    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      # Unwrap checkpoint-restored initial values and remember the restore UID
      # so the checkpointing machinery can track this restoration.
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value
    self._trainable = trainable
    if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
    with ops.init_scope():
      # Ensure that we weren't lifted into the eager context.
      if context.executing_eagerly():
        raise RuntimeError(
            "RefVariable not supported when eager execution is enabled. ")
      with ops.name_scope(name, "Variable", [] if init_from_fn else
                          [initial_value]) as name:
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          true_name = ops._name_from_scope_name(name)  # pylint: disable=protected-access
          attr = attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(
                  s=[compat.as_bytes("loc:@%s" % true_name)]))
          # pylint: disable=protected-access
          with ops.get_default_graph()._attr_scope({"_class": attr}):
            with ops.name_scope("Initializer"), ops.device(None):
              self._initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
            shape = (self._initial_value.get_shape()
                     if validate_shape else tensor_shape.unknown_shape())
            self._variable = state_ops.variable_op_v2(
                shape,
                self._initial_value.dtype.base_dtype,
                name=name)
          # pylint: enable=protected-access
        # Or get the initial value from a Tensor or Python object.
        else:
          self._initial_value = ops.convert_to_tensor(
              initial_value, name="initial_value", dtype=dtype)
          # pylint: disable=protected-access
          if self._initial_value.op._get_control_flow_context() is not None:
            raise ValueError(
                "Initializer for variable %s is from inside a control-flow "
                "construct, such as a loop or conditional. When creating a "
                "variable inside a loop or conditional, use a lambda as the "
                "initializer." % name)
          # pylint: enable=protected-access
          shape = (self._initial_value.get_shape()
                   if validate_shape else tensor_shape.unknown_shape())
          # In this case, the variable op can't be created until after the
          # initial_value has been converted to a Tensor with a known type.
          self._variable = state_ops.variable_op_v2(
              shape,
              self._initial_value.dtype.base_dtype,
              name=name)
        # Manually overrides the variable's shape with the initial value's.
        if validate_shape:
          initial_value_shape = self._initial_value.get_shape()
          if not initial_value_shape.is_fully_defined():
            raise ValueError("initial_value must have a shape specified: %s" %
                             self._initial_value)
        # If 'initial_value' makes use of other variables, make sure we don't
        # have an issue if these other variables aren't initialized first by
        # using their initialized_value() method.
        self._initializer_op = state_ops.assign(
            self._variable,
            self._try_guard_against_uninitialized_dependencies(
                self._initial_value),
            validate_shape=validate_shape).op
        # TODO(vrv): Change this class to not take caching_device, but
        # to take the op to colocate the snapshot with, so we can use
        # colocation rather than devices.
        if caching_device is not None:
          with ops.device(caching_device):
            self._snapshot = array_ops.identity(self._variable, name="read")
        else:
          # Default: colocate the cached read snapshot with the variable op.
          with ops.colocate_with(self._variable.op):
            self._snapshot = array_ops.identity(self._variable, name="read")
      ops.add_to_collections(collections, self)
    self._caching_device = caching_device
    self._save_slice_info = None
    self._constraint = constraint
  def _init_from_proto(self, variable_def, import_scope=None):
    """Recreates the Variable object from a `VariableDef` protocol buffer.
    Args:
      variable_def: `VariableDef` protocol buffer, describing a variable
          whose nodes already exists in the graph.
      import_scope: Optional `string`. Name scope to add.
    """
    assert isinstance(variable_def, variable_pb2.VariableDef)
    # Create from variable_def.
    g = ops.get_default_graph()
    # The named nodes must already exist in the default graph: these lookups
    # only resolve existing graph elements, they do not create new ops.
    self._variable = g.as_graph_element(
        ops.prepend_name_scope(variable_def.variable_name,
                               import_scope=import_scope))
    self._initializer_op = g.as_graph_element(
        ops.prepend_name_scope(variable_def.initializer_name,
                               import_scope=import_scope))
    # Tests whether initial_value_name exists first for backwards compatibility.
    if (hasattr(variable_def, "initial_value_name") and
        variable_def.initial_value_name):
      self._initial_value = g.as_graph_element(
          ops.prepend_name_scope(variable_def.initial_value_name,
                                 import_scope=import_scope))
    else:
      self._initial_value = None
    # Older protos may predate the `trainable` field; default to True.
    self._trainable = getattr(variable_def, "trainable", True)
    self._snapshot = g.as_graph_element(
        ops.prepend_name_scope(variable_def.snapshot_name,
                               import_scope=import_scope))
    if variable_def.HasField("save_slice_info_def"):
      self._save_slice_info = Variable.SaveSliceInfo(
          save_slice_info_def=variable_def.save_slice_info_def,
          import_scope=import_scope)
    else:
      self._save_slice_info = None
    # Caching device and constraint are not serialized in a VariableDef.
    self._caching_device = None
    self._constraint = None
  def _as_graph_element(self):
    """Conversion function for Graph.as_graph_element()."""
    # Expose the underlying variable op's output (the mutable ref tensor).
    return self._variable
  def _AsTensor(self):  # pylint: disable=invalid-name
    """Converts this variable to a Tensor.
    See `tf.Variable.value`.
    Returns:
      A `Tensor` containing the value of the variable.
    """
    # Returns the cached snapshot (identity read) built at construction time.
    return self._snapshot
  def value(self):
    """Returns the last snapshot of this variable.
    You usually do not need to call this method as all ops that need the value
    of the variable call it automatically through a `convert_to_tensor()` call.
    Returns a `Tensor` which holds the value of the variable.  You can not
    assign a new value to this tensor as it is not a reference to the variable.
    To avoid copies, if the consumer of the returned value is on the same device
    as the variable, this actually returns the live value of the variable, not
    a copy.  Updates to the variable are seen by the consumer.  If the consumer
    is on a different device it will get a copy of the variable.
    Returns:
      A `Tensor` containing the value of the variable.
    """
    # The snapshot is an identity op created in _init_from_args, placed on
    # caching_device when one was given, otherwise colocated with the variable.
    return self._snapshot
  def read_value(self):
    """Returns the value of this variable, read in the current context.
    Can be different from value() if it's on another device, with control
    dependencies, etc.
    Returns:
      A `Tensor` containing the value of the variable.
    """
    # Unlike value(), this creates a fresh identity read each call, so it picks
    # up the current device placement and control dependencies.
    return array_ops.identity(self._variable, name="read")
  def _ref(self):
    """Returns a reference to this variable.
    You usually do not need to call this method as all ops that need a reference
    to the variable call it automatically.
    Returns is a `Tensor` which holds a reference to the variable.  You can
    assign a new value to the variable by passing the tensor to an assign op.
    See `tf.Variable.value` if you want to get the value of the
    variable.
    Returns:
      A `Tensor` that is a reference to the variable.
    """
    # The mutable ref-typed output of the underlying variable op.
    return self._variable
def set_shape(self, shape):
"""Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
"""
self._ref().set_shape(shape)
self.value().set_shape(shape)
  @property
  def trainable(self):
    """Whether this variable was flagged for the TRAINABLE_VARIABLES collection."""
    return self._trainable
  def eval(self, session=None):
    """In a session, computes and returns the value of this variable.
    This is not a graph construction method, it does not add ops to the graph.
    This convenience method requires a session where the graph
    containing this variable has been launched. If no session is
    passed, the default session is used.  See `tf.Session` for more
    information on launching a graph and on sessions.
    ```python
    v = tf.Variable([1, 2])
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Usage passing the session explicitly.
        print(v.eval(sess))
        # Usage with the default session.  The 'with' block
        # above makes 'sess' the default session.
        print(v.eval())
    ```
    Args:
      session: The session to use to evaluate this variable. If
        none, the default session is used.
    Returns:
      A numpy `ndarray` with a copy of the value of this variable.
    """
    # Delegates to Tensor.eval on the variable's ref tensor.
    return self._variable.eval(session=session)
  def initialized_value(self):
    """Returns the value of the initialized variable.
    You should use this instead of the variable itself to initialize another
    variable with a value that depends on the value of this variable.
    ```python
    # Initialize 'v' with a random tensor.
    v = tf.Variable(tf.truncated_normal([10, 40]))
    # Use `initialized_value` to guarantee that `v` has been
    # initialized before its value is used to initialize `w`.
    # The random values are picked only once.
    w = tf.Variable(v.initialized_value() * 2.0)
    ```
    Returns:
      A `Tensor` holding the value of this variable after its initializer
      has run.
    """
    # NOTE(review): init_scope appears to be used to lift the cond out of any
    # inner graph-building context — confirm. The cond reads the variable when
    # it is already initialized and falls back to its initial value otherwise.
    with ops.init_scope():
      return control_flow_ops.cond(is_variable_initialized(self),
                                   self.read_value,
                                   lambda: self.initial_value)
  @property
  def initial_value(self):
    """Returns the Tensor used as the initial value for the variable.
    Note that this is different from `initialized_value()` which runs
    the op that initializes the variable before returning its value.
    This method returns the tensor that is used by the op that initializes
    the variable.
    Returns:
      A `Tensor`.
    """
    # Set by _init_from_args; may be None when restored from a VariableDef
    # that predates initial_value_name.
    return self._initial_value
  @property
  def constraint(self):
    """Returns the constraint function associated with this variable.
    Returns:
      The constraint function that was passed to the variable constructor.
      Can be `None` if no constraint was passed.
    """
    # Always None when the variable was restored from a VariableDef.
    return self._constraint
def assign(self, value, use_locking=False, name=None, read_value=True):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
assign = state_ops.assign(self._variable, value, use_locking=use_locking,
name=name)
if read_value:
return assign
return assign.op
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
assign = state_ops.assign_add(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
assign = state_ops.assign_sub(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Subtracts `IndexedSlices` from this variable.
    Args:
      sparse_delta: `IndexedSlices` to be subtracted from this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.
    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    # Delegates to the raw ScatterSub kernel on the variable's ref tensor.
    return gen_state_ops.scatter_sub(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking,
        name=name)
  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    """Adds `IndexedSlices` to this variable.
    Args:
      sparse_delta: `IndexedSlices` to be added to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.
    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    # Delegates to the raw ScatterAdd kernel on the variable's ref tensor.
    return gen_state_ops.scatter_add(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking,
        name=name)
  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    """Assigns `IndexedSlices` to this variable.
    Args:
      sparse_delta: `IndexedSlices` to be assigned to this variable.
      use_locking: If `True`, use locking during the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.
    Raises:
      ValueError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, ops.IndexedSlices):
      raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
    # Delegates to the raw ScatterUpdate kernel on the variable's ref tensor.
    return gen_state_ops.scatter_update(
        self._variable,
        sparse_delta.indices,
        sparse_delta.values,
        use_locking=use_locking,
        name=name)
  def scatter_nd_sub(self, indices, updates, name=None):
    """Applies sparse subtraction to individual values or slices in a Variable.
    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.
    `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```
    For example, say we want to subtract 4 scattered elements from a rank-1
    tensor with 8 elements. In Python, that update would look like this:
    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = ref.scatter_nd_sub(indices, updates)
        with tf.Session() as sess:
          print sess.run(op)
    ```
    The resulting update to ref would look like this:
        [1, -9, 3, -6, -4, 6, 7, -4]
    See `tf.scatter_nd` for more details about how to make updates to
    slices.
    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered subtraction has completed.
    """
    # Locking is unconditionally enabled for the nd scatter ops.
    return gen_state_ops.scatter_nd_sub(
        self._variable, indices, updates, use_locking=True, name=name)
  def scatter_nd_add(self, indices, updates, name=None):
    """Applies sparse addition to individual values or slices in a Variable.
    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.
    `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```
    For example, say we want to add 4 scattered elements to a rank-1 tensor
    with 8 elements. In Python, that update would look like this:
    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        add = ref.scatter_nd_add(indices, updates)
        with tf.Session() as sess:
          print sess.run(add)
    ```
    The resulting update to ref would look like this:
        [1, 13, 3, 14, 14, 6, 7, 20]
    See `tf.scatter_nd` for more details about how to make updates to
    slices.
    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered addition has completed.
    """
    # Locking is unconditionally enabled for the nd scatter ops.
    return gen_state_ops.scatter_nd_add(
        self._variable, indices, updates, use_locking=True, name=name)
  def scatter_nd_update(self, indices, updates, name=None):
    """Applies sparse assignment to individual values or slices in a Variable.
    `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
    `indices` must be integer tensor, containing indices into `ref`.
    It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
    The innermost dimension of `indices` (with length `K`) corresponds to
    indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
    dimension of `ref`.
    `updates` is `Tensor` of rank `Q-1+P-K` with shape:
    ```
    [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
    ```
    For example, say we want to assign 4 scattered elements of a rank-1 tensor
    with 8 elements. In Python, that update would look like this:
    ```python
        ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
        indices = tf.constant([[4], [3], [1] ,[7]])
        updates = tf.constant([9, 10, 11, 12])
        op = ref.scatter_nd_update(indices, updates)
        with tf.Session() as sess:
          print sess.run(op)
    ```
    The resulting update to ref would look like this:
        [1, 11, 3, 10, 9, 6, 7, 12]
    See `tf.scatter_nd` for more details about how to make updates to
    slices.
    Args:
      indices: The indices to be used in the operation.
      updates: The values to be used in the operation.
      name: the name of the operation.
    Returns:
      A `Tensor` that will hold the new value of this variable after
      the scattered assignment has completed.
    """
    # Locking is unconditionally enabled for the nd scatter ops.
    return gen_state_ops.scatter_nd_update(
        self._variable, indices, updates, use_locking=True, name=name)
  def _strided_slice_assign(self,
                            begin,
                            end,
                            strides,
                            value,
                            name,
                            begin_mask,
                            end_mask,
                            ellipsis_mask,
                            new_axis_mask,
                            shrink_axis_mask):
    """Assigns `value` to the strided slice of this variable selected by
    `begin`/`end`/`strides` and the slicing mask arguments. Pass-through to
    the StridedSliceAssign kernel on the variable's ref tensor; used to
    implement `v[...] = value` style sliced assignment.
    """
    return gen_array_ops.strided_slice_assign(ref=self._ref(),
                                              begin=begin,
                                              end=end,
                                              strides=strides,
                                              value=value,
                                              name=name,
                                              begin_mask=begin_mask,
                                              end_mask=end_mask,
                                              ellipsis_mask=ellipsis_mask,
                                              new_axis_mask=new_axis_mask,
                                              shrink_axis_mask=shrink_axis_mask)
  def count_up_to(self, limit):
    """Increments this variable until it reaches `limit`.
    When that Op is run it tries to increment the variable by `1`. If
    incrementing the variable would bring it above `limit` then the Op raises
    the exception `OutOfRangeError`.
    If no error is raised, the Op outputs the value of the variable before
    the increment.
    This is essentially a shortcut for `count_up_to(self, limit)`.
    Args:
      limit: value at which incrementing the variable raises an error.
    Returns:
      A `Tensor` that will hold the variable value before the increment. If no
      other Op modifies this variable, the values produced will all be
      distinct.
    """
    # Delegates to the CountUpTo op on the variable's ref tensor.
    return state_ops.count_up_to(self._variable, limit=limit)
  def load(self, value, session=None):
    """Load new value into this variable.
    Writes new value to variable's memory. Doesn't add ops to the graph.
    This convenience method requires a session where the graph
    containing this variable has been launched. If no session is
    passed, the default session is used.  See `tf.Session` for more
    information on launching a graph and on sessions.
    ```python
    v = tf.Variable([1, 2])
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Usage passing the session explicitly.
        v.load([2, 3], sess)
        print(v.eval(sess)) # prints [2 3]
        # Usage with the default session.  The 'with' block
        # above makes 'sess' the default session.
        v.load([3, 4], sess)
        print(v.eval()) # prints [3 4]
    ```
    Args:
        value: New variable value
        session: The session to use to evaluate this variable. If
          none, the default session is used.
    Raises:
        ValueError: Session is not passed and no default session
    """
    if context.executing_eagerly():
      self.assign(value)
    else:
      session = session or ops.get_default_session()
      if session is None:
        raise ValueError(
            "Either session argument should be provided or default session "
            "should be established")
      # Reuse the existing initializer op, feeding `value` in place of its
      # second input (the initial-value tensor), so no new ops are created.
      session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
  def _gather_saveables_for_checkpoint(self):
    """For implementing `Checkpointable`. This object is saveable on its own."""
    # Map the canonical VARIABLE_VALUE_KEY to this variable itself.
    return {checkpointable.VARIABLE_VALUE_KEY: self}
  def _try_guard_against_uninitialized_dependencies(self, initial_value):
    """Attempt to guard against dependencies on uninitialized variables.
    Replace references to variables in `initial_value` with references to the
    variable's initialized values. The initialized values are essentially
    conditional TensorFlow graphs that return a variable's value if it is
    initialized or its `initial_value` if it hasn't been initialized. This
    replacement is done on a best effort basis:
    - If the `initial_value` graph contains cycles, we don't do any
      replacements for that graph.
    - If the variables that `initial_value` depends on are not present in the
      `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.
    In these cases, it is up to the caller to ensure that the `initial_value`
    graph uses initialized variables or that they guard access to variables
    using their `initialized_value` method.
    Args:
      initial_value: `Tensor`. The initial value.
    Returns:
      A `Tensor` suitable to initialize a variable.
    Raises:
      TypeError: If `initial_value` is not a `Tensor`.
    """
    if not isinstance(initial_value, ops.Tensor):
      raise TypeError("initial_value needs to be a Tensor: %s" % initial_value)
    # Don't modify initial_value if it contains any cyclic dependencies.
    if _has_cycle(initial_value.op, path=set()):
      return initial_value
    # op_cache memoizes rewritten ops so shared subgraphs are processed once.
    return self._safe_initial_value_from_tensor(initial_value, op_cache={})
def _safe_initial_value_from_tensor(self, tensor, op_cache):
"""Replace dependencies on variables with their initialized values.
Args:
tensor: A `Tensor`. The tensor to replace.
op_cache: A dict mapping operation names to `Operation`s. Used to memoize
the results so as to avoid creating redundant operations.
Returns:
A `Tensor` compatible with `tensor`. Any inputs that lead to variable
values will be replaced with a corresponding graph that uses the
variable's initialized values. This is done on a best-effort basis. If no
modifications need to be made then `tensor` will be returned unchanged.
"""
op = tensor.op
new_op = op_cache.get(op.name)
if new_op is None:
new_op = self._safe_initial_value_from_op(op, op_cache)
op_cache[op.name] = new_op
return new_op.outputs[tensor.value_index]
  def _safe_initial_value_from_op(self, op, op_cache):
    """Replace dependencies on variables with their initialized values.
    Args:
      op: An `Operation`. The operation to replace.
      op_cache: A dict mapping operation names to `Operation`s. Used to memoize
        the results so as to avoid creating redundant operations.
    Returns:
      An `Operation` compatible with `op`. Any inputs that lead to variable
      values will be replaced with a corresponding graph that uses the
      variable's initialized values. This is done on a best-effort basis. If no
      modifications need to be made then `op` will be returned unchanged.
    """
    op_type = op.node_def.op
    # Ops that merely read or inspect a variable are already safe to keep.
    if op_type in ("IsVariableInitialized", "VarIsInitializedOp",
                   "ReadVariableOp"):
      return op
    # Attempt to find the initialized_value of any variable reference / handles.
    # TODO(b/70206927): Fix handling of ResourceVariables.
    if op_type in ("Variable", "VariableV2", "VarHandleOp"):
      initialized_value = self._find_initialized_value_for_variable(op)
      return op if initialized_value is None else initialized_value.op
    # Recursively build initializer expressions for inputs.
    modified = False
    new_op_inputs = []
    for op_input in op.inputs:
      new_op_input = self._safe_initial_value_from_tensor(op_input, op_cache)
      new_op_inputs.append(new_op_input)
      modified = modified or (new_op_input != op_input)
    # If at least one input was modified, replace the op.
    if modified:
      new_op_type = op_type
      if new_op_type == "RefSwitch":
        # NOTE(review): the rewritten inputs are value tensors rather than
        # refs, so the value-based Switch op is presumably required here.
        new_op_type = "Switch"
      # Derive a unique name for the replacement op from this variable's name.
      new_op_name = op.node_def.name + "_" + self.name
      new_op_name = new_op_name.replace(":", "_")
      return self.graph.create_op(
          new_op_type, new_op_inputs,
          op._output_types,  # pylint: disable=protected-access
          name=new_op_name, attrs=op.node_def.attr)
    return op
  def _find_initialized_value_for_variable(self, variable_op):
    """Find the initialized value for a variable op.
    To do so, lookup the variable op in the variables collection.
    Args:
      variable_op: A variable `Operation`.
    Returns:
      A `Tensor` representing the initialized value for the variable or `None`
      if the initialized value could not be found.
    """
    try:
      # Collections may store variables under either the op name or the
      # ":0" output name, so match against both.
      var_names = [variable_op.node_def.name, variable_op.node_def.name + ":0"]
      for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,
                              ops.GraphKeys.LOCAL_VARIABLES):
        for var in self.graph.get_collection(collection_name):
          if var.name in var_names:
            return var.initialized_value()
    except AttributeError:
      # Return None when an incomplete user-defined variable type was put in
      # the collection.
      return None
    return None
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Variables interact
# with ndarrays.
__array_priority__ = 100
  @property
  def name(self):
    """The name of this variable."""
    # Includes the ":0" output suffix of the underlying variable op.
    return self._variable.name
  @property
  def _shared_name(self):
    """The shared name of the variable.
      Unlike name(), shared_name doesn't have ":0" suffix. It is user-specified
      name with name scope prefix.
    Returns:
      variable name.
    """
    # Drops the two-character ":0" output suffix from `name`.
    return self.name[:-2]
  @property
  def initializer(self):
    """The initializer operation for this variable."""
    # An Assign op that writes the initial value into the variable.
    return self._initializer_op
  @property
  def device(self):
    """The device of this variable."""
    return self._variable.device
  @property
  def dtype(self):
    """The `DType` of this variable."""
    return self._variable.dtype
  @property
  def op(self):
    """The `Operation` of this variable."""
    return self._variable.op
  @property
  def graph(self):
    """The `Graph` of this variable."""
    return self._variable.graph
  @property
  def shape(self):
    """The `TensorShape` of this variable.
    Returns:
      A `TensorShape`.
    """
    return self._variable.get_shape()
  def get_shape(self):
    """Alias of Variable.shape."""
    return self.shape
def to_proto(self, export_scope=None):
"""Converts a `Variable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
if self._initial_value is not None:
# For backwards compatibility.
var_def.initial_value_name = ops.strip_name_scope(
self._initial_value.name, export_scope)
var_def.trainable = self.trainable
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
  def __iadd__(self, other):
    # Does NOT mutate the variable: warns once, then builds `self + other`
    # and rebinds the caller's name to the new tensor.
    logging.log_first_n(
        logging.WARN,
        "Variable += will be deprecated. Use variable.assign_add"
        " if you want assignment to the variable value or 'x = x + y'"
        " if you want a new python Tensor object.", 1)
    return self + other
  def __isub__(self, other):
    # Does NOT mutate the variable: warns once, then builds `self - other`.
    logging.log_first_n(
        logging.WARN,
        "Variable -= will be deprecated. Use variable.assign_sub"
        " if you want assignment to the variable value or 'x = x - y'"
        " if you want a new python Tensor object.", 1)
    return self - other
def __imul__(self, other):
logging.log_first_n(
logging.WARN,
"Variable *= will be deprecated. Use `var.assign(var * other)`"
" if you want assignment to the variable value or `x = x * y`"
" if you want a new python Tensor object.", 1)
return self * other
def __idiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __itruediv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __irealdiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __ipow__(self, other):
logging.log_first_n(
logging.WARN,
"Variable **= will be deprecated. Use `var.assign(var ** other)`"
" if you want assignment to the variable value or `x = x ** y`"
" if you want a new python Tensor object.", 1)
return self ** other
  def _set_save_slice_info(self, save_slice_info):
    """Sets the slice info for this `Variable`.

    Args:
      save_slice_info: A `Variable.SaveSliceInfo` object.
    """
    self._save_slice_info = save_slice_info
  def _get_save_slice_info(self):
    """Returns the `SaveSliceInfo` previously set, or `None`."""
    return self._save_slice_info
class PartitionedVariable(object):
  """A container for partitioned `Variable` objects.

  @compatibility(eager) `tf.PartitionedVariable` is not compatible with
  eager execution. Use `tf.Variable` instead which is compatible
  with both eager execution and graph construction. See [the
  TensorFlow Eager Execution
  guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
  for details on how variables work in eager execution.
  @end_compatibility
  """

  def __init__(self, name, shape, dtype, variable_list, partitions):
    """Creates a new partitioned variable wrapper.

    Variables passed via the variable_list must contain a save_slice_info
    field.  Concatenation and iteration is in lexicographic order according
    to the var_offset property of the save_slice_info.

    Args:
      name: String. Overall name of the variables.
      shape: List of integers.  Overall shape of the variables.
      dtype: Type of the variables.
      variable_list: List of `Variable` that comprise this partitioned variable.
      partitions: List of integers.  Number of partitions for each dimension.

    Raises:
      TypeError: If `variable_list` is not a list of `Variable` objects, or
        `partitions` is not a list.
      ValueError: If `variable_list` is empty, or the `Variable` shape
        information does not match `shape`, or `partitions` has invalid values.
    """
    if not isinstance(variable_list, (list, tuple)):
      raise TypeError(
          "variable_list is not a list or tuple: %s" % variable_list)
    if not isinstance(partitions, (list, tuple)):
      raise TypeError("partitions is not a list or tuple: %s" % partitions)
    if not all(p >= 1 for p in partitions):
      raise ValueError("partition values must be positive: %s" % partitions)
    if not variable_list:
      raise ValueError("variable_list may not be empty")
    # pylint: disable=protected-access
    # These two checks do not depend on any individual variable; the previous
    # implementation re-ran them on every loop iteration.  They are hoisted
    # here — behavior (errors raised, and their order) is unchanged because
    # variable_list is known to be non-empty at this point.
    if not all(v._get_save_slice_info() is not None for v in variable_list):
      raise ValueError(
          "All variables must have a save_slice_info available: %s"
          % [v.name for v in variable_list])
    if len(shape) != len(partitions):
      raise ValueError("len(shape) != len(partitions): %s vs. %s"
                       % (shape, partitions))
    # Per-variable check: every slice must agree on the overall shape.
    for v in variable_list:
      if v._get_save_slice_info().full_shape != shape:
        raise ValueError(
            "All variables' full shapes must match shape: %s; "
            "but full shapes were: %s"
            % (shape, str([v._get_save_slice_info().full_shape])))
    # Sort the variable_list lexicographically according to var offset value.
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
    # pylint: enable=protected-access
    self._name = name
    self._shape = shape
    self._dtype = dtype
    self._partitions = partitions
    self._as_tensor = None

  def __iter__(self):
    """Return an iterable for accessing the underlying partition Variables."""
    return iter(self._variable_list)

  def __len__(self):
    num_partition_axes = len(self._partition_axes())
    if num_partition_axes > 1:
      raise ValueError("Cannot get a length for %d > 1 partition axes"
                       % num_partition_axes)
    return len(self._variable_list)

  def _partition_axes(self):
    """Returns the list of axes that are split; `[0]` if unpartitioned."""
    if all(p == 1 for p in self._partitions):
      return [0]
    else:
      return [i for i, p in enumerate(self._partitions) if p > 1]

  def _concat(self):
    """Returns the overall concatenated value as a `Tensor`.

    This is different from using the partitioned variable directly as a tensor
    (through tensor conversion and `as_tensor`) in that it creates a new set of
    operations that keeps the control dependencies from its scope.

    Returns:
      `Tensor` containing the concatenated value.
    """
    if len(self._variable_list) == 1:
      with ops.name_scope(None):
        return array_ops.identity(self._variable_list[0], name=self._name)

    partition_axes = self._partition_axes()

    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot concatenate along more than one dimension: %s.  "
          "Multi-axis partition concat is not supported" % str(partition_axes))
    partition_ix = partition_axes[0]

    with ops.name_scope(self._name + "/ConcatPartitions/"):
      concatenated = array_ops.concat(self._variable_list, partition_ix)

    with ops.name_scope(None):
      return array_ops.identity(concatenated, name=self._name)

  def as_tensor(self):
    """Returns the overall concatenated value as a `Tensor`.

    The returned tensor will not inherit the control dependencies from the
    scope where the value is used, which is similar to getting the value of
    `Variable`.

    Returns:
      `Tensor` containing the concatenated value.
    """
    with ops.control_dependencies(None):
      return self._concat()

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    # pylint: disable=invalid-name
    _ = name
    if dtype is not None and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise NotImplementedError(
          "PartitionedVariable doesn't support being used as a reference.")
    else:
      return v.as_tensor()

  @property
  def name(self):
    return self._name

  @property
  def dtype(self):
    return self._dtype

  @property
  def shape(self):
    return self.get_shape()

  def get_shape(self):
    return self._shape

  def _get_variable_list(self):
    return self._variable_list

  def _get_partitions(self):
    return self._partitions

  def _apply_assign_fn(self, assign_fn, value):
    """Applies `assign_fn(var, slice)` to each partition; returns the op list.

    `value` may be a list of per-partition values, another
    `PartitionedVariable`, or a single tensor that is split along the
    partition axis.
    """
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot do assign action along more than one dimension: %s.  "
          "Multi-axis partition assign action is not supported " %
          str(partition_axes))
    if isinstance(value, list):
      assert len(value) == len(self._variable_list)
      value_list = value
    elif isinstance(value, PartitionedVariable):
      value_list = list(value)
    else:
      partition_ix = partition_axes[0]
      size_splits_list = [
          tensor_shape.dimension_value(var.shape[partition_ix])
          for var in self._variable_list
      ]
      value_list = array_ops.split(value, size_splits_list, axis=partition_ix)

    op_list = [
        assign_fn(var, value_list[idx])
        for idx, var in enumerate(self._variable_list)
    ]
    return op_list

  def assign(self, value, use_locking=False, name=None, read_value=True):
    assign_fn = lambda var, r_value: var.assign(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    assign_fn = lambda var, r_value: var.assign_add(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    assign_fn = lambda var, r_value: var.assign_sub(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]
@tf_export(v1=["global_variables"])
def global_variables(scope=None):
  """Returns global variables.

  Global variables are variables that are shared across machines in a
  distributed environment. The `Variable()` constructor or `get_variable()`
  automatically adds new variables to the graph collection
  `GraphKeys.GLOBAL_VARIABLES`.
  This convenience function returns the contents of that collection.

  An alternative to global variables are local variables. See
  `tf.local_variables`

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of `Variable` objects.
  """
  # Thin wrapper: the collection lookup itself applies the `scope` filter.
  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)
@tf_export(v1=["all_variables"])
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
  """See `tf.global_variables`. Deprecated alias kept for compatibility."""
  return global_variables()
def _all_saveable_objects(scope=None):
  """Returns all variables and `SaveableObject`s that must be checkpointed.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of `Variable` and `SaveableObject` to be checkpointed
  """
  # TODO(andreasst): make this function public once things are settled.
  # Global variables first, then extra saveables registered in the
  # SAVEABLE_OBJECTS collection.
  return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +
          ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))
@tf_export(v1=["local_variables"])
def local_variables(scope=None):
  """Returns local variables.

  Local variables - per process variables, usually not saved/restored to
  checkpoint and used for temporary or intermediate values.
  For example, they can be used as counters for metrics computation or
  number of epochs this machine has read data.
  The `tf.contrib.framework.local_variable()` function automatically adds the
  new variable to `GraphKeys.LOCAL_VARIABLES`.
  This convenience function returns the contents of that collection.

  An alternative to local variables are global variables. See
  `tf.global_variables`

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of local `Variable` objects.
  """
  # Thin wrapper: the collection lookup itself applies the `scope` filter.
  return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)
@tf_export(v1=["model_variables"])
def model_variables(scope=None):
  """Returns all variables in the MODEL_VARIABLES collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of local Variable objects.
  """
  # Thin wrapper: the collection lookup itself applies the `scope` filter.
  return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)
@tf_export(v1=["trainable_variables"])
def trainable_variables(scope=None):
  """Returns all variables created with `trainable=True`.

  When passed `trainable=True`, the `Variable()` constructor automatically
  adds new variables to the graph collection
  `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the
  contents of that collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of Variable objects.
  """
  # Thin wrapper: the collection lookup itself applies the `scope` filter.
  return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)
@tf_export(v1=["moving_average_variables"])
def moving_average_variables(scope=None):
  """Returns all variables that maintain their moving averages.

  If an `ExponentialMovingAverage` object is created and the `apply()`
  method is called on a list of variables, these variables will
  be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
  This convenience function returns the contents of that collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope` without
      special tokens filters by prefix.

  Returns:
    A list of Variable objects.
  """
  # Thin wrapper: the collection lookup itself applies the `scope` filter.
  return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)
@tf_export(v1=["initializers.variables", "variables_initializer"])
def variables_initializer(var_list, name="init"):
  """Returns an Op that initializes a list of variables.

  After you launch the graph in a session, you can run the returned Op to
  initialize all the variables in `var_list`. This Op runs all the
  initializers of the variables in `var_list` in parallel.

  Calling `initialize_variables()` is equivalent to passing the list of
  initializers to `Group()`.

  If `var_list` is empty, however, the function still returns an Op that can
  be run. That Op just has no effect.

  Args:
    var_list: List of `Variable` objects to initialize.
    name: Optional name for the returned operation.

  Returns:
    An Op that run the initializers of all the specified variables.
  """
  # Nothing to group when the list is empty or when running eagerly:
  # return a harmless no-op in both cases.
  if not var_list or context.executing_eagerly():
    return control_flow_ops.no_op(name=name)
  initializer_ops = [v.initializer for v in var_list]
  return control_flow_ops.group(*initializer_ops, name=name)
@tf_export(v1=["initialize_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
  """See `tf.variables_initializer`. Deprecated alias kept for compatibility."""
  return variables_initializer(var_list, name=name)
@tf_export(v1=["initializers.global_variables", "global_variables_initializer"])
def global_variables_initializer():
  """Returns an Op that initializes global variables.

  This is just a shortcut for `variables_initializer(global_variables())`

  Returns:
    An Op that initializes global variables in the graph.
  """
  # In eager mode there is nothing to initialize lazily; return a no-op.
  if not context.executing_eagerly():
    return variables_initializer(global_variables())
  return control_flow_ops.no_op(name="global_variables_initializer")
@tf_export(v1=["initialize_all_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
  """See `tf.global_variables_initializer`. Deprecated compatibility alias."""
  return global_variables_initializer()
@tf_export(v1=["initializers.local_variables", "local_variables_initializer"])
def local_variables_initializer():
  """Returns an Op that initializes all local variables.

  This is just a shortcut for `variables_initializer(local_variables())`

  Returns:
    An Op that initializes all local variables in the graph.
  """
  # In eager mode there is nothing to initialize lazily; return a no-op.
  if not context.executing_eagerly():
    return variables_initializer(local_variables())
  return control_flow_ops.no_op(name="local_variables_initializer")
@tf_export(v1=["initialize_local_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
  """See `tf.local_variables_initializer`. Deprecated compatibility alias."""
  return local_variables_initializer()
@tf_export(v1=["is_variable_initialized"])
@tf_should_use.should_use_result
def is_variable_initialized(variable):
  """Tests if a variable has been initialized.

  Args:
    variable: A `Variable`.

  Returns:
    Returns a scalar boolean Tensor, `True` if the variable has been
    initialized, `False` otherwise.
  """
  # Delegates directly to the state op; no graph-collection lookup involved.
  return state_ops.is_variable_initialized(variable)
@tf_export(v1=["assert_variables_initialized"])
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
  """Returns an Op to check if variables are initialized.

  NOTE: This function is obsolete and will be removed in 6 months.  Please
  change your implementation to use `report_uninitialized_variables()`.

  When run, the returned Op will raise the exception `FailedPreconditionError`
  if any of the variables has not yet been initialized.

  Note: This function is implemented by trying to fetch the values of the
  variables. If one of the variables is not initialized a message may be
  logged by the C++ runtime. This is expected.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `global_variables().`

  Returns:
    An Op, or None if there are no variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
  # Backwards compatibility for old-style variables. TODO(touts): remove.
  if not var_list:
    var_list = []
    for op in ops.get_default_graph().get_operations():
      if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
        var_list.append(op.outputs[0])
  if not var_list:
    return None
  else:
    ranks = []
    for var in var_list:
      with ops.colocate_with(var.op):
        # Fetching the rank forces the variable value to be read; reading an
        # uninitialized variable raises FailedPreconditionError at run time.
        ranks.append(array_ops.rank_internal(var, optimize=False))
    if len(ranks) == 1:
      return ranks[0]
    else:
      return array_ops.stack(ranks)
@tf_export(v1=["report_uninitialized_variables"])
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Adds ops to list the names of uninitialized variables.

  When run, it returns a 1-D tensor containing the names of uninitialized
  variables if there are any, or an empty array if there are none.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `global_variables() + local_variables()`
    name: Optional name of the `Operation`.

  Returns:
    A 1-D tensor containing names of the uninitialized variables, or an empty
    1-D tensor if there are no variables or no uninitialized variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
    # Backwards compatibility for old-style variables. TODO(touts): remove.
    if not var_list:
      var_list = []
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  with ops.name_scope(name):
    # Run all operations on CPU
    # Note: `init_vars` is only built when `var_list` is non-empty; the
    # empty case returns a constant below without touching it.
    if var_list:
      init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
    local_device = os.environ.get(
        "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
    with ops.device(local_device):
      if not var_list:
        # Return an empty tensor so we only need to check for returned tensor
        # size being 0 as an indication of model ready.
        return array_ops.constant([], dtype=dtypes.string)
      else:
        # Get a 1-D boolean tensor listing whether each variable is initialized.
        variables_mask = math_ops.logical_not(array_ops.stack(init_vars))
        # Get a 1-D string tensor containing all the variable names.
        variable_names_tensor = array_ops.constant(
            [s.op.name for s in var_list])
        # Return a 1-D tensor containing all the names of
        # uninitialized variables.
        return array_ops.boolean_mask(variable_names_tensor, variables_mask)
# Module-level side effects: install operator overloads on Variable and
# register tensor-conversion hooks so Variables/PartitionedVariables can be
# used wherever a Tensor is expected.
# pylint: disable=protected-access
Variable._OverloadAllOperators()
ops.register_tensor_conversion_function(
    PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# pylint: enable=protected-access

ops.register_dense_tensor_like_type(Variable)
# NOTE(review): dataset-extraction artifact row ("| 38.135857 | 134 | 0.682659 |")
# removed here — it was table metadata, not Python, and broke parsing.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import os
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def default_variable_creator(_, **kwds):
  """Placeholder creator; the real implementation is installed when
  `variable_scope` is imported (per the error message below)."""
  del kwds
  raise NotImplementedError("variable_scope needs to be imported")
def default_variable_creator_v2(_, **kwds):
  """Placeholder V2 creator; the real implementation is installed when
  `variable_scope` is imported (per the error message below)."""
  del kwds
  raise NotImplementedError("variable_scope needs to be imported")
def _make_getter(captured_getter, captured_previous):
def getter(**kwargs):
return captured_getter(captured_previous, **kwargs)
return getter
def _has_cycle(op, path):
if op.name in path:
return True
path.add(op.name)
for op_input in op.inputs:
if _has_cycle(op_input.op, path):
return True
for op_control_input in op.control_inputs:
if _has_cycle(op_control_input, path):
return True
path.remove(op.name)
return False
@tf_export("VariableSynchronization")
class VariableSynchronization(enum.Enum):
  """Indicates when a distributed variable will be synchronized.

  NOTE(review): member semantics (AUTO/NONE/ON_WRITE/ON_READ) are inferred
  from the names — confirm against the upstream TensorFlow documentation;
  the original docstrings were stripped from this copy.
  """
  AUTO = 0
  NONE = 1
  ON_WRITE = 2
  ON_READ = 3
@tf_export("VariableAggregation", v1=[])
class VariableAggregationV2(enum.Enum):
  """Indicates how a distributed variable will be aggregated.

  NOTE(review): member semantics inferred from names — confirm against
  upstream TensorFlow docs.  This docstring is also consumed below when
  building `VariableAggregation.__doc__`.
  """
  NONE = 0
  SUM = 1
  MEAN = 2
  ONLY_FIRST_REPLICA = 3
@tf_export(v1=["VariableAggregation"])
class VariableAggregation(enum.Enum):
  # V1 variant of VariableAggregationV2; ONLY_FIRST_TOWER is an alias of
  # ONLY_FIRST_REPLICA (both map to value 3).  Its __doc__ is assigned below.
  NONE = 0
  SUM = 1
  MEAN = 2
  ONLY_FIRST_REPLICA = 3
  ONLY_FIRST_TOWER = 3
VariableAggregation.__doc__ = (
VariableAggregationV2.__doc__ +
"* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\n ")
class VariableMetaclass(type):
  """Metaclass that routes `Variable`/`VariableV1` construction through the
  graph's variable-creator stack instead of the class's own `__init__`."""

  def _variable_v1_call(cls,
                        initial_value=None,
                        trainable=None,
                        collections=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        variable_def=None,
                        dtype=None,
                        expected_shape=None,
                        import_scope=None,
                        constraint=None,
                        use_resource=None,
                        synchronization=VariableSynchronization.AUTO,
                        aggregation=VariableAggregation.NONE):
    """Builds a chain of creators (innermost last) and invokes it (V1 API)."""
    previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
    for getter in ops.get_default_graph()._variable_creator_stack:  # pylint: disable=protected-access
      previous_getter = _make_getter(getter, previous_getter)

    # Reset `aggregation` that is explicitly set as `None` to the enum NONE.
    if aggregation is None:
      aggregation = VariableAggregation.NONE
    return previous_getter(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        variable_def=variable_def,
        dtype=dtype,
        expected_shape=expected_shape,
        import_scope=import_scope,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)

  def _variable_v2_call(cls,
                        initial_value=None,
                        trainable=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        variable_def=None,
                        dtype=None,
                        import_scope=None,
                        constraint=None,
                        synchronization=VariableSynchronization.AUTO,
                        aggregation=VariableAggregation.NONE):
    """Builds a chain of creators (innermost last) and invokes it (V2 API)."""
    previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)
    for getter in ops.get_default_graph()._variable_creator_stack:  # pylint: disable=protected-access
      previous_getter = _make_getter(getter, previous_getter)

    # Reset `aggregation` that is explicitly set as `None` to the enum NONE.
    if aggregation is None:
      aggregation = VariableAggregation.NONE
    return previous_getter(
        initial_value=initial_value,
        trainable=trainable,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        variable_def=variable_def,
        dtype=dtype,
        import_scope=import_scope,
        constraint=constraint,
        synchronization=synchronization,
        aggregation=aggregation)

  def __call__(cls, *args, **kwargs):
    # Dispatch only for the two public entry points; subclasses construct
    # normally via type.__call__.
    if cls is VariableV1:
      return cls._variable_v1_call(*args, **kwargs)
    elif cls is Variable:
      return cls._variable_v2_call(*args, **kwargs)
    else:
      return super(VariableMetaclass, cls).__call__(*args, **kwargs)
@tf_export("Variable", v1=[])
class Variable(six.with_metaclass(VariableMetaclass,
                                  checkpointable.CheckpointableBase)):
  """Abstract interface for TF variables.

  Most methods raise `NotImplementedError`: actual construction is routed by
  `VariableMetaclass.__call__` through the variable-creator stack, which
  returns a concrete implementation.  The original docstrings were stripped
  from this copy; see the upstream TensorFlow source for full API docs.
  """

  def __init__(self,
               initial_value=None,
               trainable=True,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               import_scope=None,
               constraint=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    raise NotImplementedError

  def __repr__(self):
    raise NotImplementedError

  def value(self):
    raise NotImplementedError

  def read_value(self):
    raise NotImplementedError

  def set_shape(self, shape):
    raise NotImplementedError

  @property
  def trainable(self):
    raise NotImplementedError

  def eval(self, session=None):
    raise NotImplementedError

  def initialized_value(self):
    raise NotImplementedError

  @property
  def initial_value(self):
    raise NotImplementedError

  @property
  def constraint(self):
    raise NotImplementedError

  def assign(self, value, use_locking=False, name=None, read_value=True):
    raise NotImplementedError

  def assign_add(self, delta, use_locking=False, name=None, read_value=True):
    raise NotImplementedError

  def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
    raise NotImplementedError

  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    raise NotImplementedError

  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    raise NotImplementedError

  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    raise NotImplementedError

  def scatter_nd_sub(self, indices, updates, name=None):
    raise NotImplementedError

  def scatter_nd_add(self, indices, updates, name=None):
    raise NotImplementedError

  def scatter_nd_update(self, indices, updates, name=None):
    raise NotImplementedError

  def count_up_to(self, limit):
    raise NotImplementedError

  def load(self, value, session=None):
    raise NotImplementedError

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    """Utility registered as a tensor-conversion function for Variables."""
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      return v._ref()  # pylint: disable=protected-access
    else:
      return v.value()

  @classmethod
  def _OverloadAllOperators(cls):
    """Registers all of `ops.Tensor`'s overloadable operators on Variable."""
    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
      cls._OverloadOperator(operator)
    # __getitem__ is handled specially to support variable slicing/assignment.
    setattr(cls, "__getitem__", array_ops._SliceHelperVar)  # pylint: disable=protected-access

  @classmethod
  def _OverloadOperator(cls, operator):
    """Defers `operator` to the corresponding `ops.Tensor` operator by
    converting the Variable to a tensor first (via `_AsTensor`)."""
    tensor_oper = getattr(ops.Tensor, operator)

    def _run_op(a, *args, **kwargs):
      return tensor_oper(a._AsTensor(), *args, **kwargs)  # pylint: disable=protected-access

    functools.update_wrapper(_run_op, tensor_oper)
    setattr(cls, operator, _run_op)

  def __iter__(self):
    raise TypeError("'Variable' object is not iterable.")

  # operators to run when the left operand is an ndarray, because it
  # accords the Variable class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  __array_priority__ = 100

  @property
  def name(self):
    raise NotImplementedError

  @property
  def initializer(self):
    raise NotImplementedError

  @property
  def device(self):
    raise NotImplementedError

  @property
  def dtype(self):
    raise NotImplementedError

  @property
  def op(self):
    raise NotImplementedError

  @property
  def graph(self):
    raise NotImplementedError

  @property
  def shape(self):
    raise NotImplementedError

  def get_shape(self):
    raise NotImplementedError

  def to_proto(self, export_scope=None):
    raise NotImplementedError

  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Recreates a (Ref)Variable from a `VariableDef` protocol buffer."""
    return RefVariable(variable_def=variable_def,
                       import_scope=import_scope)

  class SaveSliceInfo(object):
    """Information on how to save this Variable as a slice of a larger one."""

    def __init__(self,
                 full_name=None,
                 full_shape=None,
                 var_offset=None,
                 var_shape=None,
                 save_slice_info_def=None,
                 import_scope=None):
      """Populates fields either from `save_slice_info_def` (a
      `SaveSliceInfoDef` proto, optionally re-scoped with `import_scope`)
      or from the explicit keyword arguments."""
      if save_slice_info_def:
        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
        self.full_name = ops.prepend_name_scope(
            save_slice_info_def.full_name, import_scope=import_scope)
        self.full_shape = [i for i in save_slice_info_def.full_shape]
        self.var_offset = [i for i in save_slice_info_def.var_offset]
        self.var_shape = [i for i in save_slice_info_def.var_shape]
      else:
        self.full_name = full_name
        self.full_shape = full_shape
        self.var_offset = var_offset
        self.var_shape = var_shape

    @property
    def spec(self):
      """Computes the spec string used for saving: "<full_shape> <offset,size>:...."."""
      full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
      sl_spec = ":".join([
          "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
      ])
      return full_shape_str + sl_spec

    def to_proto(self, export_scope=None):
      """Returns a `SaveSliceInfoDef` proto, or None if `full_name` is not
      in `export_scope`."""
      if (export_scope is None or
          self.full_name.startswith(export_scope)):
        save_slice_info_def = variable_pb2.SaveSliceInfoDef()
        save_slice_info_def.full_name = ops.strip_name_scope(
            self.full_name, export_scope)
        for i in self.full_shape:
          save_slice_info_def.full_shape.append(i)
        for i in self.var_offset:
          save_slice_info_def.var_offset.append(i)
        for i in self.var_shape:
          save_slice_info_def.var_shape.append(i)
        return save_slice_info_def
      else:
        return None

  def __iadd__(self, other):
    raise NotImplementedError

  def __isub__(self, other):
    raise NotImplementedError

  def __imul__(self, other):
    raise NotImplementedError

  def __idiv__(self, other):
    raise NotImplementedError

  def __itruediv__(self, other):
    raise NotImplementedError

  def __irealdiv__(self, other):
    raise NotImplementedError

  def __ipow__(self, other):
    raise NotImplementedError
@tf_export(v1=["Variable"])
class VariableV1(Variable):
  """TF 1.x `tf.Variable` entry point.

  Construction is intercepted by `VariableMetaclass.__call__`, which
  dispatches to the variable-creator stack, so this `__init__` body is
  intentionally empty (the original docstring-only body was stripped from
  this copy — only the docstring below restores a valid body).
  """

  def __init__(self,
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               expected_shape=None,
               import_scope=None,
               constraint=None,
               use_resource=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    """Signature-only constructor; construction happens in the metaclass."""

  # Re-export the nested helper class for backwards compatibility.
  SaveSliceInfo = Variable.SaveSliceInfo
class RefVariable(VariableV1):
  def __init__(self,
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               expected_shape=None,
               import_scope=None,
               constraint=None):
    """Creates a reference-type variable (graph mode only).

    Exactly one of `variable_def` or `initial_value` must be supplied:
    `variable_def` recreates the variable from a `VariableDef` proto
    (optionally re-scoped with `import_scope`), while `initial_value`
    builds a fresh variable via `_init_from_args`.

    Raises:
      ValueError: If both `variable_def` and `initial_value` are given.
    """
    self._in_graph_mode = True
    if variable_def:
      # Recreate the variable from its serialized protocol buffer.
      if initial_value:
        raise ValueError("variable_def and initial_value are mutually "
                         "exclusive.")
      self._init_from_proto(variable_def, import_scope=import_scope)
    else:
      # Build a new variable from the given initial value.
      self._init_from_args(
          initial_value=initial_value,
          trainable=trainable,
          collections=collections,
          validate_shape=validate_shape,
          caching_device=caching_device,
          name=name,
          dtype=dtype,
          expected_shape=expected_shape,
          constraint=constraint)
  def __repr__(self):
    """Returns a debug string; includes the numpy value when eager."""
    if context.executing_eagerly() and not self._in_graph_mode:
      return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % (
          self.name, self.get_shape(), self.dtype.name,
          ops.numpy_text(self.read_value(), is_repr=True))
    else:
      return "<tf.Variable '%s' shape=%s dtype=%s>" % (
          self.name, self.get_shape(), self.dtype.name)
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
expected_shape=None,
constraint=None):
_ = expected_shape
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if constraint is not None and not callable(constraint):
raise ValueError("The `constraint` argument must be a callable.")
self._graph_key = ops.get_default_graph()._graph_key
if isinstance(initial_value, checkpointable.CheckpointInitialValue):
self._maybe_initialize_checkpointable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
self._trainable = trainable
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.init_scope():
if context.executing_eagerly():
raise RuntimeError(
"RefVariable not supported when eager execution is enabled. ")
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
true_name = ops._name_from_scope_name(name)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % true_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
self._initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
else:
self._initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
if self._initial_value.op._get_control_flow_context() is not None:
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
# initial_value has been converted to a Tensor with a known type.
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# Manually overrides the variable's shape with the initial value's.
if validate_shape:
initial_value_shape = self._initial_value.get_shape()
if not initial_value_shape.is_fully_defined():
raise ValueError("initial_value must have a shape specified: %s" %
self._initial_value)
# If 'initial_value' makes use of other variables, make sure we don't
# using their initialized_value() method.
self._initializer_op = state_ops.assign(
self._variable,
self._try_guard_against_uninitialized_dependencies(
self._initial_value),
validate_shape=validate_shape).op
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if caching_device is not None:
with ops.device(caching_device):
self._snapshot = array_ops.identity(self._variable, name="read")
else:
with ops.colocate_with(self._variable.op):
self._snapshot = array_ops.identity(self._variable, name="read")
ops.add_to_collections(collections, self)
self._caching_device = caching_device
self._save_slice_info = None
self._constraint = constraint
def _init_from_proto(self, variable_def, import_scope=None):
assert isinstance(variable_def, variable_pb2.VariableDef)
# Create from variable_def.
g = ops.get_default_graph()
self._variable = g.as_graph_element(
ops.prepend_name_scope(variable_def.variable_name,
import_scope=import_scope))
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(variable_def.initializer_name,
import_scope=import_scope))
# Tests whether initial_value_name exists first for backwards compatibility.
if (hasattr(variable_def, "initial_value_name") and
variable_def.initial_value_name):
self._initial_value = g.as_graph_element(
ops.prepend_name_scope(variable_def.initial_value_name,
import_scope=import_scope))
else:
self._initial_value = None
self._trainable = getattr(variable_def, "trainable", True)
self._snapshot = g.as_graph_element(
ops.prepend_name_scope(variable_def.snapshot_name,
import_scope=import_scope))
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def,
import_scope=import_scope)
else:
self._save_slice_info = None
self._caching_device = None
self._constraint = None
def _as_graph_element(self):
return self._variable
def _AsTensor(self): # pylint: disable=invalid-name
return self._snapshot
def value(self):
return self._snapshot
def read_value(self):
return array_ops.identity(self._variable, name="read")
def _ref(self):
return self._variable
def set_shape(self, shape):
self._ref().set_shape(shape)
self.value().set_shape(shape)
@property
def trainable(self):
return self._trainable
def eval(self, session=None):
return self._variable.eval(session=session)
def initialized_value(self):
with ops.init_scope():
return control_flow_ops.cond(is_variable_initialized(self),
self.read_value,
lambda: self.initial_value)
@property
def initial_value(self):
return self._initial_value
@property
def constraint(self):
return self._constraint
def assign(self, value, use_locking=False, name=None, read_value=True):
assign = state_ops.assign(self._variable, value, use_locking=use_locking,
name=name)
if read_value:
return assign
return assign.op
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
assign = state_ops.assign_add(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
assign = state_ops.assign_sub(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_sub(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_add(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_update(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_nd_sub(self, indices, updates, name=None):
return gen_state_ops.scatter_nd_sub(
self._variable, indices, updates, use_locking=True, name=name)
def scatter_nd_add(self, indices, updates, name=None):
return gen_state_ops.scatter_nd_add(
self._variable, indices, updates, use_locking=True, name=name)
def scatter_nd_update(self, indices, updates, name=None):
return gen_state_ops.scatter_nd_update(
self._variable, indices, updates, use_locking=True, name=name)
def _strided_slice_assign(self,
begin,
end,
strides,
value,
name,
begin_mask,
end_mask,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask):
return gen_array_ops.strided_slice_assign(ref=self._ref(),
begin=begin,
end=end,
strides=strides,
value=value,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def count_up_to(self, limit):
return state_ops.count_up_to(self._variable, limit=limit)
def load(self, value, session=None):
if context.executing_eagerly():
self.assign(value)
else:
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
def _gather_saveables_for_checkpoint(self):
return {checkpointable.VARIABLE_VALUE_KEY: self}
def _try_guard_against_uninitialized_dependencies(self, initial_value):
if not isinstance(initial_value, ops.Tensor):
raise TypeError("initial_value needs to be a Tensor: %s" % initial_value)
# Don't modify initial_value if it contains any cyclic dependencies.
if _has_cycle(initial_value.op, path=set()):
return initial_value
return self._safe_initial_value_from_tensor(initial_value, op_cache={})
def _safe_initial_value_from_tensor(self, tensor, op_cache):
op = tensor.op
new_op = op_cache.get(op.name)
if new_op is None:
new_op = self._safe_initial_value_from_op(op, op_cache)
op_cache[op.name] = new_op
return new_op.outputs[tensor.value_index]
def _safe_initial_value_from_op(self, op, op_cache):
op_type = op.node_def.op
if op_type in ("IsVariableInitialized", "VarIsInitializedOp",
"ReadVariableOp"):
return op
if op_type in ("Variable", "VariableV2", "VarHandleOp"):
initialized_value = self._find_initialized_value_for_variable(op)
return op if initialized_value is None else initialized_value.op
modified = False
new_op_inputs = []
for op_input in op.inputs:
new_op_input = self._safe_initial_value_from_tensor(op_input, op_cache)
new_op_inputs.append(new_op_input)
modified = modified or (new_op_input != op_input)
if modified:
new_op_type = op_type
if new_op_type == "RefSwitch":
new_op_type = "Switch"
new_op_name = op.node_def.name + "_" + self.name
new_op_name = new_op_name.replace(":", "_")
return self.graph.create_op(
new_op_type, new_op_inputs,
op._output_types,
name=new_op_name, attrs=op.node_def.attr)
return op
def _find_initialized_value_for_variable(self, variable_op):
try:
var_names = [variable_op.node_def.name, variable_op.node_def.name + ":0"]
for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES):
for var in self.graph.get_collection(collection_name):
if var.name in var_names:
return var.initialized_value()
except AttributeError:
return None
return None
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
__array_priority__ = 100
@property
def name(self):
return self._variable.name
@property
def _shared_name(self):
return self.name[:-2]
@property
def initializer(self):
return self._initializer_op
@property
def device(self):
return self._variable.device
@property
def dtype(self):
return self._variable.dtype
@property
def op(self):
return self._variable.op
@property
def graph(self):
return self._variable.graph
@property
def shape(self):
return self._variable.get_shape()
def get_shape(self):
return self.shape
def to_proto(self, export_scope=None):
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
if self._initial_value is not None:
var_def.initial_value_name = ops.strip_name_scope(
self._initial_value.name, export_scope)
var_def.trainable = self.trainable
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
def __iadd__(self, other):
logging.log_first_n(
logging.WARN,
"Variable += will be deprecated. Use variable.assign_add"
" if you want assignment to the variable value or 'x = x + y'"
" if you want a new python Tensor object.", 1)
return self + other
def __isub__(self, other):
logging.log_first_n(
logging.WARN,
"Variable -= will be deprecated. Use variable.assign_sub"
" if you want assignment to the variable value or 'x = x - y'"
" if you want a new python Tensor object.", 1)
return self - other
def __imul__(self, other):
logging.log_first_n(
logging.WARN,
"Variable *= will be deprecated. Use `var.assign(var * other)`"
" if you want assignment to the variable value or `x = x * y`"
" if you want a new python Tensor object.", 1)
return self * other
def __idiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __itruediv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __irealdiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __ipow__(self, other):
logging.log_first_n(
logging.WARN,
"Variable **= will be deprecated. Use `var.assign(var ** other)`"
" if you want assignment to the variable value or `x = x ** y`"
" if you want a new python Tensor object.", 1)
return self ** other
def _set_save_slice_info(self, save_slice_info):
self._save_slice_info = save_slice_info
def _get_save_slice_info(self):
return self._save_slice_info
class PartitionedVariable(object):
  """A container for a list of variables that jointly represent one logical
  variable, partitioned along a single axis.

  Every member variable must carry `SaveSliceInfo` describing its offset and
  shape inside the full variable; the list is kept sorted by offset.
  """

  def __init__(self, name, shape, dtype, variable_list, partitions):
    """Validates the partition metadata and stores the sorted variable list.

    Raises:
      TypeError: if `variable_list` or `partitions` is not a list/tuple.
      ValueError: on empty/inconsistent partitions or missing slice info.
    """
    if not isinstance(variable_list, (list, tuple)):
      raise TypeError(
          "variable_list is not a list or tuple: %s" % variable_list)
    if not isinstance(partitions, (list, tuple)):
      raise TypeError("partitions is not a list or tuple: %s" % partitions)
    if not all(p >= 1 for p in partitions):
      raise ValueError("partition values must be positive: %s" % partitions)
    if not variable_list:
      raise ValueError("variable_list may not be empty")
    # pylint: disable=protected-access
    for v in variable_list:
      # Each member must expose SaveSliceInfo whose full_shape agrees with
      # the declared shape/partitions.
      if not all(v._get_save_slice_info() is not None for v in variable_list):
        raise ValueError(
            "All variables must have a save_slice_info available: %s"
            % [v.name for v in variable_list])
      if len(shape) != len(partitions):
        raise ValueError("len(shape) != len(partitions): %s vs. %s"
                         % (shape, partitions))
      if v._get_save_slice_info().full_shape != shape:
        raise ValueError(
            "All variables' full shapes must match shape: %s; "
            "but full shapes were: %s"
            % (shape, str([v._get_save_slice_info().full_shape])))
    # Sort the variable_list lexicographically according to var offset value.
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
    # pylint: enable=protected-access
    self._name = name
    self._shape = shape
    self._dtype = dtype
    self._partitions = partitions
    self._as_tensor = None

  def __iter__(self):
    """Iterates over the member variables in offset order."""
    return iter(self._variable_list)

  def __len__(self):
    """Number of partitions; only defined for a single partition axis."""
    num_partition_axes = len(self._partition_axes())
    if num_partition_axes > 1:
      raise ValueError("Cannot get a length for %d > 1 partition axes"
                       % num_partition_axes)
    return len(self._variable_list)

  def _partition_axes(self):
    """Returns the axes actually partitioned (axis 0 when unpartitioned)."""
    if all(p == 1 for p in self._partitions):
      return [0]
    else:
      return [i for i, p in enumerate(self._partitions) if p > 1]

  def _concat(self):
    """Concatenates the partitions into one tensor named like the variable."""
    if len(self._variable_list) == 1:
      with ops.name_scope(None):
        return array_ops.identity(self._variable_list[0], name=self._name)
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot concatenate along more than one dimension: %s. "
          "Multi-axis partition concat is not supported" % str(partition_axes))
    partition_ix = partition_axes[0]
    with ops.name_scope(self._name + "/ConcatPartitions/"):
      concatenated = array_ops.concat(self._variable_list, partition_ix)
    with ops.name_scope(None):
      return array_ops.identity(concatenated, name=self._name)

  def as_tensor(self):
    """Returns the concatenated value, free of any control dependencies."""
    with ops.control_dependencies(None):
      return self._concat()

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    """Tensor-conversion hook; ref conversion is not supported."""
    # pylint: disable=invalid-name
    _ = name
    if dtype is not None and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise NotImplementedError(
          "PartitionedVariable doesn't support being used as a reference.")
    else:
      return v.as_tensor()

  @property
  def name(self):
    return self._name

  @property
  def dtype(self):
    return self._dtype

  @property
  def shape(self):
    return self.get_shape()

  def get_shape(self):
    """Full (unpartitioned) shape of the logical variable."""
    return self._shape

  def _get_variable_list(self):
    return self._variable_list

  def _get_partitions(self):
    return self._partitions

  def _apply_assign_fn(self, assign_fn, value):
    """Splits `value` across partitions and applies `assign_fn` to each.

    `value` may be a matching list, another `PartitionedVariable`, or a full
    tensor that is split along the (single) partition axis.
    """
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot do assign action along more than one dimension: %s. "
          "Multi-axis partition assign action is not supported " %
          str(partition_axes))
    if isinstance(value, list):
      assert len(value) == len(self._variable_list)
      value_list = value
    elif isinstance(value, PartitionedVariable):
      value_list = [var_part for var_part in value]
    else:
      partition_ix = partition_axes[0]
      size_splits_list = [
          tensor_shape.dimension_value(var.shape[partition_ix])
          for var in self._variable_list
      ]
      value_list = array_ops.split(value, size_splits_list, axis=partition_ix)
    op_list = [
        assign_fn(var, value_list[idx])
        for idx, var in enumerate(self._variable_list)
    ]
    return op_list

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Assigns `value` across all partitions; returns values or ops."""
    assign_fn = lambda var, r_value: var.assign(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    """Adds `value` across all partitions; returns values or ops."""
    assign_fn = lambda var, r_value: var.assign_add(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    """Subtracts `value` across all partitions; returns values or ops."""
    assign_fn = lambda var, r_value: var.assign_sub(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]
@tf_export(v1=["global_variables"])
def global_variables(scope=None):
  """Returns global variables, optionally filtered by `scope`.

  Thin wrapper around the `GraphKeys.GLOBAL_VARIABLES` graph collection.
  """
  collection_key = ops.GraphKeys.GLOBAL_VARIABLES
  return ops.get_collection(collection_key, scope)
@tf_export(v1=["all_variables"])
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
  """Deprecated alias for `global_variables()`."""
  return global_variables()
def _all_saveable_objects(scope=None):
  """Returns all variables and `SaveableObject`s that must be checkpointed."""
  variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)
  saveables = ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope)
  return variables + saveables
@tf_export(v1=["local_variables"])
def local_variables(scope=None):
  """Returns local (per-process) variables, optionally filtered by `scope`."""
  collection_key = ops.GraphKeys.LOCAL_VARIABLES
  return ops.get_collection(collection_key, scope)
@tf_export(v1=["model_variables"])
def model_variables(scope=None):
  """Returns variables from the MODEL_VARIABLES collection, filtered by `scope`."""
  collection_key = ops.GraphKeys.MODEL_VARIABLES
  return ops.get_collection(collection_key, scope)
@tf_export(v1=["trainable_variables"])
def trainable_variables(scope=None):
  """Returns variables registered as trainable, optionally filtered by `scope`."""
  collection_key = ops.GraphKeys.TRAINABLE_VARIABLES
  return ops.get_collection(collection_key, scope)
@tf_export(v1=["moving_average_variables"])
def moving_average_variables(scope=None):
  """Returns variables tracked by a moving average, filtered by `scope`."""
  collection_key = ops.GraphKeys.MOVING_AVERAGE_VARIABLES
  return ops.get_collection(collection_key, scope)
@tf_export(v1=["initializers.variables", "variables_initializer"])
def variables_initializer(var_list, name="init"):
  """Returns an op that initializes every variable in `var_list`.

  With an empty list, or under eager execution, the result is just a no-op.
  """
  if not var_list or context.executing_eagerly():
    return control_flow_ops.no_op(name=name)
  initializers = [v.initializer for v in var_list]
  return control_flow_ops.group(*initializers, name=name)
@tf_export(v1=["initialize_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
  """Deprecated alias for `variables_initializer()`."""
  return variables_initializer(var_list, name=name)
@tf_export(v1=["initializers.global_variables", "global_variables_initializer"])
def global_variables_initializer():
  """Returns an op that initializes all global variables (no-op when eager)."""
  if not context.executing_eagerly():
    return variables_initializer(global_variables())
  return control_flow_ops.no_op(name="global_variables_initializer")
@tf_export(v1=["initialize_all_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
  """Deprecated alias for `global_variables_initializer()`."""
  return global_variables_initializer()
@tf_export(v1=["initializers.local_variables", "local_variables_initializer"])
def local_variables_initializer():
  """Returns an op that initializes all local variables (no-op when eager)."""
  if not context.executing_eagerly():
    return variables_initializer(local_variables())
  return control_flow_ops.no_op(name="local_variables_initializer")
@tf_export(v1=["initialize_local_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
  """Deprecated alias for `local_variables_initializer()`."""
  return local_variables_initializer()
@tf_export(v1=["is_variable_initialized"])
@tf_should_use.should_use_result
def is_variable_initialized(variable):
  """Returns a boolean tensor: True iff `variable` has been initialized."""
  return state_ops.is_variable_initialized(variable)
@tf_export(v1=["assert_variables_initialized"])
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
  """Returns an op that fails at run time if any variable is uninitialized.

  Fetching an uninitialized ref variable is an error, so the returned op
  simply reads the rank of every variable. Returns None when there are no
  variables to check. Defaults to global + local variables (plus raw
  Variable ops found in the graph, for backwards compatibility).
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
    if not var_list:
      var_list = []
      # Backwards compatibility: pick up raw variable ops from the graph.
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  if not var_list:
    return None
  else:
    ranks = []
    for var in var_list:
      with ops.colocate_with(var.op):
        ranks.append(array_ops.rank_internal(var, optimize=False))
    if len(ranks) == 1:
      return ranks[0]
    else:
      return array_ops.stack(ranks)
@tf_export(v1=["report_uninitialized_variables"])
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Returns a 1-D string tensor with the names of uninitialized variables.

  Defaults to global + local variables (plus raw Variable ops found in the
  graph for backwards compatibility). The check runs on a configurable
  local device (env var TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING,
  default "/cpu:0").
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
    if not var_list:
      var_list = []
      # Backwards compatibility: pick up raw variable ops from the graph.
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  with ops.name_scope(name):
    # init_vars is only defined when var_list is non-empty; the branch
    # below that uses it is only reached in that case.
    if var_list:
      init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
    local_device = os.environ.get(
        "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
    with ops.device(local_device):
      if not var_list:
        # Return an empty tensor so we only need to check for returned tensor
        # size being 0 as an indication of model ready.
        return array_ops.constant([], dtype=dtypes.string)
      else:
        variables_mask = math_ops.logical_not(array_ops.stack(init_vars))
        variable_names_tensor = array_ops.constant(
            [s.op.name for s in var_list])
        return array_ops.boolean_mask(variable_names_tensor, variables_mask)
# Install the arithmetic/comparison operator overloads on Variable so that
# e.g. `v + x` builds the corresponding graph ops.
Variable._OverloadAllOperators()
# Let implicit tensor conversion handle partitioned variables by
# concatenating their partitions into a single tensor.
ops.register_tensor_conversion_function(
    PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# Register Variable as a dense-tensor-like type for shape/dtype dispatch.
ops.register_dense_tensor_like_type(Variable)
| true | true |
f72b1a1f689e870dc85c7c284ed9fdf8f206b085 | 4,540 | py | Python | python/tests/serialization/test_deserializers.py | aji-geo/incubator-sedona | ed7a1badf58f0c7efedf79a0a21a9ef6ebd1d6b1 | [
"Apache-2.0"
] | 1 | 2021-10-19T07:57:29.000Z | 2021-10-19T07:57:29.000Z | python/tests/serialization/test_deserializers.py | aji-geo/incubator-sedona | ed7a1badf58f0c7efedf79a0a21a9ef6ebd1d6b1 | [
"Apache-2.0"
] | null | null | null | python/tests/serialization/test_deserializers.py | aji-geo/incubator-sedona | ed7a1badf58f0c7efedf79a0a21a9ef6ebd1d6b1 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from shapely.geometry import MultiPoint, Point, MultiLineString, LineString, Polygon, MultiPolygon
import geopandas as gpd
from tests.data import data_path
from tests.test_base import TestBase
class TestGeometryConvert(TestBase):
    """Checks that Sedona SQL geometries deserialize to shapely types."""

    def test_register_functions(self):
        """Sedona SQL constructor functions are usable on the session."""
        df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
        df.show()

    def test_collect(self):
        """Collecting a geometry column must not raise."""
        df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
        df.collect()

    def test_loading_from_file_deserialization(self):
        """SQL st_area must equal the deserialized shapely polygon's area."""
        self.spark.read.\
            options(delimiter="|", header=True).\
            csv(os.path.join(data_path, "counties.csv")).\
            limit(1).\
            createOrReplaceTempView("counties")
        geom_area = self.spark.sql("SELECT st_area(st_geomFromWKT(geom)) as area from counties").collect()[0][0]
        polygon_shapely = self.spark.sql("SELECT st_geomFromWKT(geom) from counties").collect()[0][0]
        assert geom_area == polygon_shapely.area

    def test_polygon_with_holes_deserialization(self):
        """POLYGON with an interior ring keeps its hole (area excludes it)."""
        geom = self.spark.sql(
            """select st_geomFromWKT('POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10),
            (20 30, 35 35, 30 20, 20 30))') as geom"""
        ).collect()[0][0]
        assert geom.area == 675.0
        assert type(geom) == Polygon

    def test_multipolygon_with_holes_deserialization(self):
        """MULTIPOLYGON with a hole deserializes with the hole excluded."""
        geom = self.spark.sql(
            """select st_geomFromWKT('MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
            ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),
            (30 20, 20 15, 20 25, 30 20)))')"""
        ).collect()[0][0]
        assert type(geom) == MultiPolygon
        assert geom.area == 712.5

    def test_multipolygon_deserialization(self):
        """MULTIPOLYGON WKT must deserialize to a shapely MultiPolygon.

        Fixed: the previous version called st_geomFromWKT() with no argument
        and never executed the lazy DataFrame, so nothing was asserted.
        """
        geom = self.spark.sql(
            """select st_geomFromWKT('MULTIPOLYGON (((30 20, 45 40, 10 40, 30 20)),
            ((15 5, 40 10, 10 20, 5 10, 15 5)))') as geom"""
        ).collect()[0][0]
        assert type(geom) == MultiPolygon
        assert geom.area == 612.5

    def test_point_deserialization(self):
        """POINT WKT round-trips to a shapely Point."""
        geom = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""").collect()[0][0]
        assert geom.wkt == Point(-6.0, 52.0).wkt

    def test_multipoint_deserialization(self):
        """MULTIPOINT WKT round-trips to a shapely MultiPoint."""
        geom = self.spark.sql("""select st_geomFromWKT('MULTIPOINT(1 2, -2 3)') as geom""").collect()[0][0]
        assert geom.wkt == MultiPoint([(1, 2), (-2, 3)]).wkt

    def test_linestring_deserialization(self):
        """LINESTRING WKT round-trips to a shapely LineString."""
        geom = self.spark.sql(
            """select st_geomFromWKT('LINESTRING (30 10, 10 30, 40 40)')"""
        ).collect()[0][0]
        assert type(geom) == LineString
        assert geom.wkt == LineString([(30, 10), (10, 30), (40, 40)]).wkt

    def test_multilinestring_deserialization(self):
        """MULTILINESTRING WKT round-trips to a shapely MultiLineString."""
        geom = self.spark.sql(
            """SELECT st_geomFromWKT('MULTILINESTRING ((10 10, 20 20, 10 40),
            (40 40, 30 30, 40 20, 30 10))') as geom"""
        ).collect()[0][0]
        assert type(geom) == MultiLineString
        assert geom.wkt == MultiLineString([
            ((10, 10), (20, 20), (10, 40)),
            ((40, 40), (30, 30), (40, 20), (30, 10))
        ]).wkt

    def test_from_geopandas_convert(self):
        """A GeoDataFrame can be converted to a Spark DataFrame."""
        gdf = gpd.read_file(os.path.join(data_path, "gis_osm_pois_free_1.shp"))
        self.spark.createDataFrame(
            gdf
        ).show()

    def test_to_geopandas(self):
        """A Spark DataFrame with a geometry column converts to a GeoDataFrame."""
        counties = self.spark. \
            read. \
            option("delimiter", "|"). \
            option("header", "true"). \
            csv(os.path.join(data_path, "counties.csv")).limit(1)
        counties.createOrReplaceTempView("county")
        counties_geom = self.spark.sql(
            "SELECT *, st_geomFromWKT(geom) as geometry from county"
        )
        gdf = counties_geom.toPandas()
        print(gpd.GeoDataFrame(gdf, geometry="geometry"))
| 36.32 | 112 | 0.624009 |
import os
from shapely.geometry import MultiPoint, Point, MultiLineString, LineString, Polygon, MultiPolygon
import geopandas as gpd
from tests.data import data_path
from tests.test_base import TestBase
class TestGeometryConvert(TestBase):
    """Checks that Sedona SQL geometries deserialize to shapely types."""

    def test_register_functions(self):
        """Sedona SQL constructor functions are usable on the session."""
        df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
        df.show()

    def test_collect(self):
        """Collecting a geometry column must not raise."""
        df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
        df.collect()

    def test_loading_from_file_deserialization(self):
        """SQL st_area must equal the deserialized shapely polygon's area."""
        self.spark.read.\
            options(delimiter="|", header=True).\
            csv(os.path.join(data_path, "counties.csv")).\
            limit(1).\
            createOrReplaceTempView("counties")
        geom_area = self.spark.sql("SELECT st_area(st_geomFromWKT(geom)) as area from counties").collect()[0][0]
        polygon_shapely = self.spark.sql("SELECT st_geomFromWKT(geom) from counties").collect()[0][0]
        assert geom_area == polygon_shapely.area

    def test_polygon_with_holes_deserialization(self):
        """POLYGON with an interior ring keeps its hole (area excludes it)."""
        geom = self.spark.sql(
            """select st_geomFromWKT('POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10),
            (20 30, 35 35, 30 20, 20 30))') as geom"""
        ).collect()[0][0]
        assert geom.area == 675.0
        assert type(geom) == Polygon

    def test_multipolygon_with_holes_deserialization(self):
        """MULTIPOLYGON with a hole deserializes with the hole excluded."""
        geom = self.spark.sql(
            """select st_geomFromWKT('MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
            ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),
            (30 20, 20 15, 20 25, 30 20)))')"""
        ).collect()[0][0]
        assert type(geom) == MultiPolygon
        assert geom.area == 712.5

    def test_multipolygon_deserialization(self):
        """MULTIPOLYGON WKT must deserialize to a shapely MultiPolygon.

        Fixed: the previous version called st_geomFromWKT() with no argument
        and never executed the lazy DataFrame, so nothing was asserted.
        """
        geom = self.spark.sql(
            """select st_geomFromWKT('MULTIPOLYGON (((30 20, 45 40, 10 40, 30 20)),
            ((15 5, 40 10, 10 20, 5 10, 15 5)))') as geom"""
        ).collect()[0][0]
        assert type(geom) == MultiPolygon
        assert geom.area == 612.5

    def test_point_deserialization(self):
        """POINT WKT round-trips to a shapely Point."""
        geom = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""").collect()[0][0]
        assert geom.wkt == Point(-6.0, 52.0).wkt

    def test_multipoint_deserialization(self):
        """MULTIPOINT WKT round-trips to a shapely MultiPoint."""
        geom = self.spark.sql("""select st_geomFromWKT('MULTIPOINT(1 2, -2 3)') as geom""").collect()[0][0]
        assert geom.wkt == MultiPoint([(1, 2), (-2, 3)]).wkt

    def test_linestring_deserialization(self):
        """LINESTRING WKT round-trips to a shapely LineString."""
        geom = self.spark.sql(
            """select st_geomFromWKT('LINESTRING (30 10, 10 30, 40 40)')"""
        ).collect()[0][0]
        assert type(geom) == LineString
        assert geom.wkt == LineString([(30, 10), (10, 30), (40, 40)]).wkt

    def test_multilinestring_deserialization(self):
        """MULTILINESTRING WKT round-trips to a shapely MultiLineString."""
        geom = self.spark.sql(
            """SELECT st_geomFromWKT('MULTILINESTRING ((10 10, 20 20, 10 40),
            (40 40, 30 30, 40 20, 30 10))') as geom"""
        ).collect()[0][0]
        assert type(geom) == MultiLineString
        assert geom.wkt == MultiLineString([
            ((10, 10), (20, 20), (10, 40)),
            ((40, 40), (30, 30), (40, 20), (30, 10))
        ]).wkt

    def test_from_geopandas_convert(self):
        """A GeoDataFrame can be converted to a Spark DataFrame."""
        gdf = gpd.read_file(os.path.join(data_path, "gis_osm_pois_free_1.shp"))
        self.spark.createDataFrame(
            gdf
        ).show()

    def test_to_geopandas(self):
        """A Spark DataFrame with a geometry column converts to a GeoDataFrame."""
        counties = self.spark. \
            read. \
            option("delimiter", "|"). \
            option("header", "true"). \
            csv(os.path.join(data_path, "counties.csv")).limit(1)
        counties.createOrReplaceTempView("county")
        counties_geom = self.spark.sql(
            "SELECT *, st_geomFromWKT(geom) as geometry from county"
        )
        gdf = counties_geom.toPandas()
        print(gpd.GeoDataFrame(gdf, geometry="geometry"))
| true | true |
f72b1cc1e0211ba34f94051f87bc32ad2cbf8b6f | 60 | py | Python | src/FLABasicTools/__main__.py | Fair-Lines-America/FLA_basic_tools | 9aedc23ef4b9df2bd530c96fedd94e046eb545c8 | [
"MIT"
] | 17 | 2020-05-07T20:02:30.000Z | 2022-03-02T10:59:28.000Z | src/FLABasicTools/__main__.py | Fair-Lines-America/FLA_basic_tools | 9aedc23ef4b9df2bd530c96fedd94e046eb545c8 | [
"MIT"
] | 3 | 2021-05-06T17:44:23.000Z | 2022-01-27T15:14:44.000Z | src/FLABasicTools/__main__.py | Fair-Lines-America/FLA_basic_tools | 9aedc23ef4b9df2bd530c96fedd94e046eb545c8 | [
"MIT"
] | null | null | null | from .cli import main
if __name__ == '__main__':
main() | 15 | 26 | 0.65 | from .cli import main
if __name__ == '__main__':
main() | true | true |
f72b1d438ff6542f0231c5e19b54a4ca0fdfaef9 | 7,860 | py | Python | agent/segmentation.py | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 | [
"MIT"
] | null | null | null | agent/segmentation.py | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 | [
"MIT"
] | null | null | null | agent/segmentation.py | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from data.segmentation import SegmentDataset
from model.segmentation.fcn import FCN32
from model.segmentation.unet import UNet, UNetVGG16
__all__ = [ "SegmentAgent" ]
class SegmentAgent:
    """Train Image Segmentation model (7 output classes).

    Requirements:
        Simple baseline
        - (15%) validation mIoU > 0.635
        - (15%) testing mIoU > 0.625
    """
    def __init__(self, config):
        """Build datasets, loaders, model, optimizer and logging.

        :param config: nested dict with 'dataset', 'loader', 'train' and
            'optim' sections
        :raises RuntimeError: if no CUDA device is available (CPU training
            is not supported)
        :raises ValueError: if config['train']['model'] is neither 'fcn'
            nor 'unet'
        """
        self.config = config

        # Check environment - the agent refuses to train on CPU
        if torch.cuda.is_available():
            self.device = torch.device(config['train']['device'])
        else:
            raise RuntimeError("Please train your model with GPU")

        # Create dataset (ImageNet normalization statistics)
        tr_transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]), ])
        te_transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]), ])
        train_dataset = SegmentDataset(root=config['dataset']['train']['root'],
                                       transform=tr_transform)
        valid_dataset = SegmentDataset(root=config['dataset']['valid']['root'],
                                       transform=te_transform)

        # Create dataloader (shuffle only the training split)
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=config['loader']['batch_size'],
                                       num_workers=config['loader']['num_workers'],
                                       shuffle=True)
        self.valid_loader = DataLoader(valid_dataset,
                                       batch_size=config['loader']['batch_size'],
                                       num_workers=config['loader']['num_workers'],
                                       shuffle=False)

        # Create model
        if config['train']['model'] == 'fcn':
            self.model = FCN32(n_classes=7)
        elif config['train']['model'] == 'unet':
            self.model = UNetVGG16(n_classes=7)
        else:
            # FIX: fail early with a clear message instead of an
            # AttributeError on the undefined self.model below.
            raise ValueError(
                "Unknown model '{}', expected 'fcn' or 'unet'".format(
                    config['train']['model']))
        self.model.to(self.device)

        # Create optimizer
        self.optimizer = optim.Adam(self.model.parameters(), lr=config['optim']['lr'])

        # Create loss function
        self.criterion = nn.CrossEntropyLoss()

        # Create tensorboard writer under log_dir/exp_name
        tensorboard_dir = osp.join(config['train']['log_dir'], config['train']['exp_name'])
        self.writer = SummaryWriter(tensorboard_dir)

        # Logging state; current_loss tracks the best (lowest) validation
        # loss seen so far and guards checkpoint saving.
        self.start_epoch = 0
        self.current_epoch = -1
        self.current_loss = 10000

        # Resume training from the best checkpoint if requested
        if config['train']['resume']:
            checkpoint_file = osp.join(config['train']['log_dir'],
                                       config['train']['checkpoint_dir'],
                                       'best.pth')
            checkpoint = torch.load(checkpoint_file)
            self.model.load_state_dict(checkpoint['model'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            # Reset the learning rate to the freshly configured value
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = config['optim']['lr']
            self.current_epoch = checkpoint['current_epoch'] + 1
            self.start_epoch = self.current_epoch + 1
            print("Resume training at epoch {}".format(self.start_epoch))

    def train(self):
        """Run the train/validate loop for the configured number of epochs."""
        for epoch in range(self.start_epoch, self.config['train']['n_epochs']):
            self.current_epoch = epoch
            self.train_one_epoch()
            self.validate()

    def train_one_epoch(self):
        """Train the model for one epoch and log the mean training loss."""
        running_loss = 0
        self.model.train()
        for i, (imgs, targets) in enumerate(self.train_loader):
            imgs = imgs.to(self.device)
            targets = targets.to(self.device)

            # Forward & Backward
            self.optimizer.zero_grad()
            outputs = self.model(imgs)  # (n, c, h, w)
            # Flatten (n, c, h, w) -> (n*h*w, c) for CrossEntropyLoss
            preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
            labels = targets.flatten()
            loss = self.criterion(preds, labels)
            loss.backward()
            self.optimizer.step()

            # Cumulate result (weighted by batch size)
            running_loss += loss.item() * len(imgs)

            # Show training information
            if (i % self.config['train']['interval']) == 0:
                print("Epoch {}:{}({}%), Loss: {:.2f}".format(
                    self.current_epoch, self.config['train']['n_epochs'],
                    int(i*100/len(self.train_loader)), loss.item()))

        train_loss = running_loss / len(self.train_loader.dataset)
        print("Epoch {}:{}, Train Loss: {:.2f}".format(
            self.current_epoch, self.config['train']['n_epochs'], train_loss))

        # Export result to tensorboard
        self.writer.add_scalar("Train Loss", train_loss, self.current_epoch)

    def validate(self):
        """Evaluate on the validation split, report loss and mIoU, and save
        a checkpoint whenever the validation loss improves."""
        running_loss = 0
        pred_masks = []
        true_masks = []
        self.model.eval()
        with torch.no_grad():
            for imgs, targets in self.valid_loader:
                imgs = imgs.to(self.device)
                targets = targets.to(self.device)
                outputs = self.model(imgs)  # (n, c, h, w)

                # Save segmentation masks for the mIoU computation
                pred_mask = np.argmax(outputs.detach().cpu().numpy(), axis=1)
                pred_masks.append(pred_mask)
                true_masks.append(targets.detach().cpu().numpy())

                # Compute loss
                preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
                labels = targets.flatten()
                loss = self.criterion(preds, labels)
                running_loss += loss.item() * len(imgs)

        # Show validation result
        pred_masks = np.vstack(pred_masks)
        true_masks = np.vstack(true_masks)
        miou = self._mean_iou_score(pred_masks, true_masks)
        valid_loss = running_loss / len(self.valid_loader.dataset)
        print("Epoch {}:{}, Valid Loss: {:.2f}, mIoU: {:.3f}".format(
            self.current_epoch, self.config['train']['n_epochs'],
            valid_loss, miou))

        # Save a checkpoint whenever the validation loss improves
        if valid_loss < self.current_loss:
            self.current_loss = valid_loss
            self._save_checkpoint()

        # Export result to tensorboard
        self.writer.add_scalar("Valid Loss", valid_loss, self.current_epoch)

    def finalize(self):
        """Hook for cleanup after training; nothing to do currently."""
        pass

    def _save_checkpoint(self):
        """Persist model/optimizer state and training progress to 'best.pth'."""
        checkpoints = { 'model': self.model.state_dict(),
                        'optimizer': self.optimizer.state_dict(),
                        'current_epoch': self.current_epoch,
                        'current_loss': self.current_loss }
        checkpoint_file = osp.join(self.config['train']['log_dir'],
                                   self.config['train']['checkpoint_dir'],
                                   'best.pth')
        if not osp.exists(osp.dirname(checkpoint_file)):
            os.makedirs(osp.dirname(checkpoint_file))
        torch.save(checkpoints, checkpoint_file)
        print("Save checkpoint to '{}'".format(checkpoint_file))

    def _mean_iou_score(self, pred_masks, true_masks):
        """Compute the mean IoU score over classes 0..5.

        NOTE(review): a class absent from both masks produces a 0/0
        division (NaN with numpy scalars) -- confirm whether absent
        classes can occur before changing the scoring.
        """
        mean_iou = 0
        for i in range(6):
            tp_fp = np.sum(pred_masks == i)
            tp_fn = np.sum(true_masks == i)
            tp = np.sum((pred_masks == i) * (true_masks == i))
            iou = tp / (tp_fp + tp_fn - tp)
            mean_iou += iou / 6
        return mean_iou
| 38.341463 | 91 | 0.549237 | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from data.segmentation import SegmentDataset
from model.segmentation.fcn import FCN32
from model.segmentation.unet import UNet, UNetVGG16
__all__ = [ "SegmentAgent" ]
class SegmentAgent:
    """Train an image segmentation model (FCN32 or UNet, 7 output classes).

    Builds the data pipeline from a nested config dict, runs a
    train/validate loop and keeps the checkpoint with the lowest
    validation loss.
    """
    def __init__(self, config):
        """Set up datasets, loaders, model, optimizer and logging.

        :param config: nested dict with 'dataset', 'loader', 'train' and
            'optim' sections
        :raises RuntimeError: if no CUDA device is available
        """
        self.config = config
        # Training on CPU is not supported.
        if torch.cuda.is_available():
            self.device = torch.device(config['train']['device'])
        else:
            raise RuntimeError("Please train your model with GPU")
        # Transforms use the ImageNet normalization statistics.
        tr_transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]), ])
        te_transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]), ])
        train_dataset = SegmentDataset(root=config['dataset']['train']['root'],
                                       transform=tr_transform)
        valid_dataset = SegmentDataset(root=config['dataset']['valid']['root'],
                                       transform=te_transform)
        # Only the training split is shuffled.
        self.train_loader = DataLoader(train_dataset,
                                       batch_size=config['loader']['batch_size'],
                                       num_workers=config['loader']['num_workers'],
                                       shuffle=True)
        self.valid_loader = DataLoader(valid_dataset,
                                       batch_size=config['loader']['batch_size'],
                                       num_workers=config['loader']['num_workers'],
                                       shuffle=False)
        # NOTE(review): an unknown model name leaves self.model undefined
        # and fails below with an AttributeError -- confirm intended.
        if config['train']['model'] == 'fcn':
            self.model = FCN32(n_classes=7)
        elif config['train']['model'] == 'unet':
            self.model = UNetVGG16(n_classes=7)
        self.model.to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=config['optim']['lr'])
        self.criterion = nn.CrossEntropyLoss()
        tensorboard_dir = osp.join(config['train']['log_dir'], config['train']['exp_name'])
        self.writer = SummaryWriter(tensorboard_dir)
        # current_loss tracks the best (lowest) validation loss seen so
        # far and guards checkpoint saving in validate().
        self.start_epoch = 0
        self.current_epoch = -1
        self.current_loss = 10000
        # Optionally resume from the best checkpoint.
        if config['train']['resume']:
            checkpoint_file = osp.join(config['train']['log_dir'],
                                       config['train']['checkpoint_dir'],
                                       'best.pth')
            checkpoint = torch.load(checkpoint_file)
            self.model.load_state_dict(checkpoint['model'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            # Reset the learning rate to the configured value.
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = config['optim']['lr']
            self.current_epoch = checkpoint['current_epoch'] + 1
            self.start_epoch = self.current_epoch + 1
            print("Resume training at epoch {}".format(self.start_epoch))
    def train(self):
        """Run the train/validate loop for the configured number of epochs."""
        for epoch in range(self.start_epoch, self.config['train']['n_epochs']):
            self.current_epoch = epoch
            self.train_one_epoch()
            self.validate()
    def train_one_epoch(self):
        """Train the model for one epoch and log the mean training loss."""
        running_loss = 0
        self.model.train()
        for i, (imgs, targets) in enumerate(self.train_loader):
            imgs = imgs.to(self.device)
            targets = targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(imgs)
            # Flatten (n, c, h, w) -> (n*h*w, c) for CrossEntropyLoss.
            preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
            labels = targets.flatten()
            loss = self.criterion(preds, labels)
            loss.backward()
            self.optimizer.step()
            # Weight the running loss by the batch size.
            running_loss += loss.item() * len(imgs)
            if (i % self.config['train']['interval']) == 0:
                print("Epoch {}:{}({}%), Loss: {:.2f}".format(
                    self.current_epoch, self.config['train']['n_epochs'],
                    int(i*100/len(self.train_loader)), loss.item()))
        train_loss = running_loss / len(self.train_loader.dataset)
        print("Epoch {}:{}, Train Loss: {:.2f}".format(
            self.current_epoch, self.config['train']['n_epochs'], train_loss))
        self.writer.add_scalar("Train Loss", train_loss, self.current_epoch)
    def validate(self):
        """Evaluate on the validation split, report loss and mIoU, and
        save a checkpoint whenever the validation loss improves."""
        running_loss = 0
        pred_masks = []
        true_masks = []
        self.model.eval()
        with torch.no_grad():
            for imgs, targets in self.valid_loader:
                imgs = imgs.to(self.device)
                targets = targets.to(self.device)
                outputs = self.model(imgs)
                # Per-pixel class prediction over the channel axis.
                pred_mask = np.argmax(outputs.detach().cpu().numpy(), axis=1)
                pred_masks.append(pred_mask)
                true_masks.append(targets.detach().cpu().numpy())
                preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
                labels = targets.flatten()
                loss = self.criterion(preds, labels)
                running_loss += loss.item() * len(imgs)
        pred_masks = np.vstack(pred_masks)
        true_masks = np.vstack(true_masks)
        miou = self._mean_iou_score(pred_masks, true_masks)
        valid_loss = running_loss / len(self.valid_loader.dataset)
        print("Epoch {}:{}, Valid Loss: {:.2f}, mIoU: {:.3f}".format(
            self.current_epoch, self.config['train']['n_epochs'],
            valid_loss, miou))
        # Keep only the best (lowest validation loss) checkpoint.
        if valid_loss < self.current_loss:
            self.current_loss = valid_loss
            self._save_checkpoint()
        self.writer.add_scalar("Valid Loss", valid_loss, self.current_epoch)
    def finalize(self):
        """Hook for cleanup after training; nothing to do currently."""
        pass
    def _save_checkpoint(self):
        """Persist model/optimizer state and training progress to 'best.pth'."""
        checkpoints = { 'model': self.model.state_dict(),
                        'optimizer': self.optimizer.state_dict(),
                        'current_epoch': self.current_epoch,
                        'current_loss': self.current_loss }
        checkpoint_file = osp.join(self.config['train']['log_dir'],
                                   self.config['train']['checkpoint_dir'],
                                   'best.pth')
        if not osp.exists(osp.dirname(checkpoint_file)):
            os.makedirs(osp.dirname(checkpoint_file))
        torch.save(checkpoints, checkpoint_file)
        print("Save checkpoint to '{}'".format(checkpoint_file))
    def _mean_iou_score(self, pred_masks, true_masks):
        """Compute the mean IoU over classes 0..5.

        NOTE(review): a class absent from both masks produces a 0/0
        division (NaN with numpy scalars) -- confirm whether absent
        classes can occur.
        """
        mean_iou = 0
        for i in range(6):
            tp_fp = np.sum(pred_masks == i)
            tp_fn = np.sum(true_masks == i)
            tp = np.sum((pred_masks == i) * (true_masks == i))
            iou = tp / (tp_fp + tp_fn - tp)
            mean_iou += iou / 6
        return mean_iou
| true | true |
f72b1e7549524106a9f828129970b89627719521 | 51,593 | py | Python | temporal/core.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | temporal/core.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | temporal/core.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | """
This module provides the functionality to create the temporal
SQL database and to establish a connection to the database.
Usage:
.. code-block:: python
>>> import grass.temporal as tgis
>>> # Create the temporal database
>>> tgis.init()
>>> # Establish a database connection
>>> dbif, connected = tgis.init_dbif(None)
>>> dbif.connect()
>>> # Execute a SQL statement
>>> dbif.execute_transaction("SELECT datetime(0, 'unixepoch', 'localtime');")
>>> # Mogrify an SQL statement
>>> dbif.mogrify_sql_statement(["SELECT name from raster_base where name = ?",
... ("precipitation",)])
"SELECT name from raster_base where name = 'precipitation'"
>>> dbif.close()
(C) 2011-2014 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
:author: Soeren Gebbert
"""
#import traceback
import os
import sys
import grass.script as gscript
if sys.version_info.major == 3:
long = int
from .c_libraries_interface import *
from grass.pygrass import messages
from grass.script.utils import decode, encode
# Import all supported database backends
# Ignore import errors since they are checked later
try:
import sqlite3
except ImportError:
pass
# Postgresql is optional, existence is checked when needed
try:
import psycopg2
import psycopg2.extras
except:
pass
import atexit
from datetime import datetime
###############################################################################
def profile_function(func):
    """Run *func*, optionally under the profiler.

    When the environment variable GRASS_TGIS_PROFILE is "True" or "1",
    *func* is executed under cProfile and the cumulative-time statistics
    are printed to stdout; otherwise *func* is simply called.
    """
    flag = os.getenv("GRASS_TGIS_PROFILE")
    if flag != "True" and flag != "1":
        func()
        return
    import cProfile
    import pstats
    try:
        # Python 2 module name
        import StringIO as io
    except ImportError:
        import io
    profiler = cProfile.Profile()
    profiler.enable()
    func()
    profiler.disable()
    stream = io.StringIO()
    stats = pstats.Stats(profiler, stream=stream).sort_stats('cumulative')
    stats.print_stats()
    print(stream.getvalue())
# Name of the temporal GIS backend in use; filled in by init().
# Either "sqlite" or "pg".
tgis_backend = None
def get_tgis_backend():
    """Return the name of the temporal GIS backend.

    :returns: either "sqlite" or "pg"
    """
    return tgis_backend
# Database string of the temporal GIS; filled in by init().
tgis_database = None
def get_tgis_database():
    """Return the temporal database string specified with t.connect."""
    return tgis_database
# Version of the temporal framework; an integer larger than 0.
# Increase it on backward incompatible changes in the TGIS API.
tgis_version = 2
# Version of the temporal database; an integer larger than 0 that may
# differ from the framework version.  Increase it on backward
# incompatible changes of the temporal database SQL layout.
tgis_db_version = 2
# Parameter style of the database backend; filled in by init().
tgis_dbmi_paramstyle = None
def get_tgis_dbmi_paramstyle():
    """Return the parameter style of the temporal database backend.

    :returns: "qmark" or ""
    """
    return tgis_dbmi_paramstyle
# The current mapset/location/gisdbase are needed very often in the
# framework, so init() caches them in module globals.
current_mapset = None
current_location = None
current_gisdbase = None
###############################################################################
def get_current_mapset():
    """Return the current mapset.

    The value is cached in a module global by init(), which makes this
    the fastest way to obtain the current mapset.
    """
    return current_mapset
###############################################################################
def get_current_location():
    """Return the current location.

    The value is cached in a module global by init(), which makes this
    the fastest way to obtain the current location.
    """
    return current_location
###############################################################################
def get_current_gisdbase():
    """Return the current GIS database (gisdbase).

    The value is cached in a module global by init(), which makes this
    the fastest way to obtain the current gisdbase.
    """
    return current_gisdbase
###############################################################################
# When True, maps can only be registered in space time datasets of the
# same mapset, and only maps of the current mapset may be inserted,
# updated or deleted in the temporal database.
# Override with: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"
# ATTENTION: disabling this check is highly experimental, violates the
# grass permission guidance and may corrupt the temporal database.
enable_mapset_check = True
# When True, the timestamps of maps inserted or updated in the temporal
# database are also written as text files through the C-library
# timestamp interface.
# Override with: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"
# ATTENTION: disabling the timestamp write is highly experimental,
# violates the grass permission guidance and may corrupt the temporal
# database.
enable_timestamp_write = True
def get_enable_mapset_check():
    """Return True if mapsets are checked on insert, update and delete
    requests and on space time dataset registration.

    When True, maps can only be registered in space time datasets of the
    same mapset, and only maps of the current mapset may be inserted,
    updated or deleted in the temporal database.

    Override with: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"

    ..warning::

        Disabling this check is highly experimental, violates the grass
        permission guidance and may corrupt the temporal database.
    """
    return enable_mapset_check
def get_enable_timestamp_write():
    """Return True if map timestamps are also written to the spatial
    database metadata.

    When True, the timestamps of maps inserted or updated in the
    temporal database are written as text files through the C-library
    timestamp interface.

    Override with: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"

    ..warning::

        C-libraries cannot access timestamp information that was not
        written as spatial database metadata; modules using timestamps
        through the C-library interface will not work with such maps.
    """
    return enable_timestamp_write
###############################################################################
# Global PyGRASS Messenger object: a fast and exit-safe interface to the
# C-library message functions.
message_interface = None
def _init_tgis_message_interface(raise_on_error=False):
    """Create the global message interface if it does not exist yet.

    :param raise_on_error: If True raise a FatalError exception in case
                           of a fatal error, call sys.exit(1) otherwise
    """
    global message_interface
    if message_interface is not None:
        return
    message_interface = messages.get_msgr(raise_on_error=raise_on_error)
def get_tgis_message_interface():
    """Return the temporal GIS message interface, an instance of
    grass.pygrass.message.Messenger().

    Use this interface to print messages to stdout through the GRASS
    C-library messaging system.
    """
    return message_interface
###############################################################################
# Global C-library interface object: a fast and exit-safe interface to
# the C-library libgis, libraster, libraster3d and libvector functions.
c_library_interface = None
def _init_tgis_c_library_interface():
    """Create the global C-library interface if it does not exist yet."""
    global c_library_interface
    if c_library_interface is not None:
        return
    c_library_interface = CLibrariesInterface()
def get_tgis_c_library_interface():
    """Return the C-library interface that provides a fast and exit-safe
    access to the C-library libgis, libraster, libraster3d and libvector
    functions."""
    return c_library_interface
###############################################################################
# When True, the messenger interface raises a FatalError exception on a
# fatal error instead of calling sys.exit(1).
raise_on_error = False
def set_raise_on_error(raise_exp=True):
    """Define the fatal-error behaviour of the tgis messenger interface.

    The messenger interface is restarted (or created) with the new error
    policy.

    :param raise_exp: True to raise a FatalError exception instead of
                      calling sys.exit(1) when msgr.fatal() is invoked
    :returns: the previous setting
    """
    global raise_on_error, message_interface
    previous = raise_on_error
    raise_on_error = raise_exp
    if message_interface:
        message_interface.set_raise_on_error(raise_on_error)
    else:
        _init_tgis_message_interface(raise_on_error)
    return previous
def get_raise_on_error():
    """Return True if msgr.fatal() raises a FatalError exception instead
    of calling sys.exit(1)."""
    return raise_on_error
###############################################################################
def get_tgis_version():
    """Return the version number of the temporal framework.

    :returns: the temporal framework version (integer)
    """
    return tgis_version
###############################################################################
def get_tgis_db_version():
    """Return the version number of the temporal database.

    :returns: the temporal database version (integer)
    """
    return tgis_db_version
###############################################################################
def get_tgis_metadata(dbif=None):
    """Return the tgis metadata table as a list of rows (dicts) or None
    if it is not present.

    :param dbif: The database interface to be used; a temporary
                 connection is opened (and closed again) when None
    :returns: the selected rows with key/value columns, or None when the
              metadata table does not exist or cannot be read
    """
    dbif, connected = init_dbif(dbif)

    # Select metadata if the table is present.  The table is missing in
    # old or freshly created databases, so any database error is treated
    # as "no metadata available".  FIX: narrowed the bare except so that
    # SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        statement = "SELECT * FROM tgis_metadata;\n"
        dbif.execute(statement)
        rows = dbif.fetchall()
    except Exception:
        rows = None

    if connected:
        dbif.close()

    return rows
###############################################################################
# The temporal database string set with t.connect, with the GRASS
# variables gisdbase, location and mapset already substituted.
tgis_database_string = None
def get_tgis_database_string():
    """Return the preprocessed temporal database string.

    This is the temporal database string set with t.connect in which the
    location, gisdbase and mapset variables have been substituted.
    """
    return tgis_database_string
###############################################################################
def get_sql_template_path():
    """Return the directory containing the SQL template files
    ($GISBASE/etc/sql)."""
    gisbase = os.getenv("GISBASE")
    return os.path.join(gisbase, "etc", "sql")
###############################################################################
def stop_subprocesses():
    """Stop the messenger and C-interface subprocesses
       that are started by tgis.init()
    """
    global message_interface
    global c_library_interface
    # Both globals stay None until init() has spawned the subprocesses,
    # so the truth checks make this function safe to call at any time.
    if message_interface:
        message_interface.stop()
    if c_library_interface:
        c_library_interface.stop()
# We register this function to be called at exit so that the spawned
# subprocesses never outlive the interpreter.
atexit.register(stop_subprocesses)
def get_available_temporal_mapsets():
    """Return the mapsets accessible from the current mapset together
    with their temporal database settings.

    :returns: a dictionary mapping each mapset name to the tuple
              (driver, database)
    """
    global c_library_interface
    global message_interface

    tgis_mapsets = {}
    for mapset in c_library_interface.available_mapsets():
        driver = c_library_interface.get_driver_name(mapset)
        database = c_library_interface.get_database_name(mapset)
        message_interface.debug(1, "get_available_temporal_mapsets: "
                                   "\n  mapset %s\n  driver %s\n  database %s" % (
                                   mapset, driver, database))
        if not (driver and database):
            continue
        # Accept the mapset when its sqlite database file exists, or when
        # it is the current mapset (a missing database is created there).
        if (driver == "sqlite" and os.path.exists(database)) or mapset == get_current_mapset():
            tgis_mapsets[mapset] = (driver, database)
        # Warn when a connection is defined but the database file is gone.
        if driver == "sqlite" and not os.path.exists(database):
            message_interface.warning("Temporal database connection defined as:\n" +
                                      database + "\nBut database file does not exist.")
    return tgis_mapsets
###############################################################################
def init(raise_fatal_error=False):
    """This function sets the correct database backend from GRASS
       environmental variables and creates the grass temporal database
       structure for raster, vector and raster3d maps as well as for the
       space-time datasets strds, str3ds and stvds in case it does not
       exist.

       Several global variables are initiated and the messenger and
       C-library interface subprocesses are spawned.

       Re-run this function in case the following GRASS variables change
       while the process runs:

       - MAPSET
       - LOCATION_NAME
       - GISDBASE
       - TGIS_DISABLE_MAPSET_CHECK
       - TGIS_DISABLE_TIMESTAMP_WRITE

       Re-run this function if the following t.connect variables change
       while the process runs:

       - temporal GIS driver (set by t.connect driver=)
       - temporal GIS database (set by t.connect database=)

       The following environmental variables are checked:

       - GRASS_TGIS_PROFILE (True, False, 1, 0)
       - GRASS_TGIS_RAISE_ON_ERROR (True, False, 1, 0)

       ..warning::

           This function must be called before any spatio-temporal
           processing can be started

       :param raise_fatal_error: Set this True to assure that the init()
                                 function does not kill a persistent process
                                 like the GUI. If set True a
                                 grass.pygrass.messages.FatalError
                                 exception will be raised in case a fatal
                                 error occurs in the init process, otherwise
                                 sys.exit(1) will be called.
    """
    # We need to set the correct database backend and several global variables
    # from the GRASS mapset specific environment variables of g.gisenv and t.connect
    global tgis_backend
    global tgis_database
    global tgis_database_string
    global tgis_dbmi_paramstyle
    global raise_on_error
    global enable_mapset_check
    global enable_timestamp_write
    global current_mapset
    global current_location
    global current_gisdbase

    raise_on_error = raise_fatal_error

    # We must run t.connect at first to create the temporal database and to
    # get the environmental variables
    gscript.run_command("t.connect", flags="c")
    grassenv = gscript.gisenv()

    # Set the global variable for faster access
    current_mapset = grassenv["MAPSET"]
    current_location = grassenv["LOCATION_NAME"]
    current_gisdbase = grassenv["GISDBASE"]

    # Check environment variable GRASS_TGIS_RAISE_ON_ERROR
    if os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "True" or \
       os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "1":
        raise_on_error = True

    # Check if the script library raises on error,
    # if so we do the same
    if gscript.get_raise_on_error() is True:
        raise_on_error = True

    # Start the GRASS message interface server
    _init_tgis_message_interface(raise_on_error)
    # Start the C-library interface server
    _init_tgis_c_library_interface()
    msgr = get_tgis_message_interface()
    msgr.debug(1, "Initiate the temporal database")
    #"\n  traceback:%s"%(str("  \n".join(traceback.format_stack()))))

    msgr.debug(1, ("Raise on error id: %s"%str(raise_on_error)))

    ciface = get_tgis_c_library_interface()
    driver_string = ciface.get_driver_name()
    database_string = ciface.get_database_name()

    # Set the mapset check and the timestamp write
    if "TGIS_DISABLE_MAPSET_CHECK" in grassenv:
        if gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "True" or \
           gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "1":
            enable_mapset_check = False
            msgr.warning("TGIS_DISABLE_MAPSET_CHECK is True")

    if "TGIS_DISABLE_TIMESTAMP_WRITE" in grassenv:
        if gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "True" or \
           gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "1":
            enable_timestamp_write = False
            msgr.warning("TGIS_DISABLE_TIMESTAMP_WRITE is True")

    if driver_string is not None and driver_string != "":
        driver_string = decode(driver_string)
        if driver_string == "sqlite":
            tgis_backend = driver_string
            try:
                import sqlite3
            except ImportError:
                msgr.error("Unable to locate the sqlite SQL Python interface"
                           " module sqlite3.")
                raise
            dbmi = sqlite3
        elif driver_string == "pg":
            tgis_backend = driver_string
            try:
                import psycopg2
            except ImportError:
                msgr.error("Unable to locate the Postgresql SQL Python "
                           "interface module psycopg2.")
                raise
            dbmi = psycopg2
        else:
            msgr.fatal(_("Unable to initialize the temporal DBMI interface. "
                         "Please use t.connect to specify the driver and the"
                         " database string"))
    else:
        # Set the default sqlite3 connection in case nothing was defined
        gscript.run_command("t.connect", flags="d")
        driver_string = ciface.get_driver_name()
        database_string = ciface.get_database_name()
        tgis_backend = driver_string
        try:
            import sqlite3
        except ImportError:
            msgr.error("Unable to locate the sqlite SQL Python interface"
                       " module sqlite3.")
            raise
        dbmi = sqlite3

    tgis_database_string = database_string
    # Set the parameter style
    tgis_dbmi_paramstyle = dbmi.paramstyle

    # We do not know if the database already exists
    db_exists = False
    dbif = SQLDatabaseInterfaceConnection()

    # Check if the database already exists
    if tgis_backend == "sqlite":
        # Check path of the sqlite database
        if os.path.exists(tgis_database_string):
            dbif.connect()
            # Check for raster_base table
            dbif.execute("SELECT name FROM sqlite_master WHERE type='table' "
                         "AND name='raster_base';")
            name = dbif.fetchone()
            if name and name[0] == "raster_base":
                db_exists = True
            dbif.close()
    elif tgis_backend == "pg":
        # Connect to database
        dbif.connect()
        # Check for raster_base table
        dbif.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
                     "WHERE table_name=%s)", ('raster_base',))
        if dbif.fetchone()[0]:
            db_exists = True

    backup_howto = "The format of your actual temporal database is not " \
                   "supported any more.\nSolution: You need to export it by " \
                   "restoring the GRASS GIS version used for creating this DB"\
                   ". From there, create a backup of your temporal database "\
                   "to avoid the loss of your temporal data.\nNotes: Use " \
                   "t.rast.export and t.vect.export to make a backup of your" \
                   " existing space time datasets.To safe the timestamps of" \
                   " your existing maps and space time datasets, use " \
                   "t.rast.list, t.vect.list and t.rast3d.list. "\
                   "You can register the existing time stamped maps easily if"\
                   " you export columns=id,start_time,end_time into text "\
                   "files and use t.register to register them again in new" \
                   " created space time datasets (t.create). After the backup"\
                   " remove the existing temporal database, a new one will be"\
                   " created automatically.\n"

    if db_exists is True:
        # Check the version of the temporal database
        dbif.close()
        dbif.connect()
        metadata = get_tgis_metadata(dbif)
        dbif.close()
        if metadata is None:
            msgr.fatal(_("Unable to receive temporal database metadata.\n"
                         "Current temporal database info:%(info)s") % (
                       {"info": get_database_info_string()}))
        # The metadata rows are (key, value) pairs; compare the stored
        # versions against the versions supported by this framework.
        for entry in metadata:
            if "tgis_version" in entry and entry[1] != str(get_tgis_version()):
                msgr.fatal(_("Unsupported temporal database: version mismatch."
                             "\n %(backup)s Supported temporal API version is:"
                             " %(api)i.\nPlease update your GRASS GIS "
                             "installation.\nCurrent temporal database info:"
                             "%(info)s") % ({"backup": backup_howto,
                                             "api": get_tgis_version(),
                                             "info": get_database_info_string()}))
            if "tgis_db_version" in entry and entry[1] != str(get_tgis_db_version()):
                # FIX: the "tdb" placeholder was previously filled from
                # get_tgis_version(); the database version is intended here
                # (both are currently 2, so output is unchanged today).
                msgr.fatal(_("Unsupported temporal database: version mismatch."
                             "\n %(backup)sSupported temporal database version"
                             " is: %(tdb)i\nCurrent temporal database info:"
                             "%(info)s") % ({"backup": backup_howto,
                                             "tdb": get_tgis_db_version(),
                                             "info": get_database_info_string()}))
        return

    create_temporal_database(dbif)
###############################################################################
def get_database_info_string():
    """Return a human readable summary of the temporal database setup.

    The summary names the DBMI Python module in use and the connection
    string of the temporal database.
    """
    dbif = SQLDatabaseInterfaceConnection()
    parts = ("\nDBMI interface:..... %s" % str(dbif.get_dbmi().__name__),
             "\nTemporal database:.. %s" % str(get_tgis_database_string()))
    return "".join(parts)
###############################################################################
def create_temporal_database(dbif):
    """This function will create the temporal database

    It will create all tables and triggers that are needed to run
    the temporal GIS

    :param dbif: The database interface to be used
    """
    global tgis_backend
    global tgis_version
    global tgis_db_version
    global tgis_database_string

    template_path = get_sql_template_path()
    msgr = get_tgis_message_interface()

    def read_sql(filename):
        # Read one SQL template/script and close the file handle right
        # away (the previous open(...).read() calls leaked file objects).
        with open(os.path.join(template_path, filename), 'r') as sql_file:
            return sql_file.read()

    # Read all SQL scripts and templates
    map_tables_template_sql = read_sql("map_tables_template.sql")
    raster_metadata_sql = read_sql("raster_metadata_table.sql")
    raster3d_metadata_sql = read_sql("raster3d_metadata_table.sql")
    vector_metadata_sql = read_sql("vector_metadata_table.sql")
    raster_views_sql = read_sql("raster_views.sql")
    raster3d_views_sql = read_sql("raster3d_views.sql")
    vector_views_sql = read_sql("vector_views.sql")

    stds_tables_template_sql = read_sql("stds_tables_template.sql")
    strds_metadata_sql = read_sql("strds_metadata_table.sql")
    str3ds_metadata_sql = read_sql("str3ds_metadata_table.sql")
    stvds_metadata_sql = read_sql("stvds_metadata_table.sql")
    strds_views_sql = read_sql("strds_views.sql")
    str3ds_views_sql = read_sql("str3ds_views.sql")
    stvds_views_sql = read_sql("stvds_views.sql")

    # Create the raster, raster3d and vector tables SQL statements
    raster_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "raster")
    vector_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "vector")
    raster3d_tables_sql = map_tables_template_sql.replace(
        "GRASS_MAP", "raster3d")

    # Create the space-time raster, raster3d and vector dataset tables
    # SQL statements
    strds_tables_sql = stds_tables_template_sql.replace("STDS", "strds")
    stvds_tables_sql = stds_tables_template_sql.replace("STDS", "stvds")
    str3ds_tables_sql = stds_tables_template_sql.replace("STDS", "str3ds")

    # Interpolate outside of _() so the message catalog is queried with
    # the plain template (the %-formatting used to happen inside _()).
    msgr.message(_("Creating temporal database: %s") %
                 str(tgis_database_string))

    if tgis_backend == "sqlite":
        # We need to create the sqlite3 database path if it does not exist
        tgis_dir = os.path.dirname(tgis_database_string)
        if not os.path.exists(tgis_dir):
            try:
                os.makedirs(tgis_dir)
            except Exception as e:
                msgr.fatal(_("Unable to create SQLite temporal database\n"
                             "Exception: %s\nPlease use t.connect to set a "
                             "read- and writable temporal database path") % e)
        # Set up the trigger that takes care of
        # the correct deletion of entries across the different tables
        delete_trigger_sql = read_sql("sqlite3_delete_trigger.sql")
        indexes_sql = read_sql("sqlite3_indexes.sql")
    else:
        # Set up the trigger that takes care of
        # the correct deletion of entries across the different tables
        delete_trigger_sql = read_sql("postgresql_delete_trigger.sql")
        indexes_sql = read_sql("postgresql_indexes.sql")

    # Connect now to the database
    if dbif.connected is not True:
        dbif.connect()

    # Execute the SQL statements
    # Create the global tables for the native grass datatypes
    dbif.execute_transaction(raster_tables_sql)
    dbif.execute_transaction(raster_metadata_sql)
    dbif.execute_transaction(raster_views_sql)
    dbif.execute_transaction(vector_tables_sql)
    dbif.execute_transaction(vector_metadata_sql)
    dbif.execute_transaction(vector_views_sql)
    dbif.execute_transaction(raster3d_tables_sql)
    dbif.execute_transaction(raster3d_metadata_sql)
    dbif.execute_transaction(raster3d_views_sql)
    # Create the tables for the new space-time datatypes
    dbif.execute_transaction(strds_tables_sql)
    dbif.execute_transaction(strds_metadata_sql)
    dbif.execute_transaction(strds_views_sql)
    dbif.execute_transaction(stvds_tables_sql)
    dbif.execute_transaction(stvds_metadata_sql)
    dbif.execute_transaction(stvds_views_sql)
    dbif.execute_transaction(str3ds_tables_sql)
    dbif.execute_transaction(str3ds_metadata_sql)
    dbif.execute_transaction(str3ds_views_sql)

    # The delete trigger
    dbif.execute_transaction(delete_trigger_sql)
    # The indexes
    dbif.execute_transaction(indexes_sql)

    # Store the initial configuration of the temporal database in the
    # tgis metadata table
    metadata = {}
    metadata["tgis_version"] = tgis_version
    metadata["tgis_db_version"] = tgis_db_version
    metadata["creation_time"] = datetime.today()
    _create_tgis_metadata_table(metadata, dbif)

    dbif.close()
###############################################################################
def _create_tgis_metadata_table(content, dbif=None):
    """Create the temporal GIS metadata table and fill it.

    The table stores all metadata information about the temporal
    database as simple key/value pairs.

    :param content: The dictionary that stores the key:value metadata
                    that should be stored in the metadata table
    :param dbif: The database interface to be used
    """
    dbif, connected = init_dbif(dbif)
    dbif.execute_transaction(
        "CREATE TABLE tgis_metadata (key VARCHAR NOT NULL, value VARCHAR);\n")

    for key, value in content.items():
        insert_statement = ("INSERT INTO tgis_metadata (key, value) VALUES "
                            "('%s' , '%s');\n" % (str(key), str(value)))
        dbif.execute_transaction(insert_statement)

    if connected:
        dbif.close()
###############################################################################
class SQLDatabaseInterfaceConnection(object):
    """Mapset aware facade over the per-database DBConnection objects.

    Several mapsets may share the same temporal database, hence the
    connections are pooled by database string and looked up by mapset
    name. All SQL entry points accept an optional mapset argument that
    defaults to the current mapset.
    """

    def __init__(self):
        self.tgis_mapsets = get_available_temporal_mapsets()
        self.current_mapset = get_current_mapset()
        self.connections = {}
        self.connected = False

        self.unique_connections = {}

        for mapset in self.tgis_mapsets.keys():
            driver, dbstring = self.tgis_mapsets[mapset]
            # Share one DBConnection between all mapsets that use the
            # same database string
            if dbstring not in self.unique_connections.keys():
                self.unique_connections[dbstring] = DBConnection(
                    backend=driver, dbstring=dbstring)
            self.connections[mapset] = self.unique_connections[dbstring]

        self.msgr = get_tgis_message_interface()

    def get_dbmi(self, mapset=None):
        """Return the DBMI module (sqlite3 or psycopg2) used by a mapset."""
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        return self.connections[mapset].dbmi

    def rollback(self, mapset=None):
        """
        Roll back the last transaction. This must be called
        in case a new query should be performed after a db error.

        This is only relevant for postgresql database.
        """
        if mapset is None:
            mapset = self.current_mapset
        mapset = decode(mapset)
        # Bug fix: the rollback request was previously dropped after the
        # mapset was resolved; forward it to the underlying connection
        # (DBConnection.rollback is a no-op for sqlite and for closed
        # connections, so this is safe for all backends).
        if mapset in self.tgis_mapsets.keys():
            self.connections[mapset].rollback()

    def connect(self):
        """Connect to the DBMI to execute SQL statements

        Supported backends are sqlite3 and postgresql
        """
        for mapset in self.tgis_mapsets.keys():
            driver, dbstring = self.tgis_mapsets[mapset]
            conn = self.connections[mapset]
            if conn.is_connected() is False:
                conn.connect(dbstring)

        self.connected = True

    def is_connected(self):
        """Return True if connect() has been called and close() has not."""
        return self.connected

    def close(self):
        """Close the DBMI connection

        There may be several temporal databases in a location, hence
        close all temporal databases that have been opened.
        """
        for key in self.unique_connections.keys():
            self.unique_connections[key].close()

        self.connected = False

    def mogrify_sql_statement(self, content, mapset=None):
        """Return the SQL statement and arguments as executable SQL string

        :param content: The content as tuple with two entries, the first
                        entry is the SQL statement with DBMI specific
                        place holder (?), the second entry is the argument
                        list that should substitute the place holder.
        :param mapset: The mapset of the abstract dataset or temporal
                       database location, if None the current mapset
                       will be used
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to mogrify sql statement. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].mogrify_sql_statement(content)

    def check_table(self, table_name, mapset=None):
        """Check if a table exists in the temporal database

        :param table_name: The name of the table to be checked for existence
        :param mapset: The mapset of the abstract dataset or temporal
                       database location, if None the current mapset
                       will be used
        :returns: True if the table exists, False otherwise

        TODO:
        There may be several temporal databases in a location, hence
        the mapset is used to query the correct temporal database.
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to check table. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].check_table(table_name)

    def execute(self, statement, args=None, mapset=None):
        """Execute a SQL statement in the temporal database of a mapset.

        :param mapset: The mapset of the abstract dataset or temporal
                       database location, if None the current mapset
                       will be used
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to execute sql statement. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].execute(statement, args)

    def fetchone(self, mapset=None):
        """Fetch the next result row of the last query of a mapset."""
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to fetch one. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].fetchone()

    def fetchall(self, mapset=None):
        """Fetch all result rows of the last query of a mapset."""
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to fetch all. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].fetchall()

    def execute_transaction(self, statement, mapset=None):
        """Execute a transactional SQL statement

        The BEGIN and END TRANSACTION statements will be added automatically
        to the sql statement

        :param statement: The executable SQL statement or SQL script
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to execute transaction. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].execute_transaction(statement)

    def _create_mapset_error_message(self, mapset):
        # Shared tail for the fatal() messages of the methods above
        return ("You have no permission to "
                "access mapset <%(mapset)s>, or "
                "mapset <%(mapset)s> has no temporal database. "
                "Accessible mapsets are: <%(mapsets)s>" %
                {"mapset": decode(mapset),
                 "mapsets": ','.join(self.tgis_mapsets.keys())})
###############################################################################
class DBConnection(object):
    """This class represents the database interface connection
    and provides access to the chosen backend modules.

    The following DBMS are supported:

      - sqlite via the sqlite3 standard library
      - postgresql via psycopg2
    """

    def __init__(self, backend=None, dbstring=None):
        """ Constructor of a database connection

        :param backend: The database backend "sqlite" or "pg"; if None
                        the module wide default backend is used
        :param dbstring: The database connection string; if None the
                         module wide default connection string is used
        """
        self.connected = False
        if backend is None:
            global tgis_backend
            if decode(tgis_backend) == "sqlite":
                self.dbmi = sqlite3
            else:
                self.dbmi = psycopg2
        else:
            if decode(backend) == "sqlite":
                self.dbmi = sqlite3
            else:
                self.dbmi = psycopg2

        if dbstring is None:
            global tgis_database_string
            self.dbstring = tgis_database_string
        else:
            # Bug fix: an unconditional "self.dbstring = dbstring" used to
            # follow this block, which clobbered the module wide default
            # above with None whenever no dbstring was passed.
            self.dbstring = dbstring

        self.msgr = get_tgis_message_interface()
        self.msgr.debug(1, "DBConnection constructor:"
                           "\n backend: %s"
                           "\n dbstring: %s" % (backend, self.dbstring))

    def __del__(self):
        # Commit and close a still-open connection on garbage collection
        if self.connected is True:
            self.close()

    def is_connected(self):
        """Return True if connect() has been called and close() has not."""
        return self.connected

    def rollback(self):
        """
        Roll back the last transaction. This must be called
        in case a new query should be performed after a db error.

        This is only relevant for postgresql database.
        """
        if self.dbmi.__name__ == "psycopg2":
            if self.connected:
                self.connection.rollback()

    def connect(self, dbstring=None):
        """Connect to the DBMI to execute SQL statements

        Supported backends are sqlite3 and postgresql

        :param dbstring: The database connection string; defaults to the
                         string the connection was constructed with
        """
        # Connection in the current mapset
        if dbstring is None:
            dbstring = self.dbstring

        dbstring = decode(dbstring)
        try:
            if self.dbmi.__name__ == "sqlite3":
                self.connection = self.dbmi.connect(
                    dbstring,
                    detect_types=self.dbmi.PARSE_DECLTYPES | self.dbmi.PARSE_COLNAMES)
                self.connection.row_factory = self.dbmi.Row
                self.connection.isolation_level = None
                self.connection.text_factory = str
                self.cursor = self.connection.cursor()
                # Trade durability for speed; acceptable for the local
                # temporal metadata database
                self.cursor.execute("PRAGMA synchronous = OFF")
                self.cursor.execute("PRAGMA journal_mode = MEMORY")
            elif self.dbmi.__name__ == "psycopg2":
                self.connection = self.dbmi.connect(dbstring)
                #self.connection.set_isolation_level(dbmi.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                self.cursor = self.connection.cursor(
                    cursor_factory=self.dbmi.extras.DictCursor)
            self.connected = True
        except Exception as e:
            self.msgr.fatal(_("Unable to connect to %(db)s database: "
                              "%(string)s\nException: \"%(ex)s\"\nPlease use"
                              " t.connect to set a read- and writable "
                              "temporal database backend") % (
                            {"db": self.dbmi.__name__,
                             "string": tgis_database_string, "ex": e, }))

    def close(self):
        """Close the DBMI connection, committing pending changes first.

        TODO:
        There may be several temporal databases in a location, hence
        close all temporal databases that have been opened. Use a dictionary
        to manage different connections.
        """
        self.connection.commit()
        self.cursor.close()
        self.connected = False

    def mogrify_sql_statement(self, content):
        """Return the SQL statement and arguments as executable SQL string

        TODO:
        Use the mapset argument to identify the correct database driver

        :param content: The content as tuple with two entries, the first
                        entry is the SQL statement with DBMI specific
                        place holder (?), the second entry is the argument
                        list that should substitute the place holder.

        Usage:

        .. code-block:: python

            >>> init()
            >>> dbif = SQLDatabaseInterfaceConnection()
            >>> dbif.mogrify_sql_statement(["SELECT ctime FROM raster_base WHERE id = ?",
            ... ["soil@PERMANENT",]])
            "SELECT ctime FROM raster_base WHERE id = 'soil@PERMANENT'"
        """
        sql = content[0]
        args = content[1]

        if self.dbmi.__name__ == "psycopg2":
            if len(args) == 0:
                return sql
            else:
                if self.connected:
                    try:
                        return self.cursor.mogrify(sql, args)
                    except Exception as exc:
                        print(sql, args)
                        raise exc
                else:
                    self.connect()
                    statement = self.cursor.mogrify(sql, args)
                    self.close()
                    return statement

        elif self.dbmi.__name__ == "sqlite3":
            if len(args) == 0:
                return sql
            else:
                # Unfortunately as sqlite does not support
                # the transformation of sql strings and qmarked or
                # named arguments we must make our hands dirty
                # and do it by ourself. :(
                # Doors are open for SQL injection because of the
                # limited python sqlite3 implementation!!!
                # NOTE(review): a "?" at index 0 of the statement is never
                # substituted because find() starts searching at index 1.
                pos = 0
                count = 0
                maxcount = 100
                statement = sql

                while count < maxcount:
                    pos = statement.find("?", pos + 1)
                    if pos == -1:
                        break

                    if args[count] is None:
                        statement = "%sNULL%s" % (statement[0:pos],
                                                  statement[pos + 1:])
                    elif isinstance(args[count], (int, long)):
                        statement = "%s%d%s" % (statement[0:pos], args[count],
                                                statement[pos + 1:])
                    elif isinstance(args[count], float):
                        statement = "%s%f%s" % (statement[0:pos], args[count],
                                                statement[pos + 1:])
                    elif isinstance(args[count], datetime):
                        statement = "%s\'%s\'%s" % (statement[0:pos],
                                                    str(args[count]),
                                                    statement[pos + 1:])
                    else:
                        # Default is a string, this works for datetime
                        # objects too
                        statement = "%s\'%s\'%s" % (statement[0:pos],
                                                    str(args[count]),
                                                    statement[pos + 1:])
                    count += 1

                return statement

    def check_table(self, table_name):
        """Check if a table exists in the temporal database

        :param table_name: The name of the table to be checked for existence
        :returns: True if the table exists, False otherwise

        TODO:
        There may be several temporal databases in a location, hence
        the mapset is used to query the correct temporal database.
        """
        table_exists = False
        connected = False
        if not self.connected:
            self.connect()
            connected = True

        # Check if the database already exists
        if self.dbmi.__name__ == "sqlite3":
            self.cursor.execute("SELECT name FROM sqlite_master WHERE "
                                "type='table' AND name='%s';" % table_name)
            name = self.cursor.fetchone()
            if name and name[0] == table_name:
                table_exists = True
        else:
            # Check for raster_base table
            self.cursor.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
                                "WHERE table_name=%s)", ('%s' % table_name,))
            if self.cursor.fetchone()[0]:
                table_exists = True

        if connected:
            self.close()

        return table_exists

    def execute(self, statement, args=None):
        """Execute a SQL statement

        :param statement: The executable SQL statement or SQL script
        """
        connected = False
        if not self.connected:
            self.connect()
            connected = True
        try:
            if args:
                self.cursor.execute(statement, args)
            else:
                self.cursor.execute(statement)
        except:
            if connected:
                self.close()
            self.msgr.error(_("Unable to execute :\n %(sql)s" %
                              {"sql": statement}))
            raise

        if connected:
            self.close()

    def fetchone(self):
        """Fetch the next result row, or None when not connected."""
        if self.connected:
            return self.cursor.fetchone()
        return None

    def fetchall(self):
        """Fetch all result rows, or None when not connected."""
        if self.connected:
            return self.cursor.fetchall()
        return None

    def execute_transaction(self, statement, mapset=None):
        """Execute a SQL statement or SQL script and commit it

        :param statement: The executable SQL statement or SQL script
        :param mapset: Unused, kept for interface compatibility with
                       SQLDatabaseInterfaceConnection.execute_transaction
        """
        connected = False
        if not self.connected:
            self.connect()
            connected = True

        # NOTE: earlier revisions assembled a "BEGIN TRANSACTION; ...
        # END TRANSACTION;" wrapper here but never executed it (dead
        # code); the raw statement is executed and committed instead.
        try:
            if self.dbmi.__name__ == "sqlite3":
                # executescript() handles multi-statement SQL scripts
                self.cursor.executescript(statement)
            else:
                self.cursor.execute(statement)
            self.connection.commit()
        except:
            if connected:
                self.close()
            self.msgr.error(_("Unable to execute transaction:\n %(sql)s" %
                              {"sql": statement}))
            raise

        if connected:
            self.close()
###############################################################################
def init_dbif(dbif):
    """Return a connected database interface and a created flag.

    If *dbif* is None a new SQLDatabaseInterfaceConnection is created and
    connected; if it exists but is not connected, it is connected. The
    second tuple entry tells the caller whether it is responsible for
    closing the connection.

    :returns: the tuple (dbif, True|False)

    Usage code sample:

    .. code-block:: python

        dbif, connect = tgis.init_dbif(None)

        sql = dbif.mogrify_sql_statement(["SELECT * FROM raster_base WHERE ? = ?"],
                                         ["id", "soil@PERMANENT"])
        dbif.execute_transaction(sql)

        if connect:
            dbif.close()
    """
    if dbif is not None and dbif.is_connected():
        # Caller already owns an open connection
        return dbif, False
    if dbif is None:
        dbif = SQLDatabaseInterfaceConnection()
    dbif.connect()
    return dbif, True
###############################################################################
# Run the doctests embedded in the docstrings when this module is
# executed directly (e.g. "python core.py").
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 37.170749 | 100 | 0.589111 |
import os
import sys
import grass.script as gscript
if sys.version_info.major == 3:
long = int
from .c_libraries_interface import *
from grass.pygrass import messages
from grass.script.utils import decode, encode
try:
import sqlite3
except ImportError:
pass
try:
import psycopg2
import psycopg2.extras
except:
pass
import atexit
from datetime import datetime
| true | true |
f72b21cb7cd90c4cedf514ee804f2b47f748ee67 | 4,395 | py | Python | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | null | null | null | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | 1 | 2021-07-27T21:03:40.000Z | 2021-07-27T21:03:40.000Z | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | null | null | null | import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
class Runner:
    """Renders mdscript source files by expanding transformer directives.

    A directive has the shape ``{{<name>::<attribute>::}}`` where ``<name>``
    must be a key of ``config.transformers``. Source files are prefixed
    with ``__``; the rendered output file drops that prefix.
    """

    def __init__(self, config: Any, base_dirpath: str):
        self.config = config
        self.base_dirpath = base_dirpath
        self.watcher = Watcher(runner=self)
        self.files_dependencies = FilesDependenciesManager(watcher=self.watcher)

    def _run_in_file(self, source_filepath: str, output_filepath: str, run_test: bool):
        """Expand every directive of one source file and write the rendered
        text to output_filepath. Errors are logged as warnings, not raised."""
        try:
            with open(source_filepath, 'r') as source_markdown_file:
                source_file_content = source_markdown_file.read()
                rendered_file_content = ""
                remaining_unprocessed_file_content = source_file_content

                transformers_names_selectors: str = '|'.join(self.config.transformers.keys())
                # Instead of looking for each transformer one by one, we create
                # a single regex tasked with finding any transformer.
                # Bug fix: the attribute group must be non-greedy ("*?");
                # with the previous greedy "*" a file containing several
                # directives was collapsed into one match spanning from the
                # first "{{" to the last "::}}".
                transformers_regex = '({{)' + f'({transformers_names_selectors})' + '(::)((.|\n)*?)(::}})'
                for match in re.finditer(pattern=transformers_regex, string=source_file_content):
                    match_start = match.start()
                    match_end = match.end()

                    # Offset of the remaining buffer inside the full content
                    index_relative_to_remaining_unprocessed = len(source_file_content) - len(remaining_unprocessed_file_content)
                    unprocessed_text_pre_match = remaining_unprocessed_file_content[0:match_start - index_relative_to_remaining_unprocessed]
                    remaining_unprocessed_file_content = remaining_unprocessed_file_content[match_end - index_relative_to_remaining_unprocessed:]

                    transformer_name = match[2]
                    transformer_attribute = match[4]

                    transformer_class_type = self.config.transformers.get(transformer_name, None)
                    if transformer_class_type is None:
                        raise Exception(f"No transformer found for {transformer_name}")

                    transformer_instance = transformer_class_type(
                        runner=self, source_filepath=source_filepath, attribute=transformer_attribute
                    )
                    if run_test is True:
                        transformer_instance.test()

                    transformed_content = transformer_instance.transform()
                    rendered_file_content += f"{unprocessed_text_pre_match}{transformed_content}"
                rendered_file_content += remaining_unprocessed_file_content

                with open(output_filepath, 'w+') as output_file:
                    output_file.write(rendered_file_content)
        except Exception as e:
            logging.warning(e)

    def _run_with_filepath(self, source_filepath: str, run_test: bool):
        # Derive the output path by stripping the leading "__" from the
        # source filename, then render the file next to its source.
        source_filepath_object = Path(source_filepath)
        formatted_output_filename = source_filepath_object.name[2:]
        output_filepath = os.path.join(source_filepath_object.parent, formatted_output_filename)
        self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_test)

    def _run_in_folder(self, dirpath: str, run_tests: bool):
        """Render every "__"-prefixed file found anywhere below dirpath."""
        for root_dirpath, dirs, filenames in os.walk(dirpath):
            for filename in filenames:
                if filename[0:2] == '__':
                    source_filepath = os.path.join(root_dirpath, filename)
                    output_filename = filename[2:]
                    output_filepath = os.path.join(root_dirpath, output_filename)
                    self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_tests)

    def _start(self, run_tests: bool):
        # When starting the runner, we first run the base_dirpath folder
        # once, which builds all of our mdscript files and indexes all the
        # dependency files.
        self._run_in_folder(dirpath=self.base_dirpath, run_tests=run_tests)
        # Then we simply start the watcher, which will always watch the
        # entire base_dirpath folder; all of the dependency files have
        # already been added to its watch.
        self.watcher.start()

    def start(self):
        self._start(run_tests=False)

    def start_with_tests(self):
        self._start(run_tests=True)
| 49.943182 | 145 | 0.669852 | import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
class Runner:
def __init__(self, config: Any, base_dirpath: str):
self.config = config
self.base_dirpath = base_dirpath
self.watcher = Watcher(runner=self)
self.files_dependencies = FilesDependenciesManager(watcher=self.watcher)
def _run_in_file(self, source_filepath: str, output_filepath: str, run_test: bool):
try:
with open(source_filepath, 'r') as source_markdown_file:
source_file_content = source_markdown_file.read()
rendered_file_content = ""
remaining_unprocessed_file_content = source_file_content
transformers_names_selectors: str = '|'.join(self.config.transformers.keys())
transformers_regex = '({{)' + f'({transformers_names_selectors})' + '(::)((.|\n)*)(::}})'
for match in re.finditer(pattern=transformers_regex, string=source_file_content):
match_start = match.start()
match_end = match.end()
index_relative_to_remaining_unprocessed = len(source_file_content) - len(remaining_unprocessed_file_content)
unprocessed_text_pre_match = remaining_unprocessed_file_content[0:match_start - index_relative_to_remaining_unprocessed]
remaining_unprocessed_file_content = remaining_unprocessed_file_content[match_end - index_relative_to_remaining_unprocessed:]
transformer_name = match[2]
transformer_attribute = match[4]
transformer_class_type = self.config.transformers.get(transformer_name, None)
if transformer_class_type is None:
raise Exception(f"No transformer found for {transformer_name}")
transformer_instance = transformer_class_type(
runner=self, source_filepath=source_filepath, attribute=transformer_attribute
)
if run_test is True:
transformer_instance.test()
transformed_content = transformer_instance.transform()
rendered_file_content += f"{unprocessed_text_pre_match}{transformed_content}"
rendered_file_content += remaining_unprocessed_file_content
with open(output_filepath, 'w+') as output_file:
output_file.write(rendered_file_content)
except Exception as e:
logging.warning(e)
def _run_with_filepath(self, source_filepath: str, run_test: bool):
source_filepath_object = Path(source_filepath)
formatted_output_filename = source_filepath_object.name[2:]
output_filepath = os.path.join(source_filepath_object.parent, formatted_output_filename)
self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_test)
def _run_in_folder(self, dirpath: str, run_tests: bool):
for root_dirpath, dirs, filenames in os.walk(dirpath):
for filename in filenames:
if filename[0:2] == '__':
source_filepath = os.path.join(root_dirpath, filename)
output_filename = filename[2:]
output_filepath = os.path.join(root_dirpath, output_filename)
self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_tests)
def _start(self, run_tests: bool):
self._run_in_folder(dirpath=self.base_dirpath, run_tests=run_tests)
self.watcher.start()
def start(self):
self._start(run_tests=False)
def start_with_tests(self):
self._start(run_tests=True)
| true | true |
f72b239857d42e26a3ecdb3d5902e5cf5b358e32 | 2,569 | py | Python | verilator/dut_gen.py | mlulaj/fuzzing | 81e17a3363490361475bfd9ae28a5ae495be27b8 | [
"BSD-3-Clause"
] | 48 | 2018-09-26T03:35:37.000Z | 2022-03-20T05:05:56.000Z | verilator/dut_gen.py | mlulaj/fuzzing | 81e17a3363490361475bfd9ae28a5ae495be27b8 | [
"BSD-3-Clause"
] | 10 | 2018-07-19T21:16:22.000Z | 2021-09-06T22:21:01.000Z | verilator/dut_gen.py | mlulaj/fuzzing | 81e17a3363490361475bfd9ae28a5ae495be27b8 | [
"BSD-3-Clause"
] | 6 | 2020-02-06T01:33:54.000Z | 2021-08-29T21:20:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, Kevin Laeufer <ekiwi@berkeley.edu>
# Generates the `dut.hpp` file which contains dut specific interface code
# from the TOML dut description file.
import os, sys, argparse
import toml
template = """
// This file was generated from {conf_toml} using the dut_gen.py script.
// It contains DUt specific interface code for the verilator C++ test harness.
#ifndef DUT_CONF_HPP
#define DUT_CONF_HPP
#if defined(E2E)
#include <V{toplevel}_E2EHarness.h>
#define TOP_TYPE V{toplevel}_E2EHarness
#else
#include <V{toplevel}_VHarness.h>
#define TOP_TYPE V{toplevel}_VHarness
#endif
#define TOPLEVEL_STR "{toplevel}"
static constexpr size_t CoverageSize = {cov_size};
static constexpr size_t InputSize = {input_size};
static inline void apply_input(TOP_TYPE* top, const uint8_t* input) {{
{apply_input}
}}
static inline void read_coverage(TOP_TYPE* top, uint8_t* coverage) {{
{read_coverage}
}}
#endif // DUT_CONF_HPP
"""
align = 8
def bits_to_size(bits, align=8):
    """Return the number of bytes needed to hold *bits* bits, rounded up
    to a whole number of *align*-byte words.

    :param bits: non-negative bit count
    :param align: word size in bytes; defaults to the module wide 8-byte
                  alignment (new parameter, backward compatible)
    :returns: byte count that is a multiple of *align*
    """
    # Renamed the local that shadowed the builtin "bytes"
    n_bytes = (bits + 7) // 8
    words = (n_bytes + align - 1) // align
    return words * align
# Command line entry point: read the TOML DUT description and emit the
# DUT specific C++ header used by the verilator test harness.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='generate DUT specific verilator code')
    parser.add_argument('-o', '--output', help='dut header file name', required=True)
    parser.add_argument('-i', '--input', help='toml dut description', required=True)
    args = parser.parse_args()

    # Validate that the input description file exists.
    # NOTE(review): "dur" below looks like a typo for "dut"; it is a
    # runtime message, so it is left untouched here.
    conf_toml = args.input
    if not os.path.isfile(conf_toml):
        sys.stderr.write("dur config file `{}` not found\n".format(conf_toml))
        sys.exit(1)
    # The output file itself may not exist yet, but its directory must.
    header = args.output
    header_dir = os.path.dirname(os.path.abspath(header))
    if not os.path.isdir(header_dir):
        sys.stderr.write("output directory `{}` does not exist\n".format(header_dir))
        sys.exit(1)

    # Parse the TOML DUT description
    conf = toml.loads(open(conf_toml).read())

    # Total widths of all input fields and coverage counters, in bits,
    # rounded up to aligned byte counts
    input_bits = sum(ii['width'] for ii in conf['input'])
    input_size = bits_to_size(input_bits)
    cov_bits = sum(counter['width'] for counter in conf['counter'])
    # the cycles count in front of the coverage feedback takes 16bit
    cov_size = bits_to_size(cov_bits + 2 * 8) - 2

    # One generated C++ line per byte of input / coverage; {0} is the
    # byte index
    i_line = "\ttop->io_input_bytes_{0: <3} = input[{0: >3}];"
    c_line = "\tcoverage[{0: >3}] = top->io_coverage_bytes_{0};"

    # Substitution values for the header template defined above
    dd = { 'conf_toml': conf_toml, 'toplevel': conf['general']['top'],
           'cov_size': cov_size, 'input_size': input_size,
           'apply_input': "\n".join(i_line.format(ii) for ii in range(input_size)),
           'read_coverage': "\n".join(c_line.format(ii) for ii in range(cov_size))
    }
    output = template.format(**dd)
open(header, 'w').write(output) | 30.583333 | 82 | 0.708836 |
import os, sys, argparse
import toml
template = """
// This file was generated from {conf_toml} using the dut_gen.py script.
// It contains DUt specific interface code for the verilator C++ test harness.
#ifndef DUT_CONF_HPP
#define DUT_CONF_HPP
#if defined(E2E)
#include <V{toplevel}_E2EHarness.h>
#define TOP_TYPE V{toplevel}_E2EHarness
#else
#include <V{toplevel}_VHarness.h>
#define TOP_TYPE V{toplevel}_VHarness
#endif
#define TOPLEVEL_STR "{toplevel}"
static constexpr size_t CoverageSize = {cov_size};
static constexpr size_t InputSize = {input_size};
static inline void apply_input(TOP_TYPE* top, const uint8_t* input) {{
{apply_input}
}}
static inline void read_coverage(TOP_TYPE* top, uint8_t* coverage) {{
{read_coverage}
}}
#endif // DUT_CONF_HPP
"""
align = 8
def bits_to_size(bits):
bytes = (bits + 7) // 8
words = (bytes + align - 1) // align
return words * align
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='generate DUT specific verilator code')
parser.add_argument('-o', '--output', help='dut header file name', required=True)
parser.add_argument('-i', '--input', help='toml dut description', required=True)
args = parser.parse_args()
conf_toml = args.input
if not os.path.isfile(conf_toml):
sys.stderr.write("dur config file `{}` not found\n".format(conf_toml))
sys.exit(1)
header = args.output
header_dir = os.path.dirname(os.path.abspath(header))
if not os.path.isdir(header_dir):
sys.stderr.write("output directory `{}` does not exist\n".format(header_dir))
sys.exit(1)
conf = toml.loads(open(conf_toml).read())
input_bits = sum(ii['width'] for ii in conf['input'])
input_size = bits_to_size(input_bits)
cov_bits = sum(counter['width'] for counter in conf['counter'])
cov_size = bits_to_size(cov_bits + 2 * 8) - 2
i_line = "\ttop->io_input_bytes_{0: <3} = input[{0: >3}];"
c_line = "\tcoverage[{0: >3}] = top->io_coverage_bytes_{0};"
dd = { 'conf_toml': conf_toml, 'toplevel': conf['general']['top'],
'cov_size': cov_size, 'input_size': input_size,
'apply_input': "\n".join(i_line.format(ii) for ii in range(input_size)),
'read_coverage': "\n".join(c_line.format(ii) for ii in range(cov_size))
}
output = template.format(**dd)
open(header, 'w').write(output) | true | true |
f72b24aadd868431479c08d35f7980c4d40e563c | 5,289 | py | Python | deepctr/models/din.py | BradyBromley/DeepCTR | 3d12ffc0e0a5e893dce8bd315824c180445b772e | [
"Apache-2.0"
] | 2 | 2019-11-07T10:17:40.000Z | 2020-04-13T14:25:14.000Z | deepctr/models/din.py | BradyBromley/DeepCTR | 3d12ffc0e0a5e893dce8bd315824c180445b772e | [
"Apache-2.0"
] | 7 | 2019-12-16T22:22:25.000Z | 2022-02-10T00:37:34.000Z | deepctr/models/din.py | BradyBromley/DeepCTR | 3d12ffc0e0a5e893dce8bd315824c180445b772e | [
"Apache-2.0"
] | 1 | 2020-01-07T09:12:21.000Z | 2020-01-07T09:12:21.000Z | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068. (https://arxiv.org/pdf/1706.06978.pdf)
"""
from tensorflow.python.keras.layers import Dense,Concatenate, Flatten
from tensorflow.python.keras.models import Model
from ..inputs import build_input_features,create_embedding_matrix,SparseFeat,VarLenSparseFeat,DenseFeat,embedding_lookup,get_dense_input,varlen_embedding_lookup,get_varlen_pooling_list,combined_dnn_input
from ..layers.core import DNN, PredictionLayer
from ..layers.sequence import AttentionSequencePoolingLayer
from ..layers.utils import concat_fun, NoMask
def DIN(dnn_feature_columns, history_feature_list, embedding_size=8, hist_len_max=16, dnn_use_bn=False,
dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice",
att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
task='binary'):
"""Instantiates the Deep Interest Network architecture.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
:param history_feature_list: list,to indicate sequence sparse field
:param embedding_size: positive integer,sparse feature embedding_size.
:param hist_len_max: positive int, to indicate the max length of seq input
:param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param dnn_activation: Activation function to use in deep net
:param att_hidden_size: list,list of positive integer , the layer number and units in each layer of attention net
:param att_activation: Activation function to use in attention net
:param att_weight_normalization: bool.Whether normalize the attention score of local activation unit.
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(dnn_feature_columns)
sparse_feature_columns = list(filter(lambda x:isinstance(x,SparseFeat),dnn_feature_columns)) if dnn_feature_columns else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []
varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
history_feature_columns = []
sparse_varlen_feature_columns = []
history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
for fc in varlen_sparse_feature_columns:
feature_name = fc.name
if feature_name in history_fc_names:
history_feature_columns.append(fc)
else:
sparse_varlen_feature_columns.append(fc)
inputs_list = list(features.values())
embedding_dict = create_embedding_matrix(dnn_feature_columns,l2_reg_embedding,init_std,seed,embedding_size, prefix="")
query_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,history_feature_list,history_feature_list)#query是单独的
keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns, history_fc_names, history_fc_names)
dnn_input_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,mask_feat_list=history_feature_list)
dense_value_list = get_dense_input(features, dense_feature_columns)
sequence_embed_dict = varlen_embedding_lookup(embedding_dict,features,sparse_varlen_feature_columns)
sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns)
dnn_input_emb_list += sequence_embed_list
keys_emb = concat_fun(keys_emb_list,mask=True)
deep_input_emb = concat_fun(dnn_input_emb_list)
query_emb = concat_fun(query_emb_list,mask=True)
hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
weight_normalization=att_weight_normalization, supports_masking=True)([
query_emb, keys_emb])
deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
deep_input_emb = Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb],dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
| 52.366337 | 256 | 0.772169 |
from tensorflow.python.keras.layers import Dense,Concatenate, Flatten
from tensorflow.python.keras.models import Model
from ..inputs import build_input_features,create_embedding_matrix,SparseFeat,VarLenSparseFeat,DenseFeat,embedding_lookup,get_dense_input,varlen_embedding_lookup,get_varlen_pooling_list,combined_dnn_input
from ..layers.core import DNN, PredictionLayer
from ..layers.sequence import AttentionSequencePoolingLayer
from ..layers.utils import concat_fun, NoMask
def DIN(dnn_feature_columns, history_feature_list, embedding_size=8, hist_len_max=16, dnn_use_bn=False,
dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice",
att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
task='binary'):
features = build_input_features(dnn_feature_columns)
sparse_feature_columns = list(filter(lambda x:isinstance(x,SparseFeat),dnn_feature_columns)) if dnn_feature_columns else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []
varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
history_feature_columns = []
sparse_varlen_feature_columns = []
history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
for fc in varlen_sparse_feature_columns:
feature_name = fc.name
if feature_name in history_fc_names:
history_feature_columns.append(fc)
else:
sparse_varlen_feature_columns.append(fc)
inputs_list = list(features.values())
embedding_dict = create_embedding_matrix(dnn_feature_columns,l2_reg_embedding,init_std,seed,embedding_size, prefix="")
query_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,history_feature_list,history_feature_list)
keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns, history_fc_names, history_fc_names)
dnn_input_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,mask_feat_list=history_feature_list)
dense_value_list = get_dense_input(features, dense_feature_columns)
sequence_embed_dict = varlen_embedding_lookup(embedding_dict,features,sparse_varlen_feature_columns)
sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns)
dnn_input_emb_list += sequence_embed_list
keys_emb = concat_fun(keys_emb_list,mask=True)
deep_input_emb = concat_fun(dnn_input_emb_list)
query_emb = concat_fun(query_emb_list,mask=True)
hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
weight_normalization=att_weight_normalization, supports_masking=True)([
query_emb, keys_emb])
deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
deep_input_emb = Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb],dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
| true | true |
f72b252e105b7da5db34c619077f0de2012fa5c7 | 301 | py | Python | deeplab3/evaluators/__init__.py | crmauceri/pytorch-deeplab-xception | aec2cb7b0c09c346519c6bf22c2cbf419021fdc7 | [
"MIT"
] | 1 | 2021-12-11T08:21:19.000Z | 2021-12-11T08:21:19.000Z | deeplab3/evaluators/__init__.py | crmauceri/rgbd_deeplab | aec2cb7b0c09c346519c6bf22c2cbf419021fdc7 | [
"MIT"
] | null | null | null | deeplab3/evaluators/__init__.py | crmauceri/rgbd_deeplab | aec2cb7b0c09c346519c6bf22c2cbf419021fdc7 | [
"MIT"
] | null | null | null | from deeplab3.evaluators.segmentation_evaluator import SegmentationEvaluator
def make_evaluator(cfg, num_classes):
if cfg.EVALUATOR.NAME == "segmentation":
return SegmentationEvaluator(num_classes)
else:
raise ValueError("Model not implemented: {}".format(cfg.EVALUATOR.NAME)) | 43 | 80 | 0.76412 | from deeplab3.evaluators.segmentation_evaluator import SegmentationEvaluator
def make_evaluator(cfg, num_classes):
if cfg.EVALUATOR.NAME == "segmentation":
return SegmentationEvaluator(num_classes)
else:
raise ValueError("Model not implemented: {}".format(cfg.EVALUATOR.NAME)) | true | true |
f72b25859b28cd579a78605dc1ed921ca8af258c | 3,159 | py | Python | analytics/models.py | SmithJesko/volny-films | 7c50713eb1d2c2d5984700a5de20a12e4045e1b9 | [
"MIT"
] | 1 | 2021-02-23T00:12:43.000Z | 2021-02-23T00:12:43.000Z | analytics/models.py | SmithJesko/volny-films | 7c50713eb1d2c2d5984700a5de20a12e4045e1b9 | [
"MIT"
] | null | null | null | analytics/models.py | SmithJesko/volny-films | 7c50713eb1d2c2d5984700a5de20a12e4045e1b9 | [
"MIT"
] | 1 | 2021-02-23T06:04:13.000Z | 2021-02-23T06:04:13.000Z | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class ClientConnection(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
request_body = models.TextField(blank=True, null=True)
country_code = models.CharField(max_length=512, blank=True, null=True)
country_name = models.CharField(max_length=512, blank=True, null=True)
region_code = models.CharField(max_length=512, blank=True, null=True)
region_name = models.CharField(max_length=512, blank=True, null=True)
city = models.CharField(max_length=512, blank=True, null=True)
zip_code = models.CharField(max_length=512, blank=True, null=True)
latitude = models.CharField(max_length=512, blank=True, null=True)
longitude = models.CharField(max_length=512, blank=True, null=True)
metro_code = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return str(self.ip)
class Meta:
verbose_name = "Client Connection"
verbose_name_plural = "Client Connections"
@property
def title(self):
return str(self.ip)
# idk why tf i made these two seperate models, but now i'm too lazy to change
class UserClientConnection(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
request_body = models.TextField(blank=True, null=True)
country_code = models.CharField(max_length=512, blank=True, null=True)
country_name = models.CharField(max_length=512, blank=True, null=True)
region_code = models.CharField(max_length=512, blank=True, null=True)
region_name = models.CharField(max_length=512, blank=True, null=True)
city = models.CharField(max_length=512, blank=True, null=True)
zip_code = models.CharField(max_length=512, blank=True, null=True)
latitude = models.CharField(max_length=512, blank=True, null=True)
longitude = models.CharField(max_length=512, blank=True, null=True)
metro_code = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return str(self.ip)
class Meta:
verbose_name = "User Client Connection"
verbose_name_plural = "User Client Connections"
@property
def title(self):
return str(self.ip)
class MovieView(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
movie_id = models.CharField(max_length=512)
media_type = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return self.ip
class Meta:
verbose_name = "Movie View"
verbose_name_plural = "Movie Views"
@property
def title(self):
return str(self.ip) | 41.565789 | 83 | 0.719848 | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class ClientConnection(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
request_body = models.TextField(blank=True, null=True)
country_code = models.CharField(max_length=512, blank=True, null=True)
country_name = models.CharField(max_length=512, blank=True, null=True)
region_code = models.CharField(max_length=512, blank=True, null=True)
region_name = models.CharField(max_length=512, blank=True, null=True)
city = models.CharField(max_length=512, blank=True, null=True)
zip_code = models.CharField(max_length=512, blank=True, null=True)
latitude = models.CharField(max_length=512, blank=True, null=True)
longitude = models.CharField(max_length=512, blank=True, null=True)
metro_code = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return str(self.ip)
class Meta:
verbose_name = "Client Connection"
verbose_name_plural = "Client Connections"
@property
def title(self):
return str(self.ip)
class UserClientConnection(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
request_body = models.TextField(blank=True, null=True)
country_code = models.CharField(max_length=512, blank=True, null=True)
country_name = models.CharField(max_length=512, blank=True, null=True)
region_code = models.CharField(max_length=512, blank=True, null=True)
region_name = models.CharField(max_length=512, blank=True, null=True)
city = models.CharField(max_length=512, blank=True, null=True)
zip_code = models.CharField(max_length=512, blank=True, null=True)
latitude = models.CharField(max_length=512, blank=True, null=True)
longitude = models.CharField(max_length=512, blank=True, null=True)
metro_code = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return str(self.ip)
class Meta:
verbose_name = "User Client Connection"
verbose_name_plural = "User Client Connections"
@property
def title(self):
return str(self.ip)
class MovieView(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
movie_id = models.CharField(max_length=512)
media_type = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return self.ip
class Meta:
verbose_name = "Movie View"
verbose_name_plural = "Movie Views"
@property
def title(self):
return str(self.ip) | true | true |
f72b2611795c1b7d27319858d6c69d00eadf80ef | 32,514 | py | Python | old_projects/eola/chapter8p2.py | thevivekpandey/manim | 483dbfc232fa684e7722969221bd416fde8bd55a | [
"MIT"
] | 9 | 2019-12-17T04:59:53.000Z | 2020-11-10T21:02:41.000Z | old_projects/eola/chapter8p2.py | Hammer7/manim | a19a6317ec187f65efb0c8f46bc613b4a978d22a | [
"MIT"
] | 5 | 2021-03-19T03:01:04.000Z | 2022-03-11T23:57:24.000Z | old_projects/eola/chapter8p2.py | Hammer7/manim | a19a6317ec187f65efb0c8f46bc613b4a978d22a | [
"MIT"
] | 3 | 2020-04-12T16:50:57.000Z | 2020-07-19T17:53:53.000Z | from manimlib.imports import *
from old_projects.eola.chapter5 import get_det_text
from old_projects.eola.chapter8 import *
class OpeningQuote(Scene):
def construct(self):
words = TextMobject(
"From [Grothendieck], I have also learned not",
"to take glory in the ",
"difficulty of a proof:",
"difficulty means we have not understood.",
"The idea is to be able to ",
"paint a landscape",
"in which the proof is obvious.",
arg_separator = " "
)
words.set_color_by_tex("difficulty of a proof:", RED)
words.set_color_by_tex("paint a landscape", GREEN)
words.set_width(FRAME_WIDTH - 2)
words.to_edge(UP)
author = TextMobject("-Pierre Deligne")
author.set_color(YELLOW)
author.next_to(words, DOWN, buff = 0.5)
self.play(FadeIn(words))
self.wait(4)
self.play(Write(author, run_time = 3))
self.wait()
class CrossProductSymbols(Scene):
def construct(self):
v_tex, w_tex, p_tex = get_vect_tex(*"vwp")
equation = TexMobject(
v_tex, "\\times", w_tex, "=", p_tex
)
equation.set_color_by_tex(v_tex, V_COLOR)
equation.set_color_by_tex(w_tex, W_COLOR)
equation.set_color_by_tex(p_tex, P_COLOR)
brace = Brace(equation[-1])
brace.stretch_to_fit_width(0.7)
vector_text = brace.get_text("Vector")
vector_text.set_color(RED)
self.add(equation)
self.play(*list(map(Write, [brace, vector_text])))
self.wait()
class DeterminantTrickCopy(DeterminantTrick):
pass
class BruteForceVerification(Scene):
def construct(self):
v = Matrix(["v_1", "v_2", "v_3"])
w = Matrix(["w_1", "w_2", "w_3"])
v1, v2, v3 = v.get_entries()
w1, w2, w3 = w.get_entries()
v.set_color(V_COLOR)
w.set_color(W_COLOR)
def get_term(e1, e2, e3, e4):
group = VGroup(
e1.copy(), e2.copy(),
TexMobject("-"),
e3.copy(), e4.copy(),
)
group.arrange()
return group
cross = Matrix(list(it.starmap(get_term, [
(v2, w3, v3, w2),
(v3, w1, v1, w3),
(v2, w3, v3, w2),
])))
cross_product = VGroup(
v.copy(), TexMobject("\\times"), w.copy(),
TexMobject("="), cross.copy()
)
cross_product.arrange()
cross_product.scale(0.75)
formula_word = TextMobject("Numerical formula")
computation_words = TextMobject("""
Facts you could (painfully)
verify computationally
""")
computation_words.scale(0.75)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
v_line = Line(UP, DOWN).scale(FRAME_Y_RADIUS)
computation_words.to_edge(UP, buff = MED_SMALL_BUFF/2)
h_line.next_to(computation_words, DOWN)
formula_word.next_to(h_line, UP, buff = MED_SMALL_BUFF)
computation_words.shift(FRAME_X_RADIUS*RIGHT/2)
formula_word.shift(FRAME_X_RADIUS*LEFT/2)
cross_product.next_to(formula_word, DOWN, buff = LARGE_BUFF)
self.add(formula_word, computation_words)
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
Write(cross_product)
)
v_tex, w_tex = get_vect_tex(*"vw")
v_dot, w_dot = [
TexMobject(
tex, "\\cdot",
"(", v_tex, "\\times", w_tex, ")",
"= 0"
)
for tex in (v_tex, w_tex)
]
theta_def = TexMobject(
"\\theta",
"= \\cos^{-1} \\big(", v_tex, "\\cdot", w_tex, "/",
"(||", v_tex, "||", "\\cdot", "||", w_tex, "||)", "\\big)"
)
length_check = TexMobject(
"||", "(", v_tex, "\\times", w_tex, ")", "|| = ",
"(||", v_tex, "||)",
"(||", w_tex, "||)",
"\\sin(", "\\theta", ")"
)
last_point = h_line.get_center()+FRAME_X_RADIUS*RIGHT/2
max_width = FRAME_X_RADIUS-1
for mob in v_dot, w_dot, theta_def, length_check:
mob.set_color_by_tex(v_tex, V_COLOR)
mob.set_color_by_tex(w_tex, W_COLOR)
mob.set_color_by_tex("\\theta", GREEN)
mob.next_to(last_point, DOWN, buff = MED_SMALL_BUFF)
if mob.get_width() > max_width:
mob.set_width(max_width)
last_point = mob
self.play(FadeIn(mob))
self.wait()
class ButWeCanDoBetter(TeacherStudentsScene):
def construct(self):
self.teacher_says("But we can do \\\\ better than that")
self.change_student_modes(*["happy"]*3)
self.random_blink(3)
class Prerequisites(Scene):
def construct(self):
title = TextMobject("Prerequisites")
title.to_edge(UP)
title.set_color(YELLOW)
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_width(FRAME_X_RADIUS - 1)
left_rect, right_rect = [
rect.copy().shift(DOWN/2).to_edge(edge)
for edge in (LEFT, RIGHT)
]
chapter5 = TextMobject("""
\\centering
Chapter 5
Determinants
""")
chapter7 = TextMobject("""
\\centering
Chapter 7:
Dot products and duality
""")
self.add(title)
for chapter, rect in (chapter5, left_rect), (chapter7, right_rect):
if chapter.get_width() > rect.get_width():
chapter.set_width(rect.get_width())
chapter.next_to(rect, UP)
self.play(
Write(chapter5),
ShowCreation(left_rect)
)
self.play(
Write(chapter7),
ShowCreation(right_rect)
)
self.wait()
class DualityReview(TeacherStudentsScene):
def construct(self):
words = TextMobject("Quick", "duality", "review")
words[1].set_color_by_gradient(BLUE, YELLOW)
self.teacher_says(words, target_mode = "surprised")
self.change_student_modes("pondering")
self.random_blink(2)
class DotProductToTransformSymbol(Scene):
CONFIG = {
"vect_coords" : [2, 1]
}
def construct(self):
v_mob = TexMobject(get_vect_tex("v"))
v_mob.set_color(V_COLOR)
matrix = Matrix([self.vect_coords])
vector = Matrix(self.vect_coords)
matrix.set_column_colors(X_COLOR, Y_COLOR)
vector.set_column_colors(YELLOW)
_input = Matrix(["x", "y"])
_input.get_entries().set_color_by_gradient(X_COLOR, Y_COLOR)
left_input, right_input = [_input.copy() for x in range(2)]
dot, equals = list(map(TexMobject, ["\\cdot", "="]))
equation = VGroup(
vector, dot, left_input, equals,
matrix, right_input
)
equation.arrange()
left_brace = Brace(VGroup(vector, left_input))
right_brace = Brace(matrix, UP)
left_words = left_brace.get_text("Dot product")
right_words = right_brace.get_text("Transform")
right_words.set_width(right_brace.get_width())
right_v_brace = Brace(right_input, UP)
right_v_mob = v_mob.copy()
right_v_brace.put_at_tip(right_v_mob)
right_input.add(right_v_brace, right_v_mob)
left_v_brace = Brace(left_input, UP)
left_v_mob = v_mob.copy()
left_v_brace.put_at_tip(left_v_mob)
left_input.add(left_v_brace, left_v_mob)
self.add(matrix, right_input)
self.play(
GrowFromCenter(right_brace),
Write(right_words, run_time = 1)
)
self.wait()
self.play(
Write(equals),
Write(dot),
Transform(matrix.copy(), vector),
Transform(right_input.copy(), left_input)
)
self.play(
GrowFromCenter(left_brace),
Write(left_words, run_time = 1)
)
self.wait()
class MathematicalWild(Scene):
def construct(self):
title = TextMobject("In the mathematical wild")
title.to_edge(UP)
self.add(title)
randy = Randolph()
randy.shift(DOWN)
bubble = ThoughtBubble(width = 5, height = 4)
bubble.write("""
\\centering
Some linear
transformation
to the number line
""")
bubble.content.set_color(BLUE)
bubble.content.shift(MED_SMALL_BUFF*UP/2)
bubble.remove(*bubble[:-1])
bubble.add(bubble.content)
bubble.next_to(randy.get_corner(UP+RIGHT), RIGHT)
vector = Vector([1, 2])
vector.move_to(randy.get_corner(UP+LEFT), aligned_edge = DOWN+LEFT)
dual_words = TextMobject("Dual vector")
dual_words.set_color_by_gradient(BLUE, YELLOW)
dual_words.next_to(vector, LEFT)
self.add(randy)
self.play(Blink(randy))
self.play(FadeIn(bubble))
self.play(randy.change_mode, "sassy")
self.play(Blink(randy))
self.wait()
self.play(randy.look, UP+LEFT)
self.play(
ShowCreation(vector),
randy.change_mode, "raise_right_hand"
)
self.wait()
self.play(Write(dual_words))
self.play(Blink(randy))
self.wait()
class ThreeStepPlan(Scene):
def construct(self):
title = TextMobject("The plan")
title.set_color(YELLOW)
title.to_edge(UP)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
h_line.next_to(title, DOWN)
v_tex, w_tex = get_vect_tex(*"vw")
v_text, w_text, cross_text = [
"$%s$"%s
for s in (v_tex, w_tex, v_tex + "\\times" + w_tex)
]
steps = [
TextMobject(
"1. Define a 3d-to-1d", "linear \\\\", "transformation",
"in terms of", v_text, "and", w_text
),
TextMobject(
"2. Find its", "dual vector"
),
TextMobject(
"3. Show that this dual is", cross_text
)
]
linear, transformation = steps[0][1:1+2]
steps[0].set_color_by_tex(v_text, V_COLOR)
steps[0].set_color_by_tex(w_text, W_COLOR)
steps[1][1].set_color_by_gradient(BLUE, YELLOW)
steps[2].set_color_by_tex(cross_text, P_COLOR)
VGroup(*steps).arrange(
DOWN, aligned_edge = LEFT, buff = LARGE_BUFF
).next_to(h_line, DOWN, buff = MED_SMALL_BUFF)
self.add(title)
self.play(ShowCreation(h_line))
for step in steps:
self.play(Write(step, run_time = 2))
self.wait()
linear_transformation = TextMobject("Linear", "transformation")
linear_transformation.next_to(h_line, DOWN, MED_SMALL_BUFF)
det = self.get_det()
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_height(3.5)
left_right_arrow = TexMobject("\\Leftrightarrow")
left_right_arrow.shift(DOWN)
det.next_to(left_right_arrow, LEFT)
rect.next_to(left_right_arrow, RIGHT)
steps[0].remove(linear, transformation)
self.play(
Transform(
VGroup(linear, transformation),
linear_transformation
),
*list(map(FadeOut, steps))
)
self.wait()
self.play(Write(left_right_arrow))
self.play(Write(det))
self.play(ShowCreation(rect))
self.wait(0)
def get_det(self):
matrix = Matrix(np.array([
["\\hat{\\imath}", "\\hat{\\jmath}", "\\hat{k}"],
["v_%d"%d for d in range(1, 4)],
["w_%d"%d for d in range(1, 4)],
]).T)
matrix.set_column_colors(X_COLOR, V_COLOR, W_COLOR)
matrix.get_mob_matrix()[1, 0].set_color(Y_COLOR)
matrix.get_mob_matrix()[2, 0].set_color(Z_COLOR)
VGroup(*matrix.get_mob_matrix()[1, 1:]).shift(0.15*DOWN)
VGroup(*matrix.get_mob_matrix()[2, 1:]).shift(0.35*DOWN)
det_text = get_det_text(matrix)
det_text.add(matrix)
return det_text
class DefineDualTransform(Scene):
def construct(self):
self.add_title()
self.show_triple_cross_product()
self.write_function()
self.introduce_dual_vector()
self.expand_dot_product()
self.ask_question()
def add_title(self):
title = TextMobject("What a student might think")
title.not_real = TextMobject("Not the real cross product")
for mob in title, title.not_real:
mob.set_width(FRAME_X_RADIUS - 1)
mob.set_color(RED)
mob.to_edge(UP)
self.add(title)
self.title = title
def show_triple_cross_product(self):
colors = [WHITE, ORANGE, W_COLOR]
tex_mobs = list(map(TexMobject, get_vect_tex(*"uvw")))
u_tex, v_tex, w_tex = tex_mobs
arrays = [
Matrix(["%s_%d"%(s, d) for d in range(1, 4)])
for s in "uvw"
]
defs_equals = VGroup()
definitions = VGroup()
for array, tex_mob, color in zip(arrays, tex_mobs, colors):
array.set_column_colors(color)
tex_mob.set_color(color)
equals = TexMobject("=")
definition = VGroup(tex_mob, equals, array)
definition.arrange(RIGHT)
definitions.add(definition)
defs_equals.add(equals)
definitions.arrange(buff = MED_SMALL_BUFF)
definitions.shift(2*DOWN)
mobs_with_targets = list(it.chain(
tex_mobs, *[a.get_entries() for a in arrays]
))
for mob in mobs_with_targets:
mob.target = mob.copy()
matrix = Matrix(np.array([
[e.target for e in array.get_entries()]
for array in arrays
]).T)
det_text = get_det_text(matrix, background_rect = False)
syms = times1, times2, equals = [
TexMobject(sym)
for sym in ("\\times", "\\times", "=",)
]
triple_cross = VGroup(
u_tex.target, times1, v_tex.target, times2, w_tex.target, equals
)
triple_cross.arrange()
final_mobs = VGroup(triple_cross, VGroup(det_text, matrix))
final_mobs.arrange()
final_mobs.next_to(self.title, DOWN, buff = MED_SMALL_BUFF)
for mob in definitions, final_mobs:
mob.set_width(FRAME_X_RADIUS - 1)
for array in arrays:
brackets = array.get_brackets()
brackets.target = matrix.get_brackets()
mobs_with_targets.append(brackets)
for def_equals in defs_equals:
def_equals.target = equals
mobs_with_targets.append(def_equals)
self.play(FadeIn(
definitions,
run_time = 2,
lag_ratio = 0.5
))
self.wait(2)
self.play(*[
Transform(mob.copy(), mob.target)
for mob in tex_mobs
] + [
Write(times1),
Write(times2),
])
triple_cross.add(*self.get_mobjects_from_last_animation()[:3])
self.play(*[
Transform(mob.copy(), mob.target)
for mob in mobs_with_targets
if mob not in tex_mobs
])
u_entries = self.get_mobjects_from_last_animation()[:3]
v_entries = self.get_mobjects_from_last_animation()[3:6]
w_entries = self.get_mobjects_from_last_animation()[6:9]
self.play(Write(det_text))
self.wait(2)
self.det_text = det_text
self.definitions = definitions
self.u_entries = u_entries
self.v_entries = v_entries
self.w_entries = w_entries
self.matrix = matrix
self.triple_cross = triple_cross
self.v_tex, self.w_tex = v_tex, w_tex
self.equals = equals
def write_function(self):
brace = Brace(self.det_text, DOWN)
number_text = brace.get_text("Number")
self.play(Transform(self.title, self.title.not_real))
self.wait()
self.play(FadeOut(self.definitions))
self.play(
GrowFromCenter(brace),
Write(number_text)
)
self.wait()
x, y, z = variables = list(map(TexMobject, "xyz"))
for var, entry in zip(variables, self.u_entries):
var.scale(0.8)
var.move_to(entry)
entry.target = var
brace.target = Brace(z)
brace.target.stretch_to_fit_width(0.5)
number_text.target = brace.target.get_text("Variable")
v_brace = Brace(self.matrix.get_mob_matrix()[0, 1], UP)
w_brace = Brace(self.matrix.get_mob_matrix()[0, 2], UP)
for vect_brace, tex in (v_brace, self.v_tex), (w_brace, self.w_tex):
vect_brace.stretch_to_fit_width(brace.target.get_width())
new_tex = tex.copy()
vect_brace.put_at_tip(new_tex)
vect_brace.tex = new_tex
func_tex = TexMobject(
"f\\left(%s\\right)"%matrix_to_tex_string(list("xyz"))
)
func_tex.scale(0.7)
func_input = Matrix(list("xyz"))
func_input_template = VGroup(*func_tex[3:-2])
func_input.set_height(func_input_template.get_height())
func_input.next_to(VGroup(*func_tex[:3]), RIGHT)
VGroup(*func_tex[-2:]).next_to(func_input, RIGHT)
func_tex[0].scale_in_place(1.5)
func_tex = VGroup(
VGroup(*[func_tex[i] for i in (0, 1, 2, -2, -1)]),
func_input
)
func_tex.next_to(self.equals, LEFT)
self.play(
FadeOut(self.title),
FadeOut(self.triple_cross),
*[
Transform(mob, mob.target)
for mob in [brace, number_text]
]
)
self.play(*[
Transform(mob, mob.target)
for mob in self.u_entries
])
self.play(*[
Write(VGroup(vect_brace, vect_brace.tex))
for vect_brace in (v_brace, w_brace)
])
self.wait()
self.play(Write(func_tex))
self.wait()
self.func_tex = func_tex
self.variables_text = VGroup(brace, number_text)
def introduce_dual_vector(self):
everything = VGroup(*self.get_mobjects())
colors = [X_COLOR, Y_COLOR, Z_COLOR]
q_marks = VGroup(*list(map(TextMobject, "???")))
q_marks.scale(2)
q_marks.set_color_by_gradient(*colors)
title = VGroup(TextMobject("This function is linear"))
title.set_color(GREEN)
title.to_edge(UP)
matrix = Matrix([list(q_marks.copy())])
matrix.set_height(self.func_tex.get_height()/2)
dual_vector = Matrix(list(q_marks))
dual_vector.set_height(self.func_tex.get_height())
dual_vector.get_brackets()[0].shift(0.2*LEFT)
dual_vector.get_entries().shift(0.1*LEFT)
dual_vector.scale(1.25)
dual_dot = VGroup(
dual_vector,
TexMobject("\\cdot").next_to(dual_vector)
)
matrix_words = TextMobject("""
$1 \\times 3$ matrix encoding the
3d-to-1d linear transformation
""")
self.play(
Write(title, run_time = 2),
everything.shift, DOWN
)
self.remove(everything)
self.add(*everything)
self.wait()
func, func_input = self.func_tex
func_input.target = func_input.copy()
func_input.target.scale(1.2)
func_input.target.move_to(self.func_tex, aligned_edge = RIGHT)
matrix.next_to(func_input.target, LEFT)
dual_dot.next_to(func_input.target, LEFT)
matrix_words.next_to(matrix, DOWN, buff = 1.5)
matrix_words.shift_onto_screen()
matrix_arrow = Arrow(
matrix_words.get_top(),
matrix.get_bottom(),
color = WHITE
)
self.play(
Transform(func, matrix),
MoveToTarget(func_input),
FadeOut(self.variables_text),
)
self.wait()
self.play(
Write(matrix_words),
ShowCreation(matrix_arrow)
)
self.wait(2)
self.play(*list(map(FadeOut, [matrix_words, matrix_arrow])))
self.play(
Transform(func, dual_vector),
Write(dual_dot[1])
)
self.wait()
p_coords = VGroup(*list(map(TexMobject, [
"p_%d"%d for d in range(1, 4)
])))
p_coords.set_color(RED)
p_array = Matrix(list(p_coords))
p_array.set_height(dual_vector.get_height())
p_array.move_to(dual_vector, aligned_edge = RIGHT)
p_brace = Brace(p_array, UP)
p_tex = TexMobject(get_vect_tex("p"))
p_tex.set_color(P_COLOR)
p_brace.put_at_tip(p_tex)
self.play(
GrowFromCenter(p_brace),
Write(p_tex)
)
self.play(Transform(
func, p_array,
run_time = 2,
lag_ratio = 0.5
))
self.remove(func)
self.add(p_array)
self.wait()
self.play(FadeOut(title))
self.wait()
self.p_array = p_array
self.input_array = func_input
def expand_dot_product(self):
everything = VGroup(*self.get_mobjects())
self.play(everything.to_edge, UP)
self.remove(everything)
self.add(*everything)
to_fade = VGroup()
p_entries = self.p_array.get_entries()
input_entries = self.input_array.get_entries()
dot_components = VGroup()
for p, x, i in zip(p_entries, input_entries, it.count()):
if i == 2:
x.sym = TexMobject("=")
else:
x.sym = TexMobject("+")
p.sym = TexMobject("\\cdot")
p.target = p.copy().scale(2)
x.target = x.copy().scale(2)
component = VGroup(p.target, p.sym, x.target, x.sym)
component.arrange()
dot_components.add(component)
dot_components.arrange()
dot_components.next_to(ORIGIN, LEFT)
dot_components.shift(1.5*DOWN)
dot_arrow = Arrow(self.p_array.get_corner(DOWN+RIGHT), dot_components)
to_fade.add(dot_arrow)
self.play(ShowCreation(dot_arrow))
new_ps = VGroup()
for p, x in zip(p_entries, input_entries):
self.play(
MoveToTarget(p.copy()),
MoveToTarget(x.copy()),
Write(p.sym),
Write(x.sym)
)
mobs = self.get_mobjects_from_last_animation()
new_ps.add(mobs[0])
to_fade.add(*mobs[1:])
self.wait()
x, y, z = self.u_entries
v1, v2, v3 = self.v_entries
w1, w2, w3 = self.w_entries
cross_components = VGroup()
quints = [
(x, v2, w3, v3, w2),
(y, v3, w1, v1, w3),
(z, v1, w2, v2, w1),
]
quints = [
[m.copy() for m in quint]
for quint in quints
]
for i, quint in enumerate(quints):
sym_strings = ["(", "\\cdot", "-", "\\cdot", ")"]
if i < 2:
sym_strings[-1] += "+"
syms = list(map(TexMobject, sym_strings))
for mob, sym in zip(quint, syms):
mob.target = mob.copy()
mob.target.scale(1.5)
mob.sym = sym
quint_targets = [mob.target for mob in quint]
component = VGroup(*it.chain(*list(zip(quint_targets, syms))))
component.arrange()
cross_components.add(component)
to_fade.add(syms[0], syms[-1], quint[0])
cross_components.arrange(DOWN, aligned_edge = LEFT, buff = MED_SMALL_BUFF)
cross_components.next_to(dot_components, RIGHT)
for quint in quints:
self.play(*[
ApplyMethod(mob.set_color, YELLOW)
for mob in quint
])
self.wait(0.5)
self.play(*[
MoveToTarget(mob)
for mob in quint
] + [
Write(mob.sym)
for mob in quint
])
self.wait()
self.play(
ApplyFunction(
lambda m : m.arrange(
DOWN, buff = MED_SMALL_BUFF+SMALL_BUFF
).next_to(cross_components, LEFT),
new_ps
),
*list(map(FadeOut, to_fade))
)
self.play(*[
Write(TexMobject("=").next_to(p, buff = 2*SMALL_BUFF))
for p in new_ps
])
equals = self.get_mobjects_from_last_animation()
self.wait(2)
everything = everything.copy()
self.play(
FadeOut(VGroup(*self.get_mobjects())),
Animation(everything)
)
self.clear()
self.add(everything)
def ask_question(self):
everything = VGroup(*self.get_mobjects())
p_tex = "$%s$"%get_vect_tex("p")
question = TextMobject(
"What vector",
p_tex,
"has \\\\ the property that"
)
question.to_edge(UP)
question.set_color(YELLOW)
question.set_color_by_tex(p_tex, P_COLOR)
everything.target = everything.copy()
everything.target.next_to(
question, DOWN, buff = MED_SMALL_BUFF
)
self.play(
MoveToTarget(everything),
Write(question)
)
self.wait()
class WhyAreWeDoingThis(TeacherStudentsScene):
def construct(self):
self.student_says(
"Um...why are \\\\ we doing this?",
target_mode = "confused"
)
self.random_blink()
self.play(self.get_teacher().change_mode, "erm")
self.change_student_modes("plain", "confused", "raise_left_hand")
self.random_blink()
self.change_student_modes("pondering", "confused", "raise_left_hand")
self.random_blink(5)
class ThreeDTripleCrossProduct(Scene):
pass #Simple parallelepiped
class ThreeDMovingVariableVector(Scene):
pass #white u moves around
class ThreeDMovingVariableVectorWithCrossShowing(Scene):
pass #white u moves around, red p is present
class NowForTheCoolPart(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Now for the\\\\",
"cool part"
)
self.change_student_modes(*["happy"]*3)
self.random_blink(2)
self.teacher_says(
"Let's answer the same question,\\\\",
"but this time geometrically"
)
self.change_student_modes(*["pondering"]*3)
self.random_blink(2)
class ThreeDDotProductProjection(Scene):
pass #
class DotProductWords(Scene):
def construct(self):
p_tex = "$%s$"%get_vect_tex("p")
p_mob = TextMobject(p_tex)
p_mob.scale(1.5)
p_mob.set_color(P_COLOR)
input_array = Matrix(list("xyz"))
dot_product = VGroup(p_mob, Dot(radius = 0.07), input_array)
dot_product.arrange(buff = MED_SMALL_BUFF/2)
equals = TexMobject("=")
dot_product.next_to(equals, LEFT)
words = VGroup(*it.starmap(TextMobject, [
("(Length of projection)",),
("(Length of ", p_tex, ")",)
]))
times = TexMobject("\\times")
words[1].set_color_by_tex(p_tex, P_COLOR)
words[0].next_to(equals, RIGHT)
words[1].next_to(words[0], DOWN, aligned_edge = LEFT)
times.next_to(words[0], RIGHT)
everyone = VGroup(dot_product, equals, times, words)
everyone.center().set_width(FRAME_X_RADIUS - 1)
self.add(dot_product)
self.play(Write(equals))
self.play(Write(words[0]))
self.wait()
self.play(
Write(times),
Write(words[1])
)
self.wait()
class ThreeDProjectToPerpendicular(Scene):
pass #
class GeometricVolumeWords(Scene):
def construct(self):
v_tex, w_tex = [
"$%s$"%s
for s in get_vect_tex(*"vw")
]
words = VGroup(
TextMobject("(Area of", "parallelogram", ")$\\times$"),
TextMobject(
"(Component of $%s$"%matrix_to_tex_string(list("xyz")),
"perpendicular to", v_tex, "and", w_tex, ")"
)
)
words[0].set_color_by_tex("parallelogram", BLUE)
words[1].set_color_by_tex(v_tex, ORANGE)
words[1].set_color_by_tex(w_tex, W_COLOR)
words.arrange(RIGHT)
words.set_width(FRAME_WIDTH - 1)
words.to_edge(DOWN, buff = SMALL_BUFF)
for word in words:
self.play(Write(word))
self.wait()
class WriteXYZ(Scene):
def construct(self):
self.play(Write(Matrix(list("xyz"))))
self.wait()
class ThreeDDotProductWithCross(Scene):
pass
class CrossVectorEmphasisWords(Scene):
def construct(self):
v_tex, w_tex = ["$%s$"%s for s in get_vect_tex(*"vw")]
words = [
TextMobject("Perpendicular to", v_tex, "and", w_tex),
TextMobject("Length = (Area of ", "parallelogram", ")")
]
for word in words:
word.set_color_by_tex(v_tex, ORANGE)
word.set_color_by_tex(w_tex, W_COLOR)
word.set_color_by_tex("parallelogram", BLUE)
self.play(Write(word))
self.wait()
self.play(FadeOut(word))
class NextVideo(Scene):
def construct(self):
title = TextMobject("""
Next video: Change of basis
""")
title.to_edge(UP, buff = MED_SMALL_BUFF/2)
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_height(6)
rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(rect))
self.wait()
class ChangeOfBasisPreview(LinearTransformationScene):
CONFIG = {
"include_background_plane" : False,
"foreground_plane_kwargs" : {
"x_radius" : FRAME_WIDTH,
"y_radius" : FRAME_WIDTH,
"secondary_line_ratio" : 0
},
"t_matrix" : [[2, 1], [-1, 1]],
"i_target_color" : YELLOW,
"j_target_color" : MAROON_B,
"sum_color" : PINK,
"vector" : [-1, 2],
}
def construct(self):
randy = Randolph()
pinky = Mortimer(color = PINK)
randy.to_corner(DOWN+LEFT)
pinky.to_corner(DOWN+RIGHT)
self.plane.fade()
self.add_foreground_mobject(randy, pinky)
coords = Matrix(self.vector)
coords.add_to_back(BackgroundRectangle(coords))
self.add_foreground_mobject(coords)
coords.move_to(
randy.get_corner(UP+RIGHT),
aligned_edge = DOWN+LEFT
)
coords.target = coords.copy()
coords.target.move_to(
pinky.get_corner(UP+LEFT),
aligned_edge = DOWN+RIGHT
)
self.play(
Write(coords),
randy.change_mode, "speaking"
)
self.scale_basis_vectors()
self.apply_transposed_matrix(
self.t_matrix,
added_anims = [
MoveToTarget(coords),
ApplyMethod(pinky.change_mode, "speaking"),
ApplyMethod(randy.change_mode, "plain"),
]
)
self.play(
randy.change_mode, "erm",
self.i_hat.set_color, self.i_target_color,
self.j_hat.set_color, self.j_target_color,
)
self.i_hat.color = self.i_target_color
self.j_hat.color = self.j_target_color
self.scale_basis_vectors()
def scale_basis_vectors(self):
for vect in self.i_hat, self.j_hat:
vect.save_state()
self.play(self.i_hat.scale, self.vector[0])
self.play(self.j_hat.scale, self.vector[1])
self.play(self.j_hat.shift, self.i_hat.get_end())
sum_vect = Vector(self.j_hat.get_end(), color = self.sum_color)
self.play(ShowCreation(sum_vect))
self.wait(2)
self.play(
FadeOut(sum_vect),
self.i_hat.restore,
self.j_hat.restore,
)
self.wait()
| 32.975659 | 82 | 0.557637 | from manimlib.imports import *
from old_projects.eola.chapter5 import get_det_text
from old_projects.eola.chapter8 import *
class OpeningQuote(Scene):
def construct(self):
words = TextMobject(
"From [Grothendieck], I have also learned not",
"to take glory in the ",
"difficulty of a proof:",
"difficulty means we have not understood.",
"The idea is to be able to ",
"paint a landscape",
"in which the proof is obvious.",
arg_separator = " "
)
words.set_color_by_tex("difficulty of a proof:", RED)
words.set_color_by_tex("paint a landscape", GREEN)
words.set_width(FRAME_WIDTH - 2)
words.to_edge(UP)
author = TextMobject("-Pierre Deligne")
author.set_color(YELLOW)
author.next_to(words, DOWN, buff = 0.5)
self.play(FadeIn(words))
self.wait(4)
self.play(Write(author, run_time = 3))
self.wait()
class CrossProductSymbols(Scene):
def construct(self):
v_tex, w_tex, p_tex = get_vect_tex(*"vwp")
equation = TexMobject(
v_tex, "\\times", w_tex, "=", p_tex
)
equation.set_color_by_tex(v_tex, V_COLOR)
equation.set_color_by_tex(w_tex, W_COLOR)
equation.set_color_by_tex(p_tex, P_COLOR)
brace = Brace(equation[-1])
brace.stretch_to_fit_width(0.7)
vector_text = brace.get_text("Vector")
vector_text.set_color(RED)
self.add(equation)
self.play(*list(map(Write, [brace, vector_text])))
self.wait()
class DeterminantTrickCopy(DeterminantTrick):
pass
class BruteForceVerification(Scene):
def construct(self):
v = Matrix(["v_1", "v_2", "v_3"])
w = Matrix(["w_1", "w_2", "w_3"])
v1, v2, v3 = v.get_entries()
w1, w2, w3 = w.get_entries()
v.set_color(V_COLOR)
w.set_color(W_COLOR)
def get_term(e1, e2, e3, e4):
group = VGroup(
e1.copy(), e2.copy(),
TexMobject("-"),
e3.copy(), e4.copy(),
)
group.arrange()
return group
cross = Matrix(list(it.starmap(get_term, [
(v2, w3, v3, w2),
(v3, w1, v1, w3),
(v2, w3, v3, w2),
])))
cross_product = VGroup(
v.copy(), TexMobject("\\times"), w.copy(),
TexMobject("="), cross.copy()
)
cross_product.arrange()
cross_product.scale(0.75)
formula_word = TextMobject("Numerical formula")
computation_words = TextMobject("""
Facts you could (painfully)
verify computationally
""")
computation_words.scale(0.75)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
v_line = Line(UP, DOWN).scale(FRAME_Y_RADIUS)
computation_words.to_edge(UP, buff = MED_SMALL_BUFF/2)
h_line.next_to(computation_words, DOWN)
formula_word.next_to(h_line, UP, buff = MED_SMALL_BUFF)
computation_words.shift(FRAME_X_RADIUS*RIGHT/2)
formula_word.shift(FRAME_X_RADIUS*LEFT/2)
cross_product.next_to(formula_word, DOWN, buff = LARGE_BUFF)
self.add(formula_word, computation_words)
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
Write(cross_product)
)
v_tex, w_tex = get_vect_tex(*"vw")
v_dot, w_dot = [
TexMobject(
tex, "\\cdot",
"(", v_tex, "\\times", w_tex, ")",
"= 0"
)
for tex in (v_tex, w_tex)
]
theta_def = TexMobject(
"\\theta",
"= \\cos^{-1} \\big(", v_tex, "\\cdot", w_tex, "/",
"(||", v_tex, "||", "\\cdot", "||", w_tex, "||)", "\\big)"
)
length_check = TexMobject(
"||", "(", v_tex, "\\times", w_tex, ")", "|| = ",
"(||", v_tex, "||)",
"(||", w_tex, "||)",
"\\sin(", "\\theta", ")"
)
last_point = h_line.get_center()+FRAME_X_RADIUS*RIGHT/2
max_width = FRAME_X_RADIUS-1
for mob in v_dot, w_dot, theta_def, length_check:
mob.set_color_by_tex(v_tex, V_COLOR)
mob.set_color_by_tex(w_tex, W_COLOR)
mob.set_color_by_tex("\\theta", GREEN)
mob.next_to(last_point, DOWN, buff = MED_SMALL_BUFF)
if mob.get_width() > max_width:
mob.set_width(max_width)
last_point = mob
self.play(FadeIn(mob))
self.wait()
class ButWeCanDoBetter(TeacherStudentsScene):
def construct(self):
self.teacher_says("But we can do \\\\ better than that")
self.change_student_modes(*["happy"]*3)
self.random_blink(3)
class Prerequisites(Scene):
def construct(self):
title = TextMobject("Prerequisites")
title.to_edge(UP)
title.set_color(YELLOW)
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_width(FRAME_X_RADIUS - 1)
left_rect, right_rect = [
rect.copy().shift(DOWN/2).to_edge(edge)
for edge in (LEFT, RIGHT)
]
chapter5 = TextMobject("""
\\centering
Chapter 5
Determinants
""")
chapter7 = TextMobject("""
\\centering
Chapter 7:
Dot products and duality
""")
self.add(title)
for chapter, rect in (chapter5, left_rect), (chapter7, right_rect):
if chapter.get_width() > rect.get_width():
chapter.set_width(rect.get_width())
chapter.next_to(rect, UP)
self.play(
Write(chapter5),
ShowCreation(left_rect)
)
self.play(
Write(chapter7),
ShowCreation(right_rect)
)
self.wait()
class DualityReview(TeacherStudentsScene):
def construct(self):
words = TextMobject("Quick", "duality", "review")
words[1].set_color_by_gradient(BLUE, YELLOW)
self.teacher_says(words, target_mode = "surprised")
self.change_student_modes("pondering")
self.random_blink(2)
class DotProductToTransformSymbol(Scene):
CONFIG = {
"vect_coords" : [2, 1]
}
def construct(self):
v_mob = TexMobject(get_vect_tex("v"))
v_mob.set_color(V_COLOR)
matrix = Matrix([self.vect_coords])
vector = Matrix(self.vect_coords)
matrix.set_column_colors(X_COLOR, Y_COLOR)
vector.set_column_colors(YELLOW)
_input = Matrix(["x", "y"])
_input.get_entries().set_color_by_gradient(X_COLOR, Y_COLOR)
left_input, right_input = [_input.copy() for x in range(2)]
dot, equals = list(map(TexMobject, ["\\cdot", "="]))
equation = VGroup(
vector, dot, left_input, equals,
matrix, right_input
)
equation.arrange()
left_brace = Brace(VGroup(vector, left_input))
right_brace = Brace(matrix, UP)
left_words = left_brace.get_text("Dot product")
right_words = right_brace.get_text("Transform")
right_words.set_width(right_brace.get_width())
right_v_brace = Brace(right_input, UP)
right_v_mob = v_mob.copy()
right_v_brace.put_at_tip(right_v_mob)
right_input.add(right_v_brace, right_v_mob)
left_v_brace = Brace(left_input, UP)
left_v_mob = v_mob.copy()
left_v_brace.put_at_tip(left_v_mob)
left_input.add(left_v_brace, left_v_mob)
self.add(matrix, right_input)
self.play(
GrowFromCenter(right_brace),
Write(right_words, run_time = 1)
)
self.wait()
self.play(
Write(equals),
Write(dot),
Transform(matrix.copy(), vector),
Transform(right_input.copy(), left_input)
)
self.play(
GrowFromCenter(left_brace),
Write(left_words, run_time = 1)
)
self.wait()
class MathematicalWild(Scene):
def construct(self):
title = TextMobject("In the mathematical wild")
title.to_edge(UP)
self.add(title)
randy = Randolph()
randy.shift(DOWN)
bubble = ThoughtBubble(width = 5, height = 4)
bubble.write("""
\\centering
Some linear
transformation
to the number line
""")
bubble.content.set_color(BLUE)
bubble.content.shift(MED_SMALL_BUFF*UP/2)
bubble.remove(*bubble[:-1])
bubble.add(bubble.content)
bubble.next_to(randy.get_corner(UP+RIGHT), RIGHT)
vector = Vector([1, 2])
vector.move_to(randy.get_corner(UP+LEFT), aligned_edge = DOWN+LEFT)
dual_words = TextMobject("Dual vector")
dual_words.set_color_by_gradient(BLUE, YELLOW)
dual_words.next_to(vector, LEFT)
self.add(randy)
self.play(Blink(randy))
self.play(FadeIn(bubble))
self.play(randy.change_mode, "sassy")
self.play(Blink(randy))
self.wait()
self.play(randy.look, UP+LEFT)
self.play(
ShowCreation(vector),
randy.change_mode, "raise_right_hand"
)
self.wait()
self.play(Write(dual_words))
self.play(Blink(randy))
self.wait()
class ThreeStepPlan(Scene):
def construct(self):
title = TextMobject("The plan")
title.set_color(YELLOW)
title.to_edge(UP)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
h_line.next_to(title, DOWN)
v_tex, w_tex = get_vect_tex(*"vw")
v_text, w_text, cross_text = [
"$%s$"%s
for s in (v_tex, w_tex, v_tex + "\\times" + w_tex)
]
steps = [
TextMobject(
"1. Define a 3d-to-1d", "linear \\\\", "transformation",
"in terms of", v_text, "and", w_text
),
TextMobject(
"2. Find its", "dual vector"
),
TextMobject(
"3. Show that this dual is", cross_text
)
]
linear, transformation = steps[0][1:1+2]
steps[0].set_color_by_tex(v_text, V_COLOR)
steps[0].set_color_by_tex(w_text, W_COLOR)
steps[1][1].set_color_by_gradient(BLUE, YELLOW)
steps[2].set_color_by_tex(cross_text, P_COLOR)
VGroup(*steps).arrange(
DOWN, aligned_edge = LEFT, buff = LARGE_BUFF
).next_to(h_line, DOWN, buff = MED_SMALL_BUFF)
self.add(title)
self.play(ShowCreation(h_line))
for step in steps:
self.play(Write(step, run_time = 2))
self.wait()
linear_transformation = TextMobject("Linear", "transformation")
linear_transformation.next_to(h_line, DOWN, MED_SMALL_BUFF)
det = self.get_det()
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_height(3.5)
left_right_arrow = TexMobject("\\Leftrightarrow")
left_right_arrow.shift(DOWN)
det.next_to(left_right_arrow, LEFT)
rect.next_to(left_right_arrow, RIGHT)
steps[0].remove(linear, transformation)
self.play(
Transform(
VGroup(linear, transformation),
linear_transformation
),
*list(map(FadeOut, steps))
)
self.wait()
self.play(Write(left_right_arrow))
self.play(Write(det))
self.play(ShowCreation(rect))
self.wait(0)
def get_det(self):
matrix = Matrix(np.array([
["\\hat{\\imath}", "\\hat{\\jmath}", "\\hat{k}"],
["v_%d"%d for d in range(1, 4)],
["w_%d"%d for d in range(1, 4)],
]).T)
matrix.set_column_colors(X_COLOR, V_COLOR, W_COLOR)
matrix.get_mob_matrix()[1, 0].set_color(Y_COLOR)
matrix.get_mob_matrix()[2, 0].set_color(Z_COLOR)
VGroup(*matrix.get_mob_matrix()[1, 1:]).shift(0.15*DOWN)
VGroup(*matrix.get_mob_matrix()[2, 1:]).shift(0.35*DOWN)
det_text = get_det_text(matrix)
det_text.add(matrix)
return det_text
class DefineDualTransform(Scene):
def construct(self):
self.add_title()
self.show_triple_cross_product()
self.write_function()
self.introduce_dual_vector()
self.expand_dot_product()
self.ask_question()
def add_title(self):
title = TextMobject("What a student might think")
title.not_real = TextMobject("Not the real cross product")
for mob in title, title.not_real:
mob.set_width(FRAME_X_RADIUS - 1)
mob.set_color(RED)
mob.to_edge(UP)
self.add(title)
self.title = title
def show_triple_cross_product(self):
colors = [WHITE, ORANGE, W_COLOR]
tex_mobs = list(map(TexMobject, get_vect_tex(*"uvw")))
u_tex, v_tex, w_tex = tex_mobs
arrays = [
Matrix(["%s_%d"%(s, d) for d in range(1, 4)])
for s in "uvw"
]
defs_equals = VGroup()
definitions = VGroup()
for array, tex_mob, color in zip(arrays, tex_mobs, colors):
array.set_column_colors(color)
tex_mob.set_color(color)
equals = TexMobject("=")
definition = VGroup(tex_mob, equals, array)
definition.arrange(RIGHT)
definitions.add(definition)
defs_equals.add(equals)
definitions.arrange(buff = MED_SMALL_BUFF)
definitions.shift(2*DOWN)
mobs_with_targets = list(it.chain(
tex_mobs, *[a.get_entries() for a in arrays]
))
for mob in mobs_with_targets:
mob.target = mob.copy()
matrix = Matrix(np.array([
[e.target for e in array.get_entries()]
for array in arrays
]).T)
det_text = get_det_text(matrix, background_rect = False)
syms = times1, times2, equals = [
TexMobject(sym)
for sym in ("\\times", "\\times", "=",)
]
triple_cross = VGroup(
u_tex.target, times1, v_tex.target, times2, w_tex.target, equals
)
triple_cross.arrange()
final_mobs = VGroup(triple_cross, VGroup(det_text, matrix))
final_mobs.arrange()
final_mobs.next_to(self.title, DOWN, buff = MED_SMALL_BUFF)
for mob in definitions, final_mobs:
mob.set_width(FRAME_X_RADIUS - 1)
for array in arrays:
brackets = array.get_brackets()
brackets.target = matrix.get_brackets()
mobs_with_targets.append(brackets)
for def_equals in defs_equals:
def_equals.target = equals
mobs_with_targets.append(def_equals)
self.play(FadeIn(
definitions,
run_time = 2,
lag_ratio = 0.5
))
self.wait(2)
self.play(*[
Transform(mob.copy(), mob.target)
for mob in tex_mobs
] + [
Write(times1),
Write(times2),
])
triple_cross.add(*self.get_mobjects_from_last_animation()[:3])
self.play(*[
Transform(mob.copy(), mob.target)
for mob in mobs_with_targets
if mob not in tex_mobs
])
u_entries = self.get_mobjects_from_last_animation()[:3]
v_entries = self.get_mobjects_from_last_animation()[3:6]
w_entries = self.get_mobjects_from_last_animation()[6:9]
self.play(Write(det_text))
self.wait(2)
self.det_text = det_text
self.definitions = definitions
self.u_entries = u_entries
self.v_entries = v_entries
self.w_entries = w_entries
self.matrix = matrix
self.triple_cross = triple_cross
self.v_tex, self.w_tex = v_tex, w_tex
self.equals = equals
def write_function(self):
brace = Brace(self.det_text, DOWN)
number_text = brace.get_text("Number")
self.play(Transform(self.title, self.title.not_real))
self.wait()
self.play(FadeOut(self.definitions))
self.play(
GrowFromCenter(brace),
Write(number_text)
)
self.wait()
x, y, z = variables = list(map(TexMobject, "xyz"))
for var, entry in zip(variables, self.u_entries):
var.scale(0.8)
var.move_to(entry)
entry.target = var
brace.target = Brace(z)
brace.target.stretch_to_fit_width(0.5)
number_text.target = brace.target.get_text("Variable")
v_brace = Brace(self.matrix.get_mob_matrix()[0, 1], UP)
w_brace = Brace(self.matrix.get_mob_matrix()[0, 2], UP)
for vect_brace, tex in (v_brace, self.v_tex), (w_brace, self.w_tex):
vect_brace.stretch_to_fit_width(brace.target.get_width())
new_tex = tex.copy()
vect_brace.put_at_tip(new_tex)
vect_brace.tex = new_tex
func_tex = TexMobject(
"f\\left(%s\\right)"%matrix_to_tex_string(list("xyz"))
)
func_tex.scale(0.7)
func_input = Matrix(list("xyz"))
func_input_template = VGroup(*func_tex[3:-2])
func_input.set_height(func_input_template.get_height())
func_input.next_to(VGroup(*func_tex[:3]), RIGHT)
VGroup(*func_tex[-2:]).next_to(func_input, RIGHT)
func_tex[0].scale_in_place(1.5)
func_tex = VGroup(
VGroup(*[func_tex[i] for i in (0, 1, 2, -2, -1)]),
func_input
)
func_tex.next_to(self.equals, LEFT)
self.play(
FadeOut(self.title),
FadeOut(self.triple_cross),
*[
Transform(mob, mob.target)
for mob in [brace, number_text]
]
)
self.play(*[
Transform(mob, mob.target)
for mob in self.u_entries
])
self.play(*[
Write(VGroup(vect_brace, vect_brace.tex))
for vect_brace in (v_brace, w_brace)
])
self.wait()
self.play(Write(func_tex))
self.wait()
self.func_tex = func_tex
self.variables_text = VGroup(brace, number_text)
def introduce_dual_vector(self):
everything = VGroup(*self.get_mobjects())
colors = [X_COLOR, Y_COLOR, Z_COLOR]
q_marks = VGroup(*list(map(TextMobject, "???")))
q_marks.scale(2)
q_marks.set_color_by_gradient(*colors)
title = VGroup(TextMobject("This function is linear"))
title.set_color(GREEN)
title.to_edge(UP)
matrix = Matrix([list(q_marks.copy())])
matrix.set_height(self.func_tex.get_height()/2)
dual_vector = Matrix(list(q_marks))
dual_vector.set_height(self.func_tex.get_height())
dual_vector.get_brackets()[0].shift(0.2*LEFT)
dual_vector.get_entries().shift(0.1*LEFT)
dual_vector.scale(1.25)
dual_dot = VGroup(
dual_vector,
TexMobject("\\cdot").next_to(dual_vector)
)
matrix_words = TextMobject("""
$1 \\times 3$ matrix encoding the
3d-to-1d linear transformation
""")
self.play(
Write(title, run_time = 2),
everything.shift, DOWN
)
self.remove(everything)
self.add(*everything)
self.wait()
func, func_input = self.func_tex
func_input.target = func_input.copy()
func_input.target.scale(1.2)
func_input.target.move_to(self.func_tex, aligned_edge = RIGHT)
matrix.next_to(func_input.target, LEFT)
dual_dot.next_to(func_input.target, LEFT)
matrix_words.next_to(matrix, DOWN, buff = 1.5)
matrix_words.shift_onto_screen()
matrix_arrow = Arrow(
matrix_words.get_top(),
matrix.get_bottom(),
color = WHITE
)
self.play(
Transform(func, matrix),
MoveToTarget(func_input),
FadeOut(self.variables_text),
)
self.wait()
self.play(
Write(matrix_words),
ShowCreation(matrix_arrow)
)
self.wait(2)
self.play(*list(map(FadeOut, [matrix_words, matrix_arrow])))
self.play(
Transform(func, dual_vector),
Write(dual_dot[1])
)
self.wait()
p_coords = VGroup(*list(map(TexMobject, [
"p_%d"%d for d in range(1, 4)
])))
p_coords.set_color(RED)
p_array = Matrix(list(p_coords))
p_array.set_height(dual_vector.get_height())
p_array.move_to(dual_vector, aligned_edge = RIGHT)
p_brace = Brace(p_array, UP)
p_tex = TexMobject(get_vect_tex("p"))
p_tex.set_color(P_COLOR)
p_brace.put_at_tip(p_tex)
self.play(
GrowFromCenter(p_brace),
Write(p_tex)
)
self.play(Transform(
func, p_array,
run_time = 2,
lag_ratio = 0.5
))
self.remove(func)
self.add(p_array)
self.wait()
self.play(FadeOut(title))
self.wait()
self.p_array = p_array
self.input_array = func_input
def expand_dot_product(self):
everything = VGroup(*self.get_mobjects())
self.play(everything.to_edge, UP)
self.remove(everything)
self.add(*everything)
to_fade = VGroup()
p_entries = self.p_array.get_entries()
input_entries = self.input_array.get_entries()
dot_components = VGroup()
for p, x, i in zip(p_entries, input_entries, it.count()):
if i == 2:
x.sym = TexMobject("=")
else:
x.sym = TexMobject("+")
p.sym = TexMobject("\\cdot")
p.target = p.copy().scale(2)
x.target = x.copy().scale(2)
component = VGroup(p.target, p.sym, x.target, x.sym)
component.arrange()
dot_components.add(component)
dot_components.arrange()
dot_components.next_to(ORIGIN, LEFT)
dot_components.shift(1.5*DOWN)
dot_arrow = Arrow(self.p_array.get_corner(DOWN+RIGHT), dot_components)
to_fade.add(dot_arrow)
self.play(ShowCreation(dot_arrow))
new_ps = VGroup()
for p, x in zip(p_entries, input_entries):
self.play(
MoveToTarget(p.copy()),
MoveToTarget(x.copy()),
Write(p.sym),
Write(x.sym)
)
mobs = self.get_mobjects_from_last_animation()
new_ps.add(mobs[0])
to_fade.add(*mobs[1:])
self.wait()
x, y, z = self.u_entries
v1, v2, v3 = self.v_entries
w1, w2, w3 = self.w_entries
cross_components = VGroup()
quints = [
(x, v2, w3, v3, w2),
(y, v3, w1, v1, w3),
(z, v1, w2, v2, w1),
]
quints = [
[m.copy() for m in quint]
for quint in quints
]
for i, quint in enumerate(quints):
sym_strings = ["(", "\\cdot", "-", "\\cdot", ")"]
if i < 2:
sym_strings[-1] += "+"
syms = list(map(TexMobject, sym_strings))
for mob, sym in zip(quint, syms):
mob.target = mob.copy()
mob.target.scale(1.5)
mob.sym = sym
quint_targets = [mob.target for mob in quint]
component = VGroup(*it.chain(*list(zip(quint_targets, syms))))
component.arrange()
cross_components.add(component)
to_fade.add(syms[0], syms[-1], quint[0])
cross_components.arrange(DOWN, aligned_edge = LEFT, buff = MED_SMALL_BUFF)
cross_components.next_to(dot_components, RIGHT)
for quint in quints:
self.play(*[
ApplyMethod(mob.set_color, YELLOW)
for mob in quint
])
self.wait(0.5)
self.play(*[
MoveToTarget(mob)
for mob in quint
] + [
Write(mob.sym)
for mob in quint
])
self.wait()
self.play(
ApplyFunction(
lambda m : m.arrange(
DOWN, buff = MED_SMALL_BUFF+SMALL_BUFF
).next_to(cross_components, LEFT),
new_ps
),
*list(map(FadeOut, to_fade))
)
self.play(*[
Write(TexMobject("=").next_to(p, buff = 2*SMALL_BUFF))
for p in new_ps
])
equals = self.get_mobjects_from_last_animation()
self.wait(2)
everything = everything.copy()
self.play(
FadeOut(VGroup(*self.get_mobjects())),
Animation(everything)
)
self.clear()
self.add(everything)
def ask_question(self):
everything = VGroup(*self.get_mobjects())
p_tex = "$%s$"%get_vect_tex("p")
question = TextMobject(
"What vector",
p_tex,
"has \\\\ the property that"
)
question.to_edge(UP)
question.set_color(YELLOW)
question.set_color_by_tex(p_tex, P_COLOR)
everything.target = everything.copy()
everything.target.next_to(
question, DOWN, buff = MED_SMALL_BUFF
)
self.play(
MoveToTarget(everything),
Write(question)
)
self.wait()
class WhyAreWeDoingThis(TeacherStudentsScene):
def construct(self):
self.student_says(
"Um...why are \\\\ we doing this?",
target_mode = "confused"
)
self.random_blink()
self.play(self.get_teacher().change_mode, "erm")
self.change_student_modes("plain", "confused", "raise_left_hand")
self.random_blink()
self.change_student_modes("pondering", "confused", "raise_left_hand")
self.random_blink(5)
class ThreeDTripleCrossProduct(Scene):
pass
class ThreeDMovingVariableVector(Scene):
pass
class ThreeDMovingVariableVectorWithCrossShowing(Scene):
pass
class NowForTheCoolPart(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"Now for the\\\\",
"cool part"
)
self.change_student_modes(*["happy"]*3)
self.random_blink(2)
self.teacher_says(
"Let's answer the same question,\\\\",
"but this time geometrically"
)
self.change_student_modes(*["pondering"]*3)
self.random_blink(2)
class ThreeDDotProductProjection(Scene):
pass #
class DotProductWords(Scene):
def construct(self):
p_tex = "$%s$"%get_vect_tex("p")
p_mob = TextMobject(p_tex)
p_mob.scale(1.5)
p_mob.set_color(P_COLOR)
input_array = Matrix(list("xyz"))
dot_product = VGroup(p_mob, Dot(radius = 0.07), input_array)
dot_product.arrange(buff = MED_SMALL_BUFF/2)
equals = TexMobject("=")
dot_product.next_to(equals, LEFT)
words = VGroup(*it.starmap(TextMobject, [
("(Length of projection)",),
("(Length of ", p_tex, ")",)
]))
times = TexMobject("\\times")
words[1].set_color_by_tex(p_tex, P_COLOR)
words[0].next_to(equals, RIGHT)
words[1].next_to(words[0], DOWN, aligned_edge = LEFT)
times.next_to(words[0], RIGHT)
everyone = VGroup(dot_product, equals, times, words)
everyone.center().set_width(FRAME_X_RADIUS - 1)
self.add(dot_product)
self.play(Write(equals))
self.play(Write(words[0]))
self.wait()
self.play(
Write(times),
Write(words[1])
)
self.wait()
class ThreeDProjectToPerpendicular(Scene):
pass #
class GeometricVolumeWords(Scene):
def construct(self):
v_tex, w_tex = [
"$%s$"%s
for s in get_vect_tex(*"vw")
]
words = VGroup(
TextMobject("(Area of", "parallelogram", ")$\\times$"),
TextMobject(
"(Component of $%s$"%matrix_to_tex_string(list("xyz")),
"perpendicular to", v_tex, "and", w_tex, ")"
)
)
words[0].set_color_by_tex("parallelogram", BLUE)
words[1].set_color_by_tex(v_tex, ORANGE)
words[1].set_color_by_tex(w_tex, W_COLOR)
words.arrange(RIGHT)
words.set_width(FRAME_WIDTH - 1)
words.to_edge(DOWN, buff = SMALL_BUFF)
for word in words:
self.play(Write(word))
self.wait()
class WriteXYZ(Scene):
def construct(self):
self.play(Write(Matrix(list("xyz"))))
self.wait()
class ThreeDDotProductWithCross(Scene):
pass
class CrossVectorEmphasisWords(Scene):
def construct(self):
v_tex, w_tex = ["$%s$"%s for s in get_vect_tex(*"vw")]
words = [
TextMobject("Perpendicular to", v_tex, "and", w_tex),
TextMobject("Length = (Area of ", "parallelogram", ")")
]
for word in words:
word.set_color_by_tex(v_tex, ORANGE)
word.set_color_by_tex(w_tex, W_COLOR)
word.set_color_by_tex("parallelogram", BLUE)
self.play(Write(word))
self.wait()
self.play(FadeOut(word))
class NextVideo(Scene):
def construct(self):
title = TextMobject("""
Next video: Change of basis
""")
title.to_edge(UP, buff = MED_SMALL_BUFF/2)
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_height(6)
rect.next_to(title, DOWN)
self.add(title)
self.play(ShowCreation(rect))
self.wait()
class ChangeOfBasisPreview(LinearTransformationScene):
CONFIG = {
"include_background_plane" : False,
"foreground_plane_kwargs" : {
"x_radius" : FRAME_WIDTH,
"y_radius" : FRAME_WIDTH,
"secondary_line_ratio" : 0
},
"t_matrix" : [[2, 1], [-1, 1]],
"i_target_color" : YELLOW,
"j_target_color" : MAROON_B,
"sum_color" : PINK,
"vector" : [-1, 2],
}
def construct(self):
randy = Randolph()
pinky = Mortimer(color = PINK)
randy.to_corner(DOWN+LEFT)
pinky.to_corner(DOWN+RIGHT)
self.plane.fade()
self.add_foreground_mobject(randy, pinky)
coords = Matrix(self.vector)
coords.add_to_back(BackgroundRectangle(coords))
self.add_foreground_mobject(coords)
coords.move_to(
randy.get_corner(UP+RIGHT),
aligned_edge = DOWN+LEFT
)
coords.target = coords.copy()
coords.target.move_to(
pinky.get_corner(UP+LEFT),
aligned_edge = DOWN+RIGHT
)
self.play(
Write(coords),
randy.change_mode, "speaking"
)
self.scale_basis_vectors()
self.apply_transposed_matrix(
self.t_matrix,
added_anims = [
MoveToTarget(coords),
ApplyMethod(pinky.change_mode, "speaking"),
ApplyMethod(randy.change_mode, "plain"),
]
)
self.play(
randy.change_mode, "erm",
self.i_hat.set_color, self.i_target_color,
self.j_hat.set_color, self.j_target_color,
)
self.i_hat.color = self.i_target_color
self.j_hat.color = self.j_target_color
self.scale_basis_vectors()
def scale_basis_vectors(self):
for vect in self.i_hat, self.j_hat:
vect.save_state()
self.play(self.i_hat.scale, self.vector[0])
self.play(self.j_hat.scale, self.vector[1])
self.play(self.j_hat.shift, self.i_hat.get_end())
sum_vect = Vector(self.j_hat.get_end(), color = self.sum_color)
self.play(ShowCreation(sum_vect))
self.wait(2)
self.play(
FadeOut(sum_vect),
self.i_hat.restore,
self.j_hat.restore,
)
self.wait()
| true | true |
f72b264401ddefa4e28e25f16a1019753ba3292c | 1,370 | py | Python | python/coffer/coins/btc.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | python/coffer/coins/btc.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | python/coffer/coins/btc.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | from ..wallet import *
from _coin import *
from ..bip32 import Bip32
from blockchain._insight import InsightBlockchainInterface
from blockchain._interface import MultiBlockchainInterface
from impl._segwitcoin import *
class BTC(SegwitCoin):
def __init__(self,is_testnet=False):
#self.supported=True
if(not is_testnet):
pkh_prefix=0x00
sh_prefix=0x05
wif_prefix=0x80
bech32_prefix="bc"
else:
pkh_prefix=0x6F
sh_prefix=0xC4
wif_prefix=0xEF
bech32_prefix="tb"
sig_prefix=b'Bitcoin Signed Message:\n'
super(BTC,self).__init__('BTC',is_testnet=is_testnet,
pkh_prefix=pkh_prefix,
sh_prefix=sh_prefix,
wif_prefix=wif_prefix,
sig_prefix=sig_prefix,bech32_prefix=bech32_prefix)
def blockchain(self,*args,**kwargs):
subcoins=[]
if(not self.is_testnet):
insighturls=[
"https://insight.bitpay.com/api",
"https://blockexplorer.com/api",
"https://localbitcoinschain.com/api",
"https://bitcore2.trezor.io/api",
"https://btc.blockdozer.com/insight-api"
]
else:
insighturls=[
"https://tbtc.blockdozer.com/insight-api",
"https://testnet.blockexplorer.com/api"
#"https://test-insight.bitpay.com/api" This is testnetv1, doesn't work
]
insights=[InsightBlockchainInterface(self,insighturls)]
subcoins.extend(insights)
return MultiBlockchainInterface(self,subcoins).select()
| 26.346154 | 75 | 0.734307 | from ..wallet import *
from _coin import *
from ..bip32 import Bip32
from blockchain._insight import InsightBlockchainInterface
from blockchain._interface import MultiBlockchainInterface
from impl._segwitcoin import *
class BTC(SegwitCoin):
def __init__(self,is_testnet=False):
if(not is_testnet):
pkh_prefix=0x00
sh_prefix=0x05
wif_prefix=0x80
bech32_prefix="bc"
else:
pkh_prefix=0x6F
sh_prefix=0xC4
wif_prefix=0xEF
bech32_prefix="tb"
sig_prefix=b'Bitcoin Signed Message:\n'
super(BTC,self).__init__('BTC',is_testnet=is_testnet,
pkh_prefix=pkh_prefix,
sh_prefix=sh_prefix,
wif_prefix=wif_prefix,
sig_prefix=sig_prefix,bech32_prefix=bech32_prefix)
def blockchain(self,*args,**kwargs):
subcoins=[]
if(not self.is_testnet):
insighturls=[
"https://insight.bitpay.com/api",
"https://blockexplorer.com/api",
"https://localbitcoinschain.com/api",
"https://bitcore2.trezor.io/api",
"https://btc.blockdozer.com/insight-api"
]
else:
insighturls=[
"https://tbtc.blockdozer.com/insight-api",
"https://testnet.blockexplorer.com/api"
]
insights=[InsightBlockchainInterface(self,insighturls)]
subcoins.extend(insights)
return MultiBlockchainInterface(self,subcoins).select()
| true | true |
f72b27956bef78d99560b5b1289b72d9c87c03d4 | 1,672 | py | Python | adminmgr/media/code/A3/task2/BD_151_987_1496_1503_KYP9LpV.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A3/task2/BD_151_987_1496_1503_KYP9LpV.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A3/task2/BD_151_987_1496_1503_KYP9LpV.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | from pyspark.sql import SparkSession
from pyspark.sql.functions import explode,split,desc,max
from pyspark.sql.types import *
from pyspark.sql.types import StringType, StructType, StructField
spark = SparkSession \
.builder \
.appName("StructuredStreaming") \
.getOrCreate()
inputpath="hdfs://localhost:9000/stream/"
schema = StructType([ StructField("ID", StringType(), True),
StructField("Lang", StringType(), True),
StructField("Date", StringType(), True),
StructField("Source", StringType(), True),
StructField("Len", StringType(), True),
StructField("Likes", StringType(), True),
StructField("RTs", StringType(), True),
StructField("Hashtags", StringType(), True),
StructField("UserMentionNames", StringType(), True),
StructField("UserMentionID", StringType(), True),
StructField("name", StringType(), True),
StructField("Place", StringType(), True),
StructField("Followers", StringType(), True),
StructField("Friends", StringType(), True)])
lines = spark \
.readStream \
.schema(schema) \
.option("sep", ";") \
.csv(inputpath)
inputDF = lines.withColumn("FRRatio",lines.Followers/lines.Friends)
inputDF = inputDF.groupBy("name").agg(max("FRRatio").alias("FRRatio")).sort(desc("FRRatio")).select("name","FRRatio")
query=inputDF.writeStream.outputMode("complete").option("numRows",1).format("console").start()
query.awaitTermination(60)
query.stop()
| 44 | 117 | 0.600478 | from pyspark.sql import SparkSession
from pyspark.sql.functions import explode,split,desc,max
from pyspark.sql.types import *
from pyspark.sql.types import StringType, StructType, StructField
spark = SparkSession \
.builder \
.appName("StructuredStreaming") \
.getOrCreate()
inputpath="hdfs://localhost:9000/stream/"
schema = StructType([ StructField("ID", StringType(), True),
StructField("Lang", StringType(), True),
StructField("Date", StringType(), True),
StructField("Source", StringType(), True),
StructField("Len", StringType(), True),
StructField("Likes", StringType(), True),
StructField("RTs", StringType(), True),
StructField("Hashtags", StringType(), True),
StructField("UserMentionNames", StringType(), True),
StructField("UserMentionID", StringType(), True),
StructField("name", StringType(), True),
StructField("Place", StringType(), True),
StructField("Followers", StringType(), True),
StructField("Friends", StringType(), True)])
lines = spark \
.readStream \
.schema(schema) \
.option("sep", ";") \
.csv(inputpath)
inputDF = lines.withColumn("FRRatio",lines.Followers/lines.Friends)
inputDF = inputDF.groupBy("name").agg(max("FRRatio").alias("FRRatio")).sort(desc("FRRatio")).select("name","FRRatio")
query=inputDF.writeStream.outputMode("complete").option("numRows",1).format("console").start()
query.awaitTermination(60)
query.stop()
| true | true |
f72b2840162bfc1b4ca923abd4640365761a2d0e | 19,645 | py | Python | wagtail_wordpress_import/test/tests/test_wordpress_item.py | fabienheureux/wagtail-wordpress-import | 3c27330258e24a6b52f3d580060f607706bbc9d0 | [
"MIT"
] | null | null | null | wagtail_wordpress_import/test/tests/test_wordpress_item.py | fabienheureux/wagtail-wordpress-import | 3c27330258e24a6b52f3d580060f607706bbc9d0 | [
"MIT"
] | null | null | null | wagtail_wordpress_import/test/tests/test_wordpress_item.py | fabienheureux/wagtail-wordpress-import | 3c27330258e24a6b52f3d580060f607706bbc9d0 | [
"MIT"
] | null | null | null | import json
import os
import re
import unittest
from collections import Counter
from datetime import datetime
from unittest import mock
from xml.dom import pulldom
from django.test import TestCase, override_settings
from wagtail.core.models import Page
from example.models import Category
from wagtail_wordpress_import.functions import node_to_dict
from wagtail_wordpress_import.importers.wordpress import (
DEFAULT_PREFILTERS,
WordpressImporter,
WordpressItem,
)
from wagtail_wordpress_import.logger import Logger
BASE_PATH = os.path.dirname(os.path.dirname(__file__))
FIXTURES_PATH = BASE_PATH + "/fixtures"
LOG_DIR = "fakedir"
IMPORTER_RUN_PARAMS_TEST = {
"app_for_pages": "example",
"model_for_pages": "TestPage",
"parent_id": "2",
"page_types": ["post", "page"],
"page_statuses": ["publish", "draft"],
}
class WordpressItemTests(TestCase):
def setUp(self):
self.logger = Logger("fakedir")
body_html = """<p>Dummmy text</p><p>Dummmy text</p><p>Dummmy text</p>"""
self.good_node = {
"title": "Page Title",
"wp:post_name": "page-title",
"wp:post_date_gmt": "2017-03-12 17:53:57",
"wp:post_modified_gmt": "2018-12-04 11:49:24",
"content:encoded": body_html,
"wp:post_id": "1000",
"wp:post_type": "post",
"link": "http://www.example.com",
}
self.bad_node = {
"title": "Page Title",
"wp:post_name": "",
"wp:post_date_gmt": "0000-00-00 00:00:00",
"wp:post_modified_gmt": "0000-00-00 00:00:00",
"content:encoded": body_html,
"wp:post_id": "1000",
"wp:post_type": "post",
"link": "",
}
def test_all_fields_with_good_data(self):
wordpress_item = WordpressItem(self.good_node, self.logger)
title = wordpress_item.cleaned_data["title"]
slug = wordpress_item.cleaned_data["slug"]
first_published_at = wordpress_item.cleaned_data["first_published_at"]
last_published_at = wordpress_item.cleaned_data["last_published_at"]
latest_revision_created_at = wordpress_item.cleaned_data[
"latest_revision_created_at"
]
body = wordpress_item.cleaned_data["body"]
wp_post_id = wordpress_item.cleaned_data["wp_post_id"]
wp_post_type = wordpress_item.cleaned_data["wp_post_type"]
wp_link = wordpress_item.cleaned_data["wp_link"]
wp_raw_content = wordpress_item.debug_content["filter_linebreaks_wp"]
wp_processed_content = wordpress_item.debug_content[
"filter_transform_inline_styles"
]
wp_block_json = wordpress_item.debug_content["block_json"]
self.assertEqual(title, "Page Title")
self.assertEqual(slug, "page-title")
self.assertIsInstance(first_published_at, datetime)
self.assertIsInstance(last_published_at, datetime)
self.assertIsInstance(latest_revision_created_at, datetime)
self.assertIsInstance(json.dumps(body), str)
self.assertEqual(wp_post_id, 1000)
self.assertEqual(wp_post_type, "post")
self.assertEqual(wp_link, "http://www.example.com")
self.assertIsInstance(wp_raw_content, str)
self.assertIsInstance(wp_processed_content, str)
self.assertIsInstance(wp_block_json, list)
self.assertTrue(
len(wp_block_json), 1
) # we are only parsing consecutive paragraphs so the will only be one block (rich_text)
def test_cleaned_fields(self):
wordpress_item = WordpressItem(self.bad_node, self.logger)
slug = wordpress_item.cleaned_data["slug"]
first_published_at = wordpress_item.cleaned_data["first_published_at"]
last_published_at = wordpress_item.cleaned_data["last_published_at"]
latest_revision_created_at = wordpress_item.cleaned_data[
"latest_revision_created_at"
]
wp_link = wordpress_item.cleaned_data["wp_link"]
self.assertEqual(slug, "page-title")
self.assertIsInstance(first_published_at, datetime)
self.assertIsInstance(last_published_at, datetime)
self.assertIsInstance(latest_revision_created_at, datetime)
self.assertEqual(wp_link, "")
@override_settings(
WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
) # testing requires a live domain for requests to use, this is something I need to change before package release
# mocking of somesort, using localhost:8000 for now
class WordpressItemImportTests(TestCase):
from example.models import Category
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
self.logger = Logger(LOG_DIR)
self.importer.run(
logger=self.logger,
app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
page_types=IMPORTER_RUN_PARAMS_TEST["page_types"],
page_statuses=IMPORTER_RUN_PARAMS_TEST["page_statuses"],
)
self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
self.imported_pages = self.parent_page.get_children().all()
def test_category_snippets_are_saved(self):
snippets = Category.objects.all()
self.assertEqual(len(snippets), 4)
def test_page_one_has_categories(self):
page_one = self.imported_pages.get(title="Item one title")
categories = page_one.specific.categories.all()
self.assertEqual(2, categories.count())
self.assertEqual(categories[0].name, "Blogging")
self.assertEqual(categories[1].name, "Life")
def test_page_two_has_categories(self):
page_two = self.imported_pages.get(title="Item two title")
categories = page_two.specific.categories.all()
self.assertEqual(3, categories.count())
self.assertEqual(categories[0].name, "Blogging")
self.assertEqual(categories[1].name, "Cars")
self.assertEqual(categories[2].name, "Computing")
def test_short_category_is_not_imported(self):
page_one = self.imported_pages.get(title="Item one title")
categories = [category.name for category in page_one.specific.categories.all()]
self.assertNotIn("A", categories)
def test_categories_have_no_duplicate_entries(self):
categories = [category.name for category in Category.objects.all()]
duplicates = [
k for k, v in Counter(categories).items() if v > 1
] # duplicates will be empty if no duplicate category names exist
self.assertEqual(len(duplicates), 0)
@override_settings(
WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
) # testing requires a live domain for requests to use, this is something I need to change before package release
# mocking of somesort, using localhost:8000 for now
class WordpressItemImportTestsNoCategories(TestCase):
from example.models import Category
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
self.logger = Logger(LOG_DIR)
self.importer.run(
logger=self.logger,
app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
page_types=["hasnocategories"],
page_statuses=["hasnocategories"],
)
self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
self.imported_pages = self.parent_page.get_children().all()
def test_page_has_no_categories(self):
page = self.imported_pages.first()
categories = page.specific.categories.all()
self.assertEqual(0, categories.count())
def test_categories_count_is_zero(self):
count = Category.objects.count()
self.assertEqual(count, 0)
IMPORTER_RUN_PARAMS_TEST_OVERRIDE_1 = {
"app_for_pages": "example",
"model_for_pages": "TestPage",
"parent_id": "2",
"page_types": ["post"],
"page_statuses": ["publish"],
}
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_YOAST_PLUGIN_ENABLED=True,
)
class WordpressImporterTestsYoastMetaDescriptions(TestCase):
"""
This tests when a wp:postmeta for none single or multiple keys in the XML file.
If the meta key for yoast is not present the <description></description> content is returned.
"""
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.logger = Logger("fakedir")
xml_file = open(f"{FIXTURES_PATH}/post_meta.xml", "rb")
xml_doc = pulldom.parse(xml_file)
self.items_dict = []
for event, node in xml_doc:
if event == pulldom.START_ELEMENT and node.tagName == "item":
xml_doc.expandNode(node)
self.items_dict.append(node_to_dict(node))
def test_items_dict_0(self):
# self.items_dict[0] = the single item wp:post_meta without yoast
wordpress_item = WordpressItem(self.items_dict[0], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This page has a default description",
)
def test_items_dict_1(self):
# self.items_dict[1] = the multiple item wp:post_meta
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This page has a default description",
)
def test_items_dict_2(self):
# self.items_dict[2] = the single item wp:post_meta with yoast
wordpress_item = WordpressItem(self.items_dict[2], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This is a yoast metadesc!",
)
def test_items_dict_3(self):
# self.items_dict[3] = the multiple item wp:post_meta with yoast
wordpress_item = WordpressItem(self.items_dict[3], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This is a yoast metadesc!",
)
def test_items_dict_4(self):
# self.items_dict[3] = the multiple item wp:post_meta with yoast
wordpress_item = WordpressItem(self.items_dict[4], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This page has a default description",
)
class WordpressImporterTestsCleanWpPostMeta(TestCase):
"""
This tests the wp_post_meta field contents after cleaning in
WordpressItem().clean_wp_post_meta()
"""
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.logger = Logger("fakedir")
xml_file = open(f"{FIXTURES_PATH}/post_meta.xml", "rb")
xml_doc = pulldom.parse(xml_file)
self.items_dict = []
for event, node in xml_doc:
if event == pulldom.START_ELEMENT and node.tagName == "item":
xml_doc.expandNode(node)
self.items_dict.append(node_to_dict(node))
def test_items_dict_0(self):
# self.items_dict[0] = the single item wp:post_meta without yoast
wordpress_item = WordpressItem(self.items_dict[0], self.logger)
thumbnail_id = wordpress_item.clean_wp_post_meta()["thumbnail_id"]
self.assertEqual(thumbnail_id, 43124)
def test_items_dict_1(self):
# self.items_dict[1] = the multiple item wp:post_meta
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
post_meta = wordpress_item.clean_wp_post_meta()
self.assertEqual(post_meta["facebook_shares"], 100)
self.assertEqual(post_meta["pinterest_shares"], 200)
self.assertEqual(post_meta["twitter_shares"], 300)
def test_items_dict_2(self):
# self.items_dict[2] = the single item wp:post_meta with yoast
wordpress_item = WordpressItem(self.items_dict[2], self.logger)
post_meta = wordpress_item.clean_wp_post_meta()
self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")
def test_items_dict_3(self):
# self.items_dict[3] = the multiple item wp:post_meta with yoast
wordpress_item = WordpressItem(self.items_dict[3], self.logger)
post_meta = wordpress_item.clean_wp_post_meta()
self.assertEqual(post_meta["facebook_shares"], 10)
self.assertEqual(post_meta["pinterest_shares"], 20)
self.assertEqual(post_meta["twitter_shares"], 30)
self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")
def test_items_dict_4(self):
# self.items_dict[4] = has no wp:post_meta items
wordpress_item = WordpressItem(self.items_dict[4], self.logger)
with self.assertRaises(KeyError):
wordpress_item.clean_wp_post_meta()["wp:postmeta"]
def test_items_dict_1_excluded_keys(self):
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
cleaned_postmeta = wordpress_item.clean_wp_post_meta()
with self.assertRaises(KeyError):
cleaned_postmeta["wp:postmeta"]
with self.assertRaises(KeyError):
cleaned_postmeta["wp_post_meta"]
with self.assertRaises(KeyError):
cleaned_postmeta["content:encoded"]
with self.assertRaises(KeyError):
cleaned_postmeta["dc:creator"]
with self.assertRaises(KeyError):
cleaned_postmeta["wp:post_id"]
def test_items_dict_1_included_keys(self):
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
cleaned_postmeta = wordpress_item.clean_wp_post_meta()
self.assertTrue("title" in cleaned_postmeta)
self.assertTrue("dc_creator" in cleaned_postmeta)
self.assertTrue("guid" in cleaned_postmeta)
self.assertTrue("description" in cleaned_postmeta)
self.assertTrue("wp_post_id" in cleaned_postmeta)
self.assertTrue("wp_post_date" in cleaned_postmeta)
self.assertTrue("category" in cleaned_postmeta)
self.assertTrue("facebook_shares" in cleaned_postmeta)
self.assertTrue("pinterest_shares" in cleaned_postmeta)
self.assertTrue("twitter_shares" in cleaned_postmeta)
class TestWordpressItemPrefilterConfig(TestCase):
def test_prefilter_content_default(self):
# The expected output should be transformed after passing through the
# the default prefilters
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output, "<p>foo bar baz</p>\n")
class TestWordpressPrefilterDefaults(TestCase):
def test_default_prefilters(self):
self.assertIsInstance(DEFAULT_PREFILTERS, list)
self.assertTrue(len(DEFAULT_PREFILTERS), 4)
self.assertEqual(
DEFAULT_PREFILTERS[0]["FUNCTION"],
"wagtail_wordpress_import.prefilters.linebreaks_wp",
)
self.assertEqual(
DEFAULT_PREFILTERS[1]["FUNCTION"],
"wagtail_wordpress_import.prefilters.transform_shortcodes",
)
self.assertEqual(
DEFAULT_PREFILTERS[2]["FUNCTION"],
"wagtail_wordpress_import.prefilters.transform_inline_styles",
)
self.assertEqual(
DEFAULT_PREFILTERS[3]["FUNCTION"],
"wagtail_wordpress_import.prefilters.bleach_clean",
)
def foo_filter(content, options):
return content, options
def transform_foo(soup, tag):
new_tag = soup.new_tag("foo")
new_tag.string = tag.string
tag.replace_with(new_tag)
class TestWordpressItemPrefilterOverride(TestCase):
"""Test developers' ability to edit settings.WAGTAIL_WORDPRESS_IMPORT_PREFILTERS"""
@override_settings(WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[])
def test_prefilter_content_no_filters(self):
"""Remove all pre-filters
The expected output is the same as the input because there are no prefilters to
apply to the content
"""
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output, "foo bar baz")
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
{
"FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter"
}
]
)
def test_custom_provided_prefilter(self):
"""Provide a custom pre-filter
The expected output is the same as the input because the applied filters do
nothing and return the same value.
"""
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output[0], "foo bar baz")
self.assertEqual(output[1], None)
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
{
"FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter",
"OPTIONS": {"foo": "bar"},
}
]
)
def test_custom_provided_prefilter_with_options(self):
"""Provide a custom pre-filter with options
The expected output is the same as the input because the applied filters do
nothing and return the same value.
"""
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output[0], "foo bar baz")
self.assertEqual(output[1], {"foo": "bar"})
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
{
"FUNCTION": "wagtail_wordpress_import.prefilters.transform_inline_styles",
"OPTIONS": {
"TRANSFORM_STYLES_MAPPING": [
(
re.compile(r"font-weight:bold", re.IGNORECASE),
"wagtail_wordpress_import.test.tests.test_wordpress_item.transform_foo",
)
],
},
},
]
)
def test_transform_styles_filter_add_options(self):
"""Test that a developer can pass custom OPTIONS to transform_inline_styles.
Here WAGTAIL_WORDPRESS_IMPORT_PREFILTERS contains only config for
transform_inline_styles, so that other prefilters are not run, and it's easier
to test the output.
"""
node = {"content:encoded": '<p style="font-weight: bold">foo bar baz</p>'}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output.strip(), "<foo>foo bar baz</foo>")
| 40.256148 | 114 | 0.67269 | import json
import os
import re
import unittest
from collections import Counter
from datetime import datetime
from unittest import mock
from xml.dom import pulldom
from django.test import TestCase, override_settings
from wagtail.core.models import Page
from example.models import Category
from wagtail_wordpress_import.functions import node_to_dict
from wagtail_wordpress_import.importers.wordpress import (
DEFAULT_PREFILTERS,
WordpressImporter,
WordpressItem,
)
from wagtail_wordpress_import.logger import Logger
BASE_PATH = os.path.dirname(os.path.dirname(__file__))
FIXTURES_PATH = BASE_PATH + "/fixtures"
LOG_DIR = "fakedir"
IMPORTER_RUN_PARAMS_TEST = {
"app_for_pages": "example",
"model_for_pages": "TestPage",
"parent_id": "2",
"page_types": ["post", "page"],
"page_statuses": ["publish", "draft"],
}
class WordpressItemTests(TestCase):
def setUp(self):
self.logger = Logger("fakedir")
body_html = """<p>Dummmy text</p><p>Dummmy text</p><p>Dummmy text</p>"""
self.good_node = {
"title": "Page Title",
"wp:post_name": "page-title",
"wp:post_date_gmt": "2017-03-12 17:53:57",
"wp:post_modified_gmt": "2018-12-04 11:49:24",
"content:encoded": body_html,
"wp:post_id": "1000",
"wp:post_type": "post",
"link": "http://www.example.com",
}
self.bad_node = {
"title": "Page Title",
"wp:post_name": "",
"wp:post_date_gmt": "0000-00-00 00:00:00",
"wp:post_modified_gmt": "0000-00-00 00:00:00",
"content:encoded": body_html,
"wp:post_id": "1000",
"wp:post_type": "post",
"link": "",
}
def test_all_fields_with_good_data(self):
wordpress_item = WordpressItem(self.good_node, self.logger)
title = wordpress_item.cleaned_data["title"]
slug = wordpress_item.cleaned_data["slug"]
first_published_at = wordpress_item.cleaned_data["first_published_at"]
last_published_at = wordpress_item.cleaned_data["last_published_at"]
latest_revision_created_at = wordpress_item.cleaned_data[
"latest_revision_created_at"
]
body = wordpress_item.cleaned_data["body"]
wp_post_id = wordpress_item.cleaned_data["wp_post_id"]
wp_post_type = wordpress_item.cleaned_data["wp_post_type"]
wp_link = wordpress_item.cleaned_data["wp_link"]
wp_raw_content = wordpress_item.debug_content["filter_linebreaks_wp"]
wp_processed_content = wordpress_item.debug_content[
"filter_transform_inline_styles"
]
wp_block_json = wordpress_item.debug_content["block_json"]
self.assertEqual(title, "Page Title")
self.assertEqual(slug, "page-title")
self.assertIsInstance(first_published_at, datetime)
self.assertIsInstance(last_published_at, datetime)
self.assertIsInstance(latest_revision_created_at, datetime)
self.assertIsInstance(json.dumps(body), str)
self.assertEqual(wp_post_id, 1000)
self.assertEqual(wp_post_type, "post")
self.assertEqual(wp_link, "http://www.example.com")
self.assertIsInstance(wp_raw_content, str)
self.assertIsInstance(wp_processed_content, str)
self.assertIsInstance(wp_block_json, list)
self.assertTrue(
len(wp_block_json), 1
)
def test_cleaned_fields(self):
wordpress_item = WordpressItem(self.bad_node, self.logger)
slug = wordpress_item.cleaned_data["slug"]
first_published_at = wordpress_item.cleaned_data["first_published_at"]
last_published_at = wordpress_item.cleaned_data["last_published_at"]
latest_revision_created_at = wordpress_item.cleaned_data[
"latest_revision_created_at"
]
wp_link = wordpress_item.cleaned_data["wp_link"]
self.assertEqual(slug, "page-title")
self.assertIsInstance(first_published_at, datetime)
self.assertIsInstance(last_published_at, datetime)
self.assertIsInstance(latest_revision_created_at, datetime)
self.assertEqual(wp_link, "")
@override_settings(
WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
)
class WordpressItemImportTests(TestCase):
from example.models import Category
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
self.logger = Logger(LOG_DIR)
self.importer.run(
logger=self.logger,
app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
page_types=IMPORTER_RUN_PARAMS_TEST["page_types"],
page_statuses=IMPORTER_RUN_PARAMS_TEST["page_statuses"],
)
self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
self.imported_pages = self.parent_page.get_children().all()
def test_category_snippets_are_saved(self):
snippets = Category.objects.all()
self.assertEqual(len(snippets), 4)
def test_page_one_has_categories(self):
page_one = self.imported_pages.get(title="Item one title")
categories = page_one.specific.categories.all()
self.assertEqual(2, categories.count())
self.assertEqual(categories[0].name, "Blogging")
self.assertEqual(categories[1].name, "Life")
def test_page_two_has_categories(self):
page_two = self.imported_pages.get(title="Item two title")
categories = page_two.specific.categories.all()
self.assertEqual(3, categories.count())
self.assertEqual(categories[0].name, "Blogging")
self.assertEqual(categories[1].name, "Cars")
self.assertEqual(categories[2].name, "Computing")
def test_short_category_is_not_imported(self):
page_one = self.imported_pages.get(title="Item one title")
categories = [category.name for category in page_one.specific.categories.all()]
self.assertNotIn("A", categories)
def test_categories_have_no_duplicate_entries(self):
categories = [category.name for category in Category.objects.all()]
duplicates = [
k for k, v in Counter(categories).items() if v > 1
]
self.assertEqual(len(duplicates), 0)
@override_settings(
WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
)
class WordpressItemImportTestsNoCategories(TestCase):
from example.models import Category
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
self.logger = Logger(LOG_DIR)
self.importer.run(
logger=self.logger,
app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
page_types=["hasnocategories"],
page_statuses=["hasnocategories"],
)
self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
self.imported_pages = self.parent_page.get_children().all()
def test_page_has_no_categories(self):
page = self.imported_pages.first()
categories = page.specific.categories.all()
self.assertEqual(0, categories.count())
def test_categories_count_is_zero(self):
count = Category.objects.count()
self.assertEqual(count, 0)
IMPORTER_RUN_PARAMS_TEST_OVERRIDE_1 = {
"app_for_pages": "example",
"model_for_pages": "TestPage",
"parent_id": "2",
"page_types": ["post"],
"page_statuses": ["publish"],
}
@override_settings(
    WAGTAIL_WORDPRESS_IMPORT_YOAST_PLUGIN_ENABLED=True,
)
class WordpressImporterTestsYoastMetaDescriptions(TestCase):
    """Yoast meta-description extraction for each <item> in post_meta.xml.

    Items 0, 1 and 4 carry no usable Yoast value and fall back to the default
    description; items 2 and 3 provide an explicit metadesc.
    """

    fixtures = [
        f"{FIXTURES_PATH}/dump.json",
    ]

    def setUp(self):
        self.logger = Logger("fakedir")
        # `with` guarantees the XML file is closed; the original version
        # leaked the open handle.
        with open(f"{FIXTURES_PATH}/post_meta.xml", "rb") as xml_file:
            xml_doc = pulldom.parse(xml_file)
            self.items_dict = []
            for event, node in xml_doc:
                if event == pulldom.START_ELEMENT and node.tagName == "item":
                    xml_doc.expandNode(node)
                    self.items_dict.append(node_to_dict(node))

    def _yoast_description(self, index):
        """Return the Yoast description parsed from item *index*."""
        item = WordpressItem(self.items_dict[index], self.logger)
        return item.get_yoast_description_value()

    def test_items_dict_0(self):
        self.assertEqual(
            self._yoast_description(0), "This page has a default description"
        )

    def test_items_dict_1(self):
        self.assertEqual(
            self._yoast_description(1), "This page has a default description"
        )

    def test_items_dict_2(self):
        self.assertEqual(self._yoast_description(2), "This is a yoast metadesc!")

    def test_items_dict_3(self):
        self.assertEqual(self._yoast_description(3), "This is a yoast metadesc!")

    def test_items_dict_4(self):
        self.assertEqual(
            self._yoast_description(4), "This page has a default description"
        )
class WordpressImporterTestsCleanWpPostMeta(TestCase):
    """clean_wp_post_meta(): key flattening, renaming and value coercion."""

    fixtures = [
        f"{FIXTURES_PATH}/dump.json",
    ]

    def setUp(self):
        self.logger = Logger("fakedir")
        # `with` closes the fixture file (the original leaked the handle).
        with open(f"{FIXTURES_PATH}/post_meta.xml", "rb") as xml_file:
            xml_doc = pulldom.parse(xml_file)
            self.items_dict = []
            for event, node in xml_doc:
                if event == pulldom.START_ELEMENT and node.tagName == "item":
                    xml_doc.expandNode(node)
                    self.items_dict.append(node_to_dict(node))

    def _cleaned(self, index):
        """Return the cleaned post meta dict for item *index*."""
        return WordpressItem(self.items_dict[index], self.logger).clean_wp_post_meta()

    def test_items_dict_0(self):
        # Numeric strings are coerced to ints.
        self.assertEqual(self._cleaned(0)["thumbnail_id"], 43124)

    def test_items_dict_1(self):
        post_meta = self._cleaned(1)
        self.assertEqual(post_meta["facebook_shares"], 100)
        self.assertEqual(post_meta["pinterest_shares"], 200)
        self.assertEqual(post_meta["twitter_shares"], 300)

    def test_items_dict_2(self):
        self.assertEqual(
            self._cleaned(2)["yoast_wpseo_metadesc"], "This is a yoast metadesc!"
        )

    def test_items_dict_3(self):
        post_meta = self._cleaned(3)
        self.assertEqual(post_meta["facebook_shares"], 10)
        self.assertEqual(post_meta["pinterest_shares"], 20)
        self.assertEqual(post_meta["twitter_shares"], 30)
        self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")

    def test_items_dict_4(self):
        # The raw "wp:postmeta" container must not survive cleaning.
        self.assertNotIn("wp:postmeta", self._cleaned(4))

    def test_items_dict_1_excluded_keys(self):
        cleaned_postmeta = self._cleaned(1)
        # Raw / unsanitised keys are removed by the cleaning step.
        for excluded in (
            "wp:postmeta",
            "wp_post_meta",
            "content:encoded",
            "dc:creator",
            "wp:post_id",
        ):
            self.assertNotIn(excluded, cleaned_postmeta)

    def test_items_dict_1_included_keys(self):
        cleaned_postmeta = self._cleaned(1)
        # Sanitised key names must be present after cleaning.
        for included in (
            "title",
            "dc_creator",
            "guid",
            "description",
            "wp_post_id",
            "wp_post_date",
            "category",
            "facebook_shares",
            "pinterest_shares",
            "twitter_shares",
        ):
            self.assertIn(included, cleaned_postmeta)
class TestWordpressItemPrefilterConfig(TestCase):
    """Default prefilter pipeline applied when no override is configured."""

    def test_prefilter_content_default(self):
        item = WordpressItem({"content:encoded": "foo bar baz"}, "")
        filtered = item.prefilter_content(item.raw_body)
        # linebreaks_wp wraps plain text in a paragraph.
        self.assertEqual(filtered, "<p>foo bar baz</p>\n")
class TestWordpressPrefilterDefaults(TestCase):
    """Shape and ordering of the built-in prefilter pipeline."""

    def test_default_prefilters(self):
        self.assertIsInstance(DEFAULT_PREFILTERS, list)
        # BUG FIX: the original used assertTrue(len(...), 4), which treats 4
        # as the failure *message* and therefore always passes; assertEqual
        # actually verifies the pipeline length.
        self.assertEqual(len(DEFAULT_PREFILTERS), 4)
        expected_order = [
            "wagtail_wordpress_import.prefilters.linebreaks_wp",
            "wagtail_wordpress_import.prefilters.transform_shortcodes",
            "wagtail_wordpress_import.prefilters.transform_inline_styles",
            "wagtail_wordpress_import.prefilters.bleach_clean",
        ]
        for position, function_path in enumerate(expected_order):
            self.assertEqual(DEFAULT_PREFILTERS[position]["FUNCTION"], function_path)
def foo_filter(content, options):
    """Identity prefilter used by the override tests.

    Returns a ``(content, options)`` tuple unchanged so tests can check that
    both the body and the configured OPTIONS reach a custom filter intact.
    """
    return (content, options)
def transform_foo(soup, tag):
    """Replace *tag* in-place with a new ``<foo>`` element keeping its text."""
    replacement = soup.new_tag("foo")
    replacement.string = tag.string
    tag.replace_with(replacement)
class TestWordpressItemPrefilterOverride(TestCase):
    # Exercises the WAGTAIL_WORDPRESS_IMPORT_PREFILTERS override hook using
    # the module-level foo_filter/transform_foo helpers defined above.

    @override_settings(WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[])
    def test_prefilter_content_no_filters(self):
        # With an empty pipeline the raw body passes through untouched.
        node = {"content:encoded": "foo bar baz"}
        wordpress_item = WordpressItem(node, "")
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output, "foo bar baz")

    @override_settings(
        WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
            {
                "FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter"
            }
        ]
    )
    def test_custom_provided_prefilter(self):
        # foo_filter returns (content, options); without an OPTIONS key the
        # options argument arrives as None.
        node = {"content:encoded": "foo bar baz"}
        wordpress_item = WordpressItem(node, "")
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output[0], "foo bar baz")
        self.assertEqual(output[1], None)

    @override_settings(
        WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
            {
                "FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter",
                "OPTIONS": {"foo": "bar"},
            }
        ]
    )
    def test_custom_provided_prefilter_with_options(self):
        # The OPTIONS dict from the setting is forwarded to the filter as-is.
        node = {"content:encoded": "foo bar baz"}
        wordpress_item = WordpressItem(node, "")
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output[0], "foo bar baz")
        self.assertEqual(output[1], {"foo": "bar"})

    @override_settings(
        WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
            {
                "FUNCTION": "wagtail_wordpress_import.prefilters.transform_inline_styles",
                "OPTIONS": {
                    "TRANSFORM_STYLES_MAPPING": [
                        (
                            re.compile(r"font-weight:bold", re.IGNORECASE),
                            "wagtail_wordpress_import.test.tests.test_wordpress_item.transform_foo",
                        )
                    ],
                },
            },
        ]
    )
    def test_transform_styles_filter_add_options(self):
        # A style attribute matching the mapping regex triggers transform_foo,
        # which rewrites the <p> tag as <foo>.
        node = {"content:encoded": '<p style="font-weight: bold">foo bar baz</p>'}
        wordpress_item = WordpressItem(node, "")
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output.strip(), "<foo>foo bar baz</foo>")
| true | true |
f72b287c0755998110f1fa14c9a7bd080f42dee2 | 1,251 | py | Python | azure/mgmt/network/v2016_09_01/models/express_route_circuits_routes_table_summary_list_result.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2020-07-29T14:22:17.000Z | 2020-11-06T18:47:40.000Z | azure/mgmt/network/v2016_09_01/models/express_route_circuits_routes_table_summary_list_result.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2016-08-01T07:37:04.000Z | 2016-08-01T07:37:04.000Z | azure/mgmt/network/v2016_09_01/models/express_route_circuits_routes_table_summary_list_result.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-12-12T21:04:41.000Z | 2020-12-12T21:04:41.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
    """Response for the ListRoutesTable operation of the Express Route
    Circuits API.

    :param value: A list of the routes table.
    :type value: list of :class:`ExpressRouteCircuitRoutesTableSummary
     <azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitRoutesTableSummary>`
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # msrest serialization map: attribute name -> (JSON key, swagger type).
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
    }

    def __init__(self, value=None, next_link=None):
        self.next_link = next_link
        self.value = value
| 36.794118 | 85 | 0.631495 |
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None, next_link=None):
self.value = value
self.next_link = next_link
| true | true |
f72b28a897f88f7a2835dba9ffb1efe2af6ae2d4 | 4,626 | py | Python | purity_fb/purity_fb_1dot8dot1/models/alert_watcher_test_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot8dot1/models/alert_watcher_test_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot8dot1/models/alert_watcher_test_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.8.1 Python SDK
Pure Storage FlashBlade REST 1.8.1 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8.1
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AlertWatcherTestResponse(object):
    """Container for a paged list of alert watcher test results.

    Auto-generated swagger model; regenerate rather than hand-edit.
    """

    #BEGIN_CUSTOM
    # IR-51527: keep pytest from collecting this class because of its
    # "...Test..." name.
    __test__ = False
    #END_CUSTOM

    # Attribute name -> swagger type.
    swagger_types = {
        'pagination_info': 'PaginationInfo',
        'items': 'list[AlertWatcherTest]'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'pagination_info': 'pagination_info',
        'items': 'items'
    }

    def __init__(self, pagination_info=None, items=None):
        """Build the model, storing only the attributes that were supplied."""
        self._pagination_info = None
        self._items = None
        self.discriminator = None
        if pagination_info is not None:
            self.pagination_info = pagination_info
        if items is not None:
            self.items = items

    @property
    def pagination_info(self):
        """Pagination information, only available in GET requests.

        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """Set the pagination information for this response."""
        self._pagination_info = pagination_info

    @property
    def items(self):
        """A list of alert watcher test results.

        :rtype: list[AlertWatcherTest]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the list of alert watcher test results."""
        self._items = items

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[name] = value
        if issubclass(AlertWatcherTestResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when their attribute dicts are equal."""
        if not isinstance(other, AlertWatcherTestResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 30.635762 | 206 | 0.607436 |
import pprint
import re
import six
class AlertWatcherTestResponse(object):
__test__ = False
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[AlertWatcherTest]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
self._pagination_info = None
self._items = None
self.discriminator = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
self._pagination_info = pagination_info
@property
def items(self):
return self._items
@items.setter
def items(self, items):
self._items = items
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AlertWatcherTestResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AlertWatcherTestResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72b28d1d6a43d1771d51b5748f4617a33315439 | 7,739 | py | Python | code/searchJeuxDeMots.py | AnthonySigogne/HackatonIWCS2017 | d0683a1c8246b75d110984207ec1f1cee67accef | [
"MIT"
] | 1 | 2017-11-20T17:30:31.000Z | 2017-11-20T17:30:31.000Z | code/searchJeuxDeMots.py | AnthonySigogne/HackatonIWCS2017 | d0683a1c8246b75d110984207ec1f1cee67accef | [
"MIT"
] | null | null | null | code/searchJeuxDeMots.py | AnthonySigogne/HackatonIWCS2017 | d0683a1c8246b75d110984207ec1f1cee67accef | [
"MIT"
] | null | null | null | #!/usr/sfw/bin/python
# -*- coding: utf-8 -*-
#C:\python27\python.exe C:\Dropbox\Work\2012ExpressionsComposees\CreateGraph.py
import sys, os, re, string, time
from math import *
#------------------------------
# Chargement des paramètres
#------------------------------
args = {}
i = 1

# JeuxDeMots relation-type ids we keep, mapped to their relation names.
selectedRelations = {
    6: "r_isa",
    9: "r_has_part",
    16: "r_instr",
    17: "r_carac",
    23: "r_carac-1",
    15: "r_lieu",
    24: "r_agent-1",
    26: "r_patient-1",
    41: "r_conseq",
    53: "r_make",
}

inputFolder = os.path.abspath(os.path.dirname(sys.argv[0]))
# POS-tagged text containing (almost) all text files of the Hackathon.
inputTaggedTexts = inputFolder + "\\tagged.txt"
# JeuxDeMots data file (the small dump; larger dumps exist under other names).
inputJeuxDeMots = inputFolder + "\\08152011-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt"
# Lowercase ASCII letters accepted as the first character of an indexed word;
# values are a constant 1 (the dict is only used for membership tests).
letters = {letter: 1 for letter in "abcdefghijklmnopqrstuvwxyz"}
# Accented character -> unaccented ASCII replacement (the original assigned
# "á" twice with the same value; a single entry is equivalent).
replacements = {
    "æ": "ae",
    "à": "a",
    "á": "a",
    "ã": "a",
    "ä": "a",
    "â": "a",
    "ç": "c",
    "é": "e",
    "è": "e",
    "ë": "e",
    "ê": "e",
    "ï": "i",
    "î": "i",
    "ì": "i",
    "ñ": "n",
    "ô": "o",
    "ö": "o",
    "ó": "o",
    "œ": "oe",
    "ü": "u",
    "ù": "u",
    "ú": "u",
}
def removeAccent(word, replacements):
    """Return *word* with every accented character from *replacements*
    substituted by its unaccented form."""
    for accented, plain in replacements.items():
        word = word.replace(accented, plain)
    return word
def readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters):
    # Cross-references a JeuxDeMots lexical-network dump with a POS-tagged
    # text: (1) index JeuxDeMots word entries, (2) build a neighbourhood dict
    # for the relation types in module-level `selectedRelations`, (3) scan the
    # tagged text sentence by sentence and record every word pair (token or
    # lemma) that is linked in JeuxDeMots.  Results are appended to
    # "<inputTaggedTexts>.output.txt" as "word1;word2;relation;sentence;".
    # NOTE(review): `inputFolder` is unused here; relies on module-level `re`
    # and `selectedRelations`.  Indentation reconstructed from a
    # whitespace-stripped source — confirm nesting against the original file.
    allWords = {}
    i = 0
    # Associate all word indices with words in a dictionary
    try :
        for line in open(inputJeuxDeMots,"r"):
            if i % 1000 == 0:
                print("ligne "+str(i))
            i+=1
            # only take words with t=1 (real words)
            res = re.search("eid=([0-9]*).n=.(.+)..t=1.w=([0-9]*).*",line)
            if res:
                id = res.group(1)
                word = res.group(2)
                # only take words whose first character is a letter
                firstLetter = word[0].lower()
                weight = int(res.group(3))
                if firstLetter in letters or firstLetter in replacements:
                    allWords[id] = word
    except ValueError:
        # NOTE(review): this aborts the whole indexing loop on the first
        # ValueError rather than skipping the bad line — confirm intended.
        print(str(ValueError))
        pass
    # Create a dictionary of the neighborhoods of all words according to the relations in selectedRelations
    if 0 == 0:
        i = 0
        nbRelations = 0
        neighbors = {}
        for line in open(inputJeuxDeMots,"r"):
            if i % 1000 == 0:
                print("ligne "+str(i))
            i+=1
            # extract the edges of the graph, including type and weight
            res = re.search("rid=([0-9]*).n1=([0-9]*).n2=([0-9]*).t=([0-9]*).w=([0-9]+).*",line)
            if res:
                try :
                    id1 = res.group(2)
                    id2 = res.group(3)
                    type = int(res.group(4))
                    weight = int(res.group(5))
                    edgeInfo = []
                    edgeInfo.append(type)
                    edgeInfo.append(weight)
                    # if the relation has positive weight, is of one of the expected types
                    # and links two indexed words, we memorize it by saving its weight and type in a dict of dict
                    if (weight>0) and (type in selectedRelations) and (id1 in allWords) and (id2 in allWords):
                        firstWord = allWords[id1]
                        secondWord = allWords[id2]
                        if firstWord not in neighbors:
                            neighbors[firstWord] = {}
                        neighbors[firstWord][secondWord] = edgeInfo
                        nbRelations += 1
                        #print(str(nbRelations) + "relations")
                except ValueError:
                    print(str(ValueError) + line)
                    pass
        print(str(nbRelations) + "relations")
    # Extract all sentences of the tagged text, then check which words are indexed (themselves or their lemma) in JeuxDeMots
    # and are in relation in JeuxDeMots
    sentence = []
    results = []
    sentenceString = ""
    for line in open(inputTaggedTexts,"r"):
        # Each line is "token;lemma;pos".
        res = re.search("([^;]+);([^;]+);([^;]+)",line)
        if res:
            token = res.group(1)
            lemma = res.group(2)
            pos = res.group(3)
            position = []
            position.append(token)
            position.append(lemma)
            # if the sentence is finished:
            # (heuristic: an uppercase-initial token starts a new sentence)
            if token[0] == token[0].upper():
                # check for each pair of token if it is in the dict of relations of JeuxDeMots
                for loc1 in sentence:
                    for loc2 in sentence:
                        if not (loc1 == loc2):
                            word1 = ""
                            word2 = ""
                            # Try all four token/lemma combinations; later
                            # matches overwrite earlier ones.
                            if (loc1[0] in neighbors and loc2[0] in neighbors[loc1[0]]):
                                word1 = loc1[0]
                                word2 = loc2[0]
                            if (loc1[1] in neighbors and loc2[0] in neighbors[loc1[1]]):
                                word1 = loc1[1]
                                word2 = loc2[0]
                            if (loc1[0] in neighbors and loc2[1] in neighbors[loc1[0]]):
                                word1 = loc1[0]
                                word2 = loc2[1]
                            if (loc1[1] in neighbors and loc2[1] in neighbors[loc1[1]]):
                                word1 = loc1[1]
                                word2 = loc2[1]
                            if len(word1) > 0:
                                result = []
                                #print(word1+" found! ")
                                result.append(word1)
                                result.append(word2)
                                result.append(selectedRelations[neighbors[word1][word2][0]])
                                result.append(sentenceString)
                                results.append(result)
                sentence = []
                sentenceString = ""
            # Only keep positions that can possibly match a relation.
            if position[0] in neighbors or position[1] in neighbors :
                sentence.append(position)
            sentenceString += token+" "
    # Write one "word1;word2;relation;sentence;" line per detected pair.
    outputFile = open(inputTaggedTexts+".output.txt","w")
    for result in results:
        for element in result:
            outputFile.writelines(element+";")
        outputFile.writelines("\n")
    outputFile.close()
# Script entry point: run the full cross-referencing pipeline on import.
readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters)
import sys, os, re, string, time
from math import *
args={}
i=1;
selectedRelations = {}
selectedRelations[6] = "r_isa"
selectedRelations[9] = "r_has_part"
selectedRelations[16] = "r_instr"
selectedRelations[17] = "r_carac"
selectedRelations[23] = "r_carac-1"
selectedRelations[15] = "r_lieu"
selectedRelations[24] = "r_agent-1"
selectedRelations[26] = "r_patient-1"
selectedRelations[41] = "r_conseq"
selectedRelations[53] = "r_make"
inputFolder = os.path.abspath(os.path.dirname(sys.argv[0]))
inputTaggedTexts = inputFolder + "\\tagged.txt"
inputJeuxDeMots = inputFolder + "\\08152011-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
letters = {}
letters["a"] = 1
letters["b"] = 1
letters["c"] = 1
letters["d"] = 1
letters["e"] = 1
letters["f"] = 1
letters["g"] = 1
letters["h"] = 1
letters["i"] = 1
letters["j"] = 1
letters["k"] = 1
letters["l"] = 1
letters["m"] = 1
letters["n"] = 1
letters["o"] = 1
letters["p"] = 1
letters["q"] = 1
letters["r"] = 1
letters["s"] = 1
letters["t"] = 1
letters["u"] = 1
letters["v"] = 1
letters["w"] = 1
letters["x"] = 1
letters["y"] = 1
letters["z"] = 1
replacements = {}
replacements["æ"] = "ae"
replacements["à"] = "a"
replacements["á"] = "a"
replacements["á"] = "a"
replacements["ã"] = "a"
replacements["ä"] = "a"
replacements["â"] = "a"
replacements["ç"] = "c"
replacements["é"] = "e"
replacements["è"] = "e"
replacements["ë"] = "e"
replacements["ê"] = "e"
replacements["ï"] = "i"
replacements["î"] = "i"
replacements["ì"] = "i"
replacements["ñ"] = "n"
replacements["ô"] = "o"
replacements["ö"] = "o"
replacements["ó"] = "o"
replacements["œ"] = "oe"
replacements["ü"] = "u"
replacements["ù"] = "u"
replacements["ú"] = "u"
def removeAccent(word, replacements):
for letter in replacements:
word = word.replace(letter, replacements[letter])
return word
def readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters):
allWords = {}
i = 0
try :
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
res = re.search("eid=([0-9]*).n=.(.+)..t=1.w=([0-9]*).*",line)
if res:
id = res.group(1)
word = res.group(2)
firstLetter = word[0].lower()
weight = int(res.group(3))
if firstLetter in letters or firstLetter in replacements:
allWords[id] = word
except ValueError:
print(str(ValueError))
pass
if 0 == 0:
i = 0
nbRelations = 0
neighbors = {}
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
res = re.search("rid=([0-9]*).n1=([0-9]*).n2=([0-9]*).t=([0-9]*).w=([0-9]+).*",line)
if res:
try :
id1 = res.group(2)
id2 = res.group(3)
type = int(res.group(4))
weight = int(res.group(5))
edgeInfo = []
edgeInfo.append(type)
edgeInfo.append(weight)
if (weight>0) and (type in selectedRelations) and (id1 in allWords) and (id2 in allWords):
firstWord = allWords[id1]
secondWord = allWords[id2]
if firstWord not in neighbors:
neighbors[firstWord] = {}
neighbors[firstWord][secondWord] = edgeInfo
nbRelations += 1
except ValueError:
print(str(ValueError) + line)
pass
print(str(nbRelations) + "relations")
sentence = []
results = []
sentenceString = ""
for line in open(inputTaggedTexts,"r"):
res = re.search("([^;]+);([^;]+);([^;]+)",line)
if res:
token = res.group(1)
lemma = res.group(2)
pos = res.group(3)
position = []
position.append(token)
position.append(lemma)
if token[0] == token[0].upper():
for loc1 in sentence:
for loc2 in sentence:
if not (loc1 == loc2):
word1 = ""
word2 = ""
if (loc1[0] in neighbors and loc2[0] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[0]
if (loc1[1] in neighbors and loc2[0] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[0]
if (loc1[0] in neighbors and loc2[1] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[1]
if (loc1[1] in neighbors and loc2[1] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[1]
if len(word1) > 0:
result = []
result.append(word1)
result.append(word2)
result.append(selectedRelations[neighbors[word1][word2][0]])
result.append(sentenceString)
results.append(result)
sentence = []
sentenceString = ""
if position[0] in neighbors or position[1] in neighbors :
sentence.append(position)
sentenceString += token+" "
outputFile = open(inputTaggedTexts+".output.txt","w")
for result in results:
for element in result:
outputFile.writelines(element+";")
outputFile.writelines("\n")
outputFile.close()
readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters) | true | true |
f72b28ec393014292fff2aac3ffa0f3a488e9bda | 170 | py | Python | handlers/sr.py | flaviopicci/xen-backup | 306667f6ce3fd81d98b7a73312e37ad01f91c287 | [
"Apache-2.0"
] | null | null | null | handlers/sr.py | flaviopicci/xen-backup | 306667f6ce3fd81d98b7a73312e37ad01f91c287 | [
"Apache-2.0"
] | null | null | null | handlers/sr.py | flaviopicci/xen-backup | 306667f6ce3fd81d98b7a73312e37ad01f91c287 | [
"Apache-2.0"
] | null | null | null | from handlers.common import Common
class SR(Common):
    # Thin XenAPI wrapper for Storage Repository (SR) objects; all behaviour
    # comes from Common, parameterised only by the object type name.
    _type = "SR"

    def __init__(self, xapi, ref=None, params=None):
        super().__init__(xapi, ref, params)
| 18.888889 | 52 | 0.658824 | from handlers.common import Common
class SR(Common):
_type = "SR"
def __init__(self, xapi, ref=None, params=None):
super().__init__(xapi, ref, params)
| true | true |
f72b296dc9ecbc509d9451f3cf12c463f5785fef | 790 | py | Python | junk/pull_photos.py | simplegeo/betashapes | 25d964c6dc20281b8f4c0b9049cd417af3e21e35 | [
"PostgreSQL",
"Unlicense"
] | 14 | 2015-02-13T16:35:28.000Z | 2021-01-18T04:20:50.000Z | junk/pull_photos.py | simplegeo/betashapes | 25d964c6dc20281b8f4c0b9049cd417af3e21e35 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | junk/pull_photos.py | simplegeo/betashapes | 25d964c6dc20281b8f4c0b9049cd417af3e21e35 | [
"PostgreSQL",
"Unlicense"
] | 1 | 2017-03-23T22:09:36.000Z | 2017-03-23T22:09:36.000Z | #!/usr/bin/python
import sys
import csv
#first arg: input file, csv. column woe_id should be the list of woe_ids we want to pull out of photos.txt
#second arg: output file, txt subset of photos.txt (also remove photoid. samplr not expecting it)
def main():
    """Filter photos.txt down to rows whose place id appears in the input CSV.

    argv[1]: input CSV with a ``woe_id`` column listing the place ids to keep.
    argv[2]: output path; kept rows are written as "placeid<TAB>lon<TAB>lat"
             (the photo id column is dropped, as samplr does not expect it).
    """
    infile = sys.argv[1]
    outfile = sys.argv[2]
    photofile = "photos.txt"

    # Collect the wanted ids in a set for O(1) membership tests — the original
    # list made every lookup O(n) over the whole CSV.
    with open(infile, "r") as csv_handle:
        woes = {line["woe_id"] for line in csv.DictReader(csv_handle)}

    outstr = "%s\t%s\t%s\n"
    # Context managers close both handles (the original leaked them).
    with open(photofile, "r") as pfh, open(outfile, "w") as ofh:
        for row in pfh:
            _photoid, placeid, lon, lat = row.strip().split()
            if placeid in woes:
                ofh.write(outstr % (placeid, lon, lat))
if __name__ == "__main__":
    # Propagate main()'s return value (None -> exit status 0) to the shell.
    sys.exit(main())
| 22.571429 | 106 | 0.605063 |
import sys
import csv
def main():
infile = sys.argv[1]
outfile = sys.argv[2]
photofile = "photos.txt"
woes = []
ireader = csv.DictReader(open(infile, 'r'))
for line in ireader:
woes.append(line['woe_id'])
pfh = open(photofile, 'r')
ofh = open(outfile, 'w')
outstr = "%s\t%s\t%s\n"
for row in pfh:
photoid, placeid, lon, lat = row.strip().split()
if placeid in woes:
out = outstr % (placeid, lon, lat)
ofh.write(out)
if __name__ == "__main__":
sys.exit(main())
| true | true |
f72b29d93a56efc5fafb086551352e0cba9256da | 7,352 | py | Python | electrum/plugins/labels/labels.py | hodlwave/electrum | 52f8aafb604d05487a0612f65bacb966c0d0f569 | [
"MIT"
] | 4 | 2020-06-27T22:43:34.000Z | 2021-04-12T02:29:30.000Z | electrum/plugins/labels/labels.py | hodlwave/electrum | 52f8aafb604d05487a0612f65bacb966c0d0f569 | [
"MIT"
] | 21 | 2020-06-20T15:02:50.000Z | 2021-04-07T10:14:59.000Z | electrum/plugins/labels/labels.py | hodlwave/electrum | 52f8aafb604d05487a0612f65bacb966c0d0f569 | [
"MIT"
] | 13 | 2020-06-28T08:13:28.000Z | 2021-12-28T00:11:56.000Z | import asyncio
import hashlib
import json
import sys
import traceback
from typing import Union, TYPE_CHECKING
import base64
from electrum.plugin import BasePlugin, hook
from electrum.crypto import aes_encrypt_with_iv, aes_decrypt_with_iv
from electrum.i18n import _
from electrum.util import log_exceptions, ignore_exceptions, make_aiohttp_session
from electrum.network import Network
if TYPE_CHECKING:
from electrum.wallet import Abstract_Wallet
class ErrorConnectingServer(Exception):
    """Raised when the labels sync server cannot be reached."""

    def __init__(self, reason: Union[str, Exception] = None):
        self.reason = reason

    def __str__(self):
        header = _("Error connecting to {} server").format('Labels')
        if not self.reason:
            return header
        reason_text = self.reason
        if isinstance(reason_text, BaseException):
            reason_text = repr(reason_text)
        return f"{header}: {reason_text}"
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
    BasePlugin.__init__(self, parent, config, name)
    # Hostname of the central label-sync service.
    self.target_host = 'labels.electrum.org'
    # Maps wallet -> (encryption password, AES IV, wallet_id), populated when
    # a wallet is registered with the plugin.
    self.wallets = {}
def encode(self, wallet, msg):
    """AES-encrypt *msg* with the wallet's sync key and return it base64-encoded."""
    password, iv, _wallet_id = self.wallets[wallet]
    ciphertext = aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
    return base64.b64encode(ciphertext).decode()
def decode(self, wallet, message):
    """Inverse of encode(): base64-decode then AES-decrypt into a str."""
    password, iv, _wallet_id = self.wallets[wallet]
    plaintext = aes_decrypt_with_iv(password, iv, base64.b64decode(message))
    return plaintext.decode('utf8')
def get_nonce(self, wallet):
    # nonce is the nonce to be used with the next change
    nonce = wallet.db.get('wallet_nonce')
    if nonce is None:
        # Fresh wallet: start at 1 and persist so later calls agree.
        nonce = 1
        self.set_nonce(wallet, nonce)
    return nonce
def set_nonce(self, wallet, nonce):
    # Persist the nonce in the wallet db so it survives restarts.
    self.logger.info(f"set {wallet.basename()} nonce to {nonce}")
    wallet.db.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
    # Plugin hook: push a single label change to the sync server.
    if wallet not in self.wallets:
        return
    if not item:
        return
    nonce = self.get_nonce(wallet)
    wallet_id = self.wallets[wallet][2]
    # Both the key (tx hash / address) and the label travel encrypted.
    bundle = {"walletId": wallet_id,
              "walletNonce": nonce,
              "externalId": self.encode(wallet, item),
              "encryptedLabel": self.encode(wallet, label)}
    # Fire-and-forget on the network event loop; errors are logged, not raised.
    asyncio.run_coroutine_threadsafe(self.do_post_safe("/label", bundle), wallet.network.asyncio_loop)
    # Caller will write the wallet
    self.set_nonce(wallet, nonce + 1)
@ignore_exceptions
@log_exceptions
async def do_post_safe(self, *args):
    # Fire-and-forget variant of do_post: the decorators log and swallow
    # any exception instead of propagating it.
    await self.do_post(*args)
async def do_get(self, url = "/labels"):
    # GET *url* from the labels server and return the parsed JSON body.
    url = 'https://' + self.target_host + url
    network = Network.get_instance()
    # Honour the user's proxy settings when a network instance exists.
    proxy = network.proxy if network else None
    async with make_aiohttp_session(proxy) as session:
        async with session.get(url) as result:
            return await result.json()
async def do_post(self, url = "/labels", data=None):
    # POST *data* as JSON to *url* on the labels server; return parsed JSON.
    url = 'https://' + self.target_host + url
    network = Network.get_instance()
    proxy = network.proxy if network else None
    async with make_aiohttp_session(proxy) as session:
        async with session.post(url, json=data) as result:
            try:
                return await result.json()
            except Exception as e:
                # Include the raw body so server-side errors are diagnosable.
                raise Exception('Could not decode: ' + await result.text()) from e
async def push_thread(self, wallet):
    """Upload all of *wallet*'s labels (encrypted) to the sync server."""
    wallet_data = self.wallets.get(wallet, None)
    if not wallet_data:
        raise Exception('Wallet {} not loaded'.format(wallet))
    wallet_id = wallet_data[2]
    bundle = {"labels": [],
              "walletId": wallet_id,
              "walletNonce": self.get_nonce(wallet)}
    for key, value in wallet.labels.items():
        try:
            encoded_key = self.encode(wallet, key)
            encoded_value = self.encode(wallet, value)
        except Exception:
            # BUG FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; skip unencodable entries only.
            self.logger.info(f'cannot encode {repr(key)} {repr(value)}')
            continue
        bundle["labels"].append({'encryptedLabel': encoded_value,
                                 'externalId': encoded_key})
    await self.do_post("/labels", bundle)
async def pull_thread(self, wallet, force):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.logger.info(f"asking for labels since nonce {nonce}")
try:
response = await self.do_get("/labels/since/%d/for/%s" % (nonce, wallet_id))
except Exception as e:
raise ErrorConnectingServer(e) from e
if response["labels"] is None:
self.logger.info('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f'error: no json {key}')
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.logger.info(f"received {len(response)} labels")
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
    def on_pulled(self, wallet: 'Abstract_Wallet') -> None:
        """Hook invoked after pull_thread() merged new labels; subclasses must override."""
        raise NotImplementedError()
    @ignore_exceptions
    @log_exceptions
    async def pull_safe_thread(self, wallet, force):
        """Like pull_thread(), but connection errors are logged, not raised."""
        try:
            await self.pull_thread(wallet, force)
        except ErrorConnectingServer as e:
            self.logger.info(repr(e))
    def pull(self, wallet, force):
        """Synchronous pull: blocks the calling thread until the label download
        finishes; re-raises anything pull_thread() raises."""
        if not wallet.network: raise Exception(_('You are offline.'))
        return asyncio.run_coroutine_threadsafe(self.pull_thread(wallet, force), wallet.network.asyncio_loop).result()
    def push(self, wallet):
        """Synchronous push: blocks the calling thread until the label upload
        finishes; re-raises anything push_thread() raises."""
        if not wallet.network: raise Exception(_('You are offline.'))
        return asyncio.run_coroutine_threadsafe(self.push_thread(wallet), wallet.network.asyncio_loop).result()
    def start_wallet(self, wallet):
        """Register *wallet* with the plugin and kick off a background pull.

        Derives the AES password/iv and the server-side wallet id
        deterministically from the wallet fingerprint, so the same wallet
        always maps to the same server record.
        """
        if not wallet.network: return  # 'offline' mode
        nonce = self.get_nonce(wallet)
        self.logger.info(f"wallet {wallet.basename()} nonce is {nonce}")
        mpk = wallet.get_fingerprint()
        if not mpk:
            return  # wallet has no fingerprint; cannot derive sync keys
        mpk = mpk.encode('ascii')
        # NOTE(review): key material derived with SHA1 of the fingerprint,
        # truncated to 32 hex chars -- confirm this matches the label-server
        # protocol before changing anything here.
        password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).hexdigest()
        self.wallets[wallet] = (password, iv, wallet_id)
        # If there is an auth token we can try to actually start syncing
        asyncio.run_coroutine_threadsafe(self.pull_safe_thread(wallet, False), wallet.network.asyncio_loop)
    def stop_wallet(self, wallet):
        """Forget the wallet's key material; safe to call if never registered."""
        self.wallets.pop(wallet, None)
| 37.896907 | 118 | 0.616159 | import asyncio
import hashlib
import json
import sys
import traceback
from typing import Union, TYPE_CHECKING
import base64
from electrum.plugin import BasePlugin, hook
from electrum.crypto import aes_encrypt_with_iv, aes_decrypt_with_iv
from electrum.i18n import _
from electrum.util import log_exceptions, ignore_exceptions, make_aiohttp_session
from electrum.network import Network
if TYPE_CHECKING:
from electrum.wallet import Abstract_Wallet
class ErrorConnectingServer(Exception):
    """Raised when the label server cannot be reached or answered an error."""

    def __init__(self, reason: Union[str, Exception] = None):
        self.reason = reason

    def __str__(self):
        header = _("Error connecting to {} server").format('Labels')
        cause = self.reason
        if isinstance(cause, BaseException):
            cause = repr(cause)
        if cause:
            return f"{header}: {cause}"
        return header
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.electrum.org'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
nonce = wallet.db.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.logger.info(f"set {wallet.basename()} nonce to {nonce}")
wallet.db.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if wallet not in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
asyncio.run_coroutine_threadsafe(self.do_post_safe("/label", bundle), wallet.network.asyncio_loop)
self.set_nonce(wallet, nonce + 1)
@ignore_exceptions
@log_exceptions
async def do_post_safe(self, *args):
await self.do_post(*args)
async def do_get(self, url = "/labels"):
url = 'https://' + self.target_host + url
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.get(url) as result:
return await result.json()
async def do_post(self, url = "/labels", data=None):
url = 'https://' + self.target_host + url
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.post(url, json=data) as result:
try:
return await result.json()
except Exception as e:
raise Exception('Could not decode: ' + await result.text()) from e
async def push_thread(self, wallet):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.logger.info(f'cannot encode {repr(key)} {repr(value)}')
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
await self.do_post("/labels", bundle)
async def pull_thread(self, wallet, force):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.logger.info(f"asking for labels since nonce {nonce}")
try:
response = await self.do_get("/labels/since/%d/for/%s" % (nonce, wallet_id))
except Exception as e:
raise ErrorConnectingServer(e) from e
if response["labels"] is None:
self.logger.info('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f'error: no json {key}')
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.logger.info(f"received {len(response)} labels")
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
def on_pulled(self, wallet: 'Abstract_Wallet') -> None:
raise NotImplementedError()
@ignore_exceptions
@log_exceptions
async def pull_safe_thread(self, wallet, force):
try:
await self.pull_thread(wallet, force)
except ErrorConnectingServer as e:
self.logger.info(repr(e))
def pull(self, wallet, force):
if not wallet.network: raise Exception(_('You are offline.'))
return asyncio.run_coroutine_threadsafe(self.pull_thread(wallet, force), wallet.network.asyncio_loop).result()
def push(self, wallet):
if not wallet.network: raise Exception(_('You are offline.'))
return asyncio.run_coroutine_threadsafe(self.push_thread(wallet), wallet.network.asyncio_loop).result()
def start_wallet(self, wallet):
if not wallet.network: return
nonce = self.get_nonce(wallet)
self.logger.info(f"wallet {wallet.basename()} nonce is {nonce}")
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
asyncio.run_coroutine_threadsafe(self.pull_safe_thread(wallet, False), wallet.network.asyncio_loop)
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
| true | true |
f72b2ad2a58898693037001dda7e833ae44efbc4 | 682 | py | Python | pyntcloud/structures/kdtree.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 1,142 | 2016-10-10T08:55:30.000Z | 2022-03-30T04:46:16.000Z | pyntcloud/structures/kdtree.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 195 | 2016-10-10T08:30:37.000Z | 2022-02-17T12:51:17.000Z | pyntcloud/structures/kdtree.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 215 | 2017-02-28T00:50:29.000Z | 2022-03-22T17:01:31.000Z | from scipy.spatial import cKDTree
from .base import Structure
class KDTree(cKDTree, Structure):
    """scipy ``cKDTree`` wrapper with deferred construction.

    ``__init__`` only records the build parameters; the (possibly expensive)
    tree build happens when :meth:`compute` is called.
    """
    def __init__(self, *, points, leafsize=16, compact_nodes=False, balanced_tree=False):
        # Structure.__init__ presumably stores points as self._points,
        # which compute() reads -- see .base (not visible here).
        Structure.__init__(self, points=points)
        self._leafsize = leafsize
        self._compact_nodes = compact_nodes
        self._balanced_tree = balanced_tree
    def compute(self):
        """Build the cKDTree in place and set a parameter-derived string id."""
        self.id = "K({},{},{})".format(self._leafsize, self._compact_nodes, self._balanced_tree)
        cKDTree.__init__(
            self,
            self._points,
            leafsize=self._leafsize,
            compact_nodes=self._compact_nodes,
| 31 | 96 | 0.66129 | from scipy.spatial import cKDTree
from .base import Structure
class KDTree(cKDTree, Structure):
def __init__(self, *, points, leafsize=16, compact_nodes=False, balanced_tree=False):
Structure.__init__(self, points=points)
self._leafsize = leafsize
self._compact_nodes = compact_nodes
self._balanced_tree = balanced_tree
def compute(self):
self.id = "K({},{},{})".format(self._leafsize, self._compact_nodes, self._balanced_tree)
cKDTree.__init__(
self,
self._points,
leafsize=self._leafsize,
compact_nodes=self._compact_nodes,
balanced_tree=self._balanced_tree)
| true | true |
f72b2b40cbc83a0f7d47d5e52998f5659b19648e | 1,216 | py | Python | facemask.py | bhargavyagnik/FaceMaskDetection | 990c41a921a2a8a7760492a8dd21e4ab51391e51 | [
"MIT"
] | null | null | null | facemask.py | bhargavyagnik/FaceMaskDetection | 990c41a921a2a8a7760492a8dd21e4ab51391e51 | [
"MIT"
] | null | null | null | facemask.py | bhargavyagnik/FaceMaskDetection | 990c41a921a2a8a7760492a8dd21e4ab51391e51 | [
"MIT"
] | null | null | null | import tensorflow as tf
# Real-time face-mask detection: read webcam frames, detect faces with a Haar
# cascade, classify each face crop with a Keras CNN, and draw the result.
# (`tf` is imported as tensorflow at the top of this file.)
import cv2
import numpy as np
# Binary classifier; presumably trained on 128x128 RGB face crops -- confirm.
model = tf.keras.models.load_model('saved_model/model_3.h5')
face_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
source = cv2.VideoCapture(1)  # camera index 1 (external webcam) -- TODO confirm
labels_dict = {0: 'with_mask', 1: 'without_mask'}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}  # BGR: green = mask, red = no mask
while (True):
    ret, img = source.read()
    faces = face_clsfr.detectMultiScale(img)
    print(img.shape)
    for x, y, w, h in faces:
        # NOTE(review): w is used for the vertical extent too (square crop);
        # h is unused here -- confirm whether img[y:y + h, x:x + w] was intended.
        face_img = img[y:y + w, x:x + w]
        resized = cv2.resize(face_img, (128, 128))
        normalized = resized / 255.0  # scale pixels into [0, 1]
        reshaped = np.reshape(normalized, (1, 128, 128, 3))  # add batch dim
        result = model.predict(reshaped)
        print(result)
        # Assumes the model emits a single probability; int() would fail on a
        # multi-element output -- TODO confirm model output shape.
        label=int(result.round().flatten())
        cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
        cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)  # filled strip behind text
        cv2.putText(
            img, labels_dict[label],
            (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
        cv2.imshow('LIVE', img)
    key = cv2.waitKey(1)
    if (key == 27):  # ESC quits
        break
cv2.destroyAllWindows()
source.release() | 30.4 | 75 | 0.578947 | import tensorflow as tf
import cv2
import numpy as np
model = tf.keras.models.load_model('saved_model/model_3.h5')
face_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
source = cv2.VideoCapture(1)
labels_dict = {0: 'with_mask', 1: 'without_mask'}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}
while (True):
ret, img = source.read()
faces = face_clsfr.detectMultiScale(img)
print(img.shape)
for x, y, w, h in faces:
face_img = img[y:y + w, x:x + w]
resized = cv2.resize(face_img, (128, 128))
normalized = resized / 255.0
reshaped = np.reshape(normalized, (1, 128, 128, 3))
result = model.predict(reshaped)
print(result)
label=int(result.round().flatten())
cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)
cv2.putText(
img, labels_dict[label],
(x, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
cv2.imshow('LIVE', img)
key = cv2.waitKey(1)
if (key == 27):
break
cv2.destroyAllWindows()
source.release() | true | true |
f72b2cd039ad9416819b474d149c3f6fbea635ff | 20,451 | py | Python | archive/canvas_test_6.py | bperez7/moments_models | d83e67b5d85f611ebf8dc10bc0d7569c962a37c2 | [
"BSD-2-Clause"
] | null | null | null | archive/canvas_test_6.py | bperez7/moments_models | d83e67b5d85f611ebf8dc10bc0d7569c962a37c2 | [
"BSD-2-Clause"
] | null | null | null | archive/canvas_test_6.py | bperez7/moments_models | d83e67b5d85f611ebf8dc10bc0d7569c962a37c2 | [
"BSD-2-Clause"
] | null | null | null | import cv2
import os
import time
import subprocess
#from matplotlib import pyplot as plt
import numpy as np
#from test_video import get_predictions_results
#cam_capture = cv2.VideoCapture(0)
#cv2.destroyAllWindows()
""" TODO:
1. Start video at specified time
2. Right click to indicate trimming points
3. Output file name
"""
frame_time = 10  # ms delay passed to cv2.waitKey between displayed frames
frame_count = 0  # appears unused in this file (kept for compatibility)
# Module-level defaults; VideoCropTool keeps its own per-instance copies of these.
global_trim_time = None
crop_started = False
class VideoCropTool:
    def __init__(self, video_path, output_file, output_folder, video_start_time,
                 capture, output_label, time_window_on = False,time_window=3):
        """
        Args:
            video_path: path of the source video file (used by the crop script).
            output_file: base name (no extension) for the trimmed clip.
            output_folder: directory the trimmed clip is written into.
            video_start_time: intended playback start in seconds (stored only).
            capture: an already-opened cv2.VideoCapture for video_path.
            output_label: action label used to name/sort the output clip.
            time_window_on: unused flag -- kept for API compatibility.
            time_window: fallback clip length in seconds.
        """
        self.video_path = video_path
        self.output_file = output_file
        self.output_folder = output_folder
        self.output_label=output_label
        self.video_start_time = video_start_time
        self.cap = capture
        # --- crop-box state, driven by mouse events in click_box() ---
        self.box_started = False   # left button pressed; box being dragged
        self.box_created = False   # left button released; box finalized
        self.box_finished = False  # second right click ended the selection
        self.start = None          # (x, y) of the box's first corner
        self.end = None            # current drag position
        # --- time-trim state, driven by right clicks in click_box() ---
        self.global_trim_time = None       # "HH:MM:SS" of the frame on screen
        self.global_trim_time_secs = None  # same timestamp in whole seconds
        self.crop_started = False
        self.start_trim_time = None
        self.end_trim_time = None
        self.start_trim_time_secs = None
        self.end_trim_time_secs = None
        self.time_window = time_window
        self.time_crop_secs = 0   # duration selected by the two right clicks
        self.recording = False    # True while the red "recording" dot shows
        # text overlaid on the frame (e.g. model prediction results)
        self.result_text = ""
        # frame dimensions; filled in once playback starts
        self.frame_width = 0
        self.frame_height = 0
    def click_box(self,event, x,y, flags, param):
        """Mouse callback: left-drag draws the crop box, right clicks mark time.

        The first right click records the trim start (and turns on the
        recording indicator); the second records the trim end, computes the
        selected duration and flags the crop as finished. Registered via
        cv2.setMouseCallback; *flags* and *param* are unused.
        """
        # Start drawing the box when the left button goes down.
        if event == cv2.EVENT_LBUTTONDOWN:
            self.start = (x, y)
            self.box_started = True
        # Track the drag so the preview rectangle follows the cursor.
        elif event == cv2.EVENT_MOUSEMOVE:
            self.end = (x, y)
        # Finalize the box when the left button is released.
        elif event == cv2.EVENT_LBUTTONUP:
            self.final_end = (x, y)
            self.box_created = True
        elif event == cv2.EVENT_RBUTTONDOWN:
            if self.crop_started != True:
                # First right click: remember where the clip starts.
                self.crop_started = True
                self.start_trim_time = self.global_trim_time
                self.start_trim_time_secs = self.global_trim_time_secs
                self.recording = True
            else:
                # Second right click: close the time selection.
                self.crop_started = False
                self.trim_end_time = self.global_trim_time
                self.box_finished = True
                self.end_trim_time = self.global_trim_time
                self.end_trim_time_secs = self.global_trim_time_secs
                self.time_crop_secs = self.end_trim_time_secs-self.start_trim_time_secs
                print('crop time')
                print(self.time_crop_secs)
                self.recording = False
    def crop_and_label(self):
        """Play the video, let the user select a region/time span, then cut it.

        - Plays back the video in an OpenCV window ("Frame") with a mouse
          callback for box drawing and time marking (see click_box).
        - When a selection completes, writes and runs a shell wrapper around
          crop_tool.sh and stores the clip under output_folder/output_label/.
        - Appends output_label to custom_labels.txt.
        Press 'q' to quit. Returns None.
        """
        while (self.cap.isOpened()):
            # Capture frame-by-frame
            ret, frame = self.cap.read()
            cv2.namedWindow("Frame")
            cv2.setMouseCallback("Frame", self.click_box)
            # get vcap property (height and width)
            self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
            self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
            if ret == True:
                # Draw the in-progress (or finalized) crop rectangle.
                if self.box_started:
                    rectangle_thickness=30
                    if self.box_created:
                        cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness,color=333)
                    else:
                        cv2.rectangle(frame, self.start, self.end,thickness=rectangle_thickness, color=333)
                # Convert the playback position into "MM:SS"-style strings.
                current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
                current_time_in_secs = round(current_time / 1000)
                self.global_trim_time_secs = current_time_in_secs
                current_time_secs = current_time_in_secs % 60
                current_time_mins = current_time_in_secs // 60
                prev_time_in_secs = current_time_in_secs - self.time_window
                prev_time_secs = prev_time_in_secs % 60
                prev_time_mins = prev_time_in_secs // 60
                if (current_time_mins // 10 == 0):  # single digit -> zero-pad
                    current_time_mins_str = "0" + str(current_time_mins)
                else:
                    current_time_mins_str = str(current_time_mins)
                if (current_time_secs // 10 == 0):  # single digit
                    current_time_secs_str = "0" + str(current_time_secs)
                else:
                    current_time_secs_str = str(current_time_secs)
                if (prev_time_mins // 10 == 0):  # single digit
                    prev_time_mins_str = "0" + str(prev_time_mins)
                else:
                    prev_time_mins_str = str(prev_time_mins)
                if (prev_time_secs // 10 == 0):  # single digit
                    prev_time_secs_str = "0" + str(prev_time_secs)
                else:
                    prev_time_secs_str = str(prev_time_secs)
                # Duration of the selected span, formatted for ffmpeg-style -t.
                if (self.time_crop_secs<10):
                    TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
                else:
                    TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
                end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
                self.global_trim_time = end_time
                start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
                # Overlay the timestamp and any result text on the frame.
                text = str(round(current_time, 2))
                org = (50, 50)
                result_origin = (50, 200)
                color = (255, 0, 0)
                thickness = 2
                fontScale = 1
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, text, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                cv2.putText(frame, self.result_text, result_origin, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # Red dot in the top-right corner while a time span is open.
                if self.recording:
                    radius = 20
                    circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                    circle_color = (0, 0, 255)
                    circle_thickness = -1  # -1 = filled circle
                    image = cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
                cv2.imshow('Frame', frame)
                if self.box_finished:
                    # Build crop_tool.sh arguments from the box geometry and
                    # the selected time span.
                    left_arg = "-l " + str(self.start[0]) + " "
                    top_arg = "-t " + str(self.start[1]) + " "
                    width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                    height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
                    video_arg = "-f " + self.video_path + " "
                    output_arg = "-o " + self.output_folder + "/" + self.output_label + "/" + self.output_file + " "
                    beginning_arg = "-b " + str(self.start_trim_time_secs) + " "
                    end_arg = "-e " + TIME_WINDOW_STR
                    crop_time_start = time.time()
                    if not os.path.exists(self.output_folder+"/"+self.output_label):
                        os.makedirs(self.output_folder+"/"+self.output_label)
                    command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                    # NOTE(review): this chmod raises FileNotFoundError if
                    # output_command.sh does not exist yet (first ever run) --
                    # the second chmod after the write already covers it.
                    os.chmod("./output_command.sh", 0o755)
                    with open("output_command.sh", "w") as text_file:
                        text_file.write('#!/bin/bash')
                        text_file.write("\n")
                        text_file.write(command + "\n")
                        text_file.write('#hello')
                    os.chmod("./output_command.sh", 0o755)
                    subprocess.check_call(["./output_command.sh"])
                    crop_time_end = time.time()
                    crop_elapsed_time = crop_time_end - crop_time_start
                    print("Crop Time: " + str(crop_elapsed_time))
                    # Reset selection state so another crop can be made.
                    self.box_created = False
                    self.box_started = False
                    self.box_finished = False
                    with open("custom_labels.txt", "a+") as text_file:
                        # NOTE(review): "a+" opens positioned at end-of-file,
                        # so this loop reads nothing and label_exists stays
                        # False -- seek(0) first if dedup is intended.
                        label_exists = False
                        for line in text_file:
                            if line==self.output_label:
                                label_exists=True
                                break
                        if not label_exists:
                            text_file.write("\n")
                            text_file.write(self.output_label)
                        print(self.output_label)
            # Press Q on keyboard to exit
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            # Break the loop when the video ends
            else:
                break
        self.cap.release()
        cv2.destroyAllWindows()
    def crop_and_predict(self):
        """Play the video, cut the selected region/span, and run the model.

        - Same playback/selection loop as crop_and_label, but the clip is
          written directly into output_folder (no label subfolder).
        - After cropping, runs test_video.py (Moments in Time, resnet3d50) on
          the clip and prints the predictions from predictions.txt.
        Press 'q' to quit. Returns None.
        """
        while (self.cap.isOpened()):
            # Capture frame-by-frame
            ret, frame = self.cap.read()
            cv2.namedWindow("Frame")
            cv2.setMouseCallback("Frame", self.click_box)
            # get vcap property (height and width)
            self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
            self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
            if ret == True:
                # Draw the in-progress (or finalized) crop rectangle.
                if self.box_started:
                    rectangle_thickness = 10
                    if self.box_created:
                        cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness, color=333)
                    else:
                        cv2.rectangle(frame, self.start, self.end, thickness=rectangle_thickness, color=333)
                # Convert the playback position into "MM:SS"-style strings.
                current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
                current_time_in_secs = round(current_time / 1000)
                current_time_secs = current_time_in_secs % 60
                current_time_mins = current_time_in_secs // 60
                self.global_trim_time_secs = current_time_in_secs
                prev_time_in_secs = current_time_in_secs - self.time_window
                prev_time_secs = prev_time_in_secs % 60
                prev_time_mins = prev_time_in_secs // 60
                if (current_time_mins // 10 == 0):  # single digit -> zero-pad
                    current_time_mins_str = "0" + str(current_time_mins)
                else:
                    current_time_mins_str = str(current_time_mins)
                if (current_time_secs // 10 == 0):  # single digit
                    current_time_secs_str = "0" + str(current_time_secs)
                else:
                    current_time_secs_str = str(current_time_secs)
                if (prev_time_mins // 10 == 0):  # single digit
                    prev_time_mins_str = "0" + str(prev_time_mins)
                else:
                    prev_time_mins_str = str(prev_time_mins)
                if (prev_time_secs // 10 == 0):  # single digit
                    prev_time_secs_str = "0" + str(prev_time_secs)
                else:
                    prev_time_secs_str = str(prev_time_secs)
                # Duration of the selected span, formatted for ffmpeg-style -t.
                if (self.time_crop_secs < 10):
                    TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
                else:
                    TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
                end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
                self.global_trim_time = end_time
                start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
                # Overlay the timestamp and any result text on the frame.
                text = str(round(current_time, 2))
                org = (50, 50)
                result_origin = (50, 200)
                color = (255, 0, 0)
                thickness = 2
                fontScale = 1
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, text, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                cv2.putText(frame, self.result_text, result_origin, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # Red dot in the top-right corner while a time span is open.
                if self.recording:
                    radius = 20
                    circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                    circle_color = (0, 0, 255)
                    circle_thickness = -1  # -1 = filled circle
                    cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
                cv2.imshow('Frame', frame)
                if self.box_finished:
                    # Build crop_tool.sh arguments from the box geometry and
                    # the selected time span.
                    left_arg = "-l " + str(self.start[0]) + " "
                    top_arg = "-t " + str(self.start[1]) + " "
                    width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                    height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
                    video_arg = "-f " + self.video_path + " "
                    output_arg = "-o " + self.output_folder + "/" + self.output_file + " "
                    beginning_arg = "-b " + str(self.start_trim_time_secs)+ " "
                    end_arg = "-e " + TIME_WINDOW_STR
                    print("beginning and end ")
                    print(beginning_arg)
                    print(end_arg)
                    crop_time_start = time.time()
                    command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                    # NOTE(review): raises FileNotFoundError if
                    # output_command.sh does not exist yet (first ever run).
                    os.chmod("./output_command.sh", 0o755)
                    with open("output_command.sh", "w") as text_file:
                        text_file.write('#!/bin/bash')
                        text_file.write("\n")
                        text_file.write(command + "\n")
                        text_file.write('#hello')
                    os.chmod("./output_command.sh", 0o755)
                    subprocess.check_call(["./output_command.sh"])
                    crop_time_end = time.time()
                    crop_elapsed_time = crop_time_end - crop_time_start
                    print("Crop Time: " + str(crop_elapsed_time))
                    # Run the Moments in Time model on the cropped clip.
                    prediction_time_start = time.time()
                    os.system("python test_video.py --video_file " + self.output_folder+"/"+self.output_file + ".mp4 " + "--arch resnet3d50")
                    prediction_time_end = time.time()
                    prediction_elapsed_time = prediction_time_end - prediction_time_start
                    print("Prediction Time: " + str(prediction_elapsed_time))
                    # Opening prediction file written by test_video.py.
                    # NOTE(review): file1 is never closed, and the local
                    # result_text is never assigned to self.result_text, so
                    # the prediction never appears in the overlay -- confirm
                    # whether self.result_text = result_text was intended.
                    file1 = open('predictions.txt', 'r')
                    result_text = ""
                    for line in file1:
                        print(line)
                        result_text += line
                        break  # just first prediction
                    # Reset selection state so another crop can be made.
                    self.box_created = False
                    self.box_started = False
                    self.box_finished = False
                # Press Q on keyboard to exit
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            # Break the loop when the video ends
            else:
                break
        self.cap.release()
        cv2.destroyAllWindows()
def main():
    """Open a sample video at a chosen position and run the crop/predict UI.

    Edit the constants below to point at another video, output name/folder,
    label, or start time. Removed unused locals (TIME_WINDOW, result_text)
    and dead commented-out code.
    """
    video_file_path = 'videos/IMG_4884.MOV'
    output_file = "demo_clip"
    output_folder = "trimmed_videos"
    output_label = "tossing"
    video_start_time = 0  # seconds into the video to begin playback
    fps = 30  # assumed frame rate of the source -- TODO read from the capture
    video_start_frame = video_start_time * fps
    cap = cv2.VideoCapture(video_file_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, video_start_frame)
    my_crop_tool = VideoCropTool(video_file_path, output_file, output_folder, 0, cap, output_label)
    # Use crop_and_label() instead to sort clips into labeled folders.
    my_crop_tool.crop_and_predict()
# Script entry point: only run the UI when executed directly, not on import.
if __name__ == "__main__":
    main()
| 33.691928 | 156 | 0.509413 | import cv2
import os
import time
import subprocess
import numpy as np
frame_time = 10
frame_count = 0
global_trim_time = None
crop_started = False
class VideoCropTool:
def __init__(self, video_path, output_file, output_folder, video_start_time,
capture, output_label, time_window_on = False,time_window=3):
self.video_path = video_path
self.output_file = output_file
self.output_folder = output_folder
self.output_label=output_label
self.video_start_time = video_start_time
self.cap = capture
self.box_started = False
self.box_created = False
self.box_finished = False
self.start = None
self.end = None
self.global_trim_time = None
self.global_trim_time_secs = None
self.crop_started = False
self.start_trim_time = None
self.end_trim_time = None
self.start_trim_time_secs = None
self.end_trim_time_secs = None
self.time_window = time_window
self.time_crop_secs = 0
self.recording = False
self.result_text = ""
self.frame_width = 0
self.frame_height = 0
def click_box(self,event, x,y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.start = (x, y)
self.box_started = True
elif event == cv2.EVENT_MOUSEMOVE:
self.end = (x, y)
elif event == cv2.EVENT_LBUTTONUP:
self.final_end = (x, y)
self.box_created = True
elif event == cv2.EVENT_RBUTTONDOWN:
if self.crop_started != True:
self.crop_started = True
self.start_trim_time = self.global_trim_time
self.start_trim_time_secs = self.global_trim_time_secs
self.recording = True
else:
self.crop_started = False
self.trim_end_time = self.global_trim_time
self.box_finished = True
self.end_trim_time = self.global_trim_time
self.end_trim_time_secs = self.global_trim_time_secs
self.time_crop_secs = self.end_trim_time_secs-self.start_trim_time_secs
print('crop time')
print(self.time_crop_secs)
self.recording = False
def crop_and_label(self):
while (self.cap.isOpened()):
ret, frame = self.cap.read()
cv2.namedWindow("Frame")
cv2.setMouseCallback("Frame", self.click_box)
self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
if ret == True:
if self.box_started:
rectangle_thickness=30
if self.box_created:
cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness,color=333)
else:
cv2.rectangle(frame, self.start, self.end,thickness=rectangle_thickness, color=333)
current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
current_time_in_secs = round(current_time / 1000)
self.global_trim_time_secs = current_time_in_secs
current_time_secs = current_time_in_secs % 60
current_time_mins = current_time_in_secs // 60
prev_time_in_secs = current_time_in_secs - self.time_window
prev_time_secs = prev_time_in_secs % 60
prev_time_mins = prev_time_in_secs // 60
if (current_time_mins // 10 == 0):
current_time_mins_str = "0" + str(current_time_mins)
else:
current_time_mins_str = str(current_time_mins)
if (current_time_secs // 10 == 0):
current_time_secs_str = "0" + str(current_time_secs)
else:
current_time_secs_str = str(current_time_secs)
if (prev_time_mins // 10 == 0):
prev_time_mins_str = "0" + str(prev_time_mins)
else:
prev_time_mins_str = str(prev_time_mins)
if (prev_time_secs // 10 == 0):
prev_time_secs_str = "0" + str(prev_time_secs)
else:
prev_time_secs_str = str(prev_time_secs)
if (self.time_crop_secs<10):
TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
else:
TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
self.global_trim_time = end_time
start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
text = str(round(current_time, 2))
org = (50, 50)
result_origin = (50, 200)
color = (255, 0, 0)
thickness = 2
fontScale = 1
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, text, org, font,
fontScale, color, thickness, cv2.LINE_AA)
cv2.putText(frame, self.result_text, result_origin, font,
fontScale, color, thickness, cv2.LINE_AA)
if self.recording:
radius = 20
circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
circle_color = (0, 0, 255)
circle_thickness = -1
image = cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
cv2.imshow('Frame', frame)
if self.box_finished:
left_arg = "-l " + str(self.start[0]) + " "
top_arg = "-t " + str(self.start[1]) + " "
width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
video_arg = "-f " + self.video_path + " "
output_arg = "-o " + self.output_folder + "/" + self.output_label + "/" + self.output_file + " "
beginning_arg = "-b " + str(self.start_trim_time_secs) + " "
end_arg = "-e " + TIME_WINDOW_STR
crop_time_start = time.time()
if not os.path.exists(self.output_folder+"/"+self.output_label):
os.makedirs(self.output_folder+"/"+self.output_label)
command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
os.chmod("./output_command.sh", 0o755)
with open("output_command.sh", "w") as text_file:
text_file.write('#!/bin/bash')
text_file.write("\n")
text_file.write(command + "\n")
text_file.write('#hello')
os.chmod("./output_command.sh", 0o755)
subprocess.check_call(["./output_command.sh"])
crop_time_end = time.time()
crop_elapsed_time = crop_time_end - crop_time_start
print("Crop Time: " + str(crop_elapsed_time))
self.box_created = False
self.box_started = False
self.box_finished = False
with open("custom_labels.txt", "a+") as text_file:
label_exists = False
for line in text_file:
if line==self.output_label:
label_exists=True
break
if not label_exists:
text_file.write("\n")
text_file.write(self.output_label)
print(self.output_label)
if cv2.waitKey(frame_time) & 0xFF == ord('q'):
break
else:
break
self.cap.release()
cv2.destroyAllWindows()
def crop_and_predict(self):
    """Preview the video, let the user draw a crop box, then cut the selected
    spatial/temporal window with ``crop_tool.sh`` and run an action-recognition
    model (``test_video.py``) on the resulting clip.

    The loop ends when the capture runs out of frames or the user presses
    'q' in the preview window.
    """
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        cv2.namedWindow("Frame")
        cv2.setMouseCallback("Frame", self.click_box)
        self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        if ret == True:
            if self.box_started:
                rectangle_thickness = 10
                # While dragging, follow the live mouse point (self.end);
                # once the box is created, freeze on the final corner.
                if self.box_created:
                    cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness, color=333)
                else:
                    cv2.rectangle(frame, self.start, self.end, thickness=rectangle_thickness, color=333)
            # Playback position (ms) and the start of the trailing crop window.
            current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
            current_time_in_secs = round(current_time / 1000)
            current_time_secs = current_time_in_secs % 60
            current_time_mins = current_time_in_secs // 60
            self.global_trim_time_secs = current_time_in_secs
            prev_time_in_secs = current_time_in_secs - self.time_window
            prev_time_secs = prev_time_in_secs % 60
            prev_time_mins = prev_time_in_secs // 60
            # Zero-pad minute/second components to two digits for HH:MM:SS.
            if (current_time_mins // 10 == 0):
                current_time_mins_str = "0" + str(current_time_mins)
            else:
                current_time_mins_str = str(current_time_mins)
            if (current_time_secs // 10 == 0):
                current_time_secs_str = "0" + str(current_time_secs)
            else:
                current_time_secs_str = str(current_time_secs)
            if (prev_time_mins // 10 == 0):
                prev_time_mins_str = "0" + str(prev_time_mins)
            else:
                prev_time_mins_str = str(prev_time_mins)
            if (prev_time_secs // 10 == 0):
                prev_time_secs_str = "0" + str(prev_time_secs)
            else:
                prev_time_secs_str = str(prev_time_secs)
            if (self.time_crop_secs < 10):
                TIME_WINDOW_STR = "00:00:" + "0" + str(self.time_crop_secs)
            else:
                TIME_WINDOW_STR = "00:00:" + str(self.time_crop_secs)
            end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
            self.global_trim_time = end_time
            start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
            # Overlay the timestamp and the latest prediction text.
            text = str(round(current_time, 2))
            org = (50, 50)
            result_origin = (50, 200)
            color = (255, 0, 0)
            thickness = 2
            fontScale = 1
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, text, org, font,
                        fontScale, color, thickness, cv2.LINE_AA)
            cv2.putText(frame, self.result_text, result_origin, font,
                        fontScale, color, thickness, cv2.LINE_AA)
            if self.recording:
                # Red "recording" dot in the top-right corner.
                radius = 20
                circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                circle_color = (0, 0, 255)
                circle_thickness = -1
                cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
            cv2.imshow('Frame', frame)
            if self.box_finished:
                # Assemble crop_tool.sh arguments: left/top/width/height of the
                # selection plus the temporal window of the clip.
                left_arg = "-l " + str(self.start[0]) + " "
                top_arg = "-t " + str(self.start[1]) + " "
                width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                height_arg = "-h " + str(self.final_end[1] - self.start[1]) + " "
                video_arg = "-f " + self.video_path + " "
                output_arg = "-o " + self.output_folder + "/" + self.output_file + " "
                beginning_arg = "-b " + str(self.start_trim_time_secs) + " "
                end_arg = "-e " + TIME_WINDOW_STR
                print("beginning and end ")
                print(beginning_arg)
                print(end_arg)
                crop_time_start = time.time()
                command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                # Write a throwaway wrapper script, make it executable, run it.
                # FIX: the original chmod'ed the file *before* creating it,
                # which raises FileNotFoundError on the first ever run; the
                # pre-create chmod was dropped (the post-write chmod suffices).
                with open("output_command.sh", "w") as text_file:
                    text_file.write('#!/bin/bash')
                    text_file.write("\n")
                    text_file.write(command + "\n")
                    text_file.write('#hello')
                os.chmod("./output_command.sh", 0o755)
                subprocess.check_call(["./output_command.sh"])
                crop_time_end = time.time()
                crop_elapsed_time = crop_time_end - crop_time_start
                print("Crop Time: " + str(crop_elapsed_time))
                prediction_time_start = time.time()
                os.system("python test_video.py --video_file " + self.output_folder + "/" + self.output_file + ".mp4 " + "--arch resnet3d50")
                prediction_time_end = time.time()
                prediction_elapsed_time = prediction_time_end - prediction_time_start
                print("Prediction Time: " + str(prediction_elapsed_time))
                # FIX: read only the first line of the model output and close
                # the file promptly (the original leaked the open handle).
                result_text = ""
                with open('predictions.txt', 'r') as predictions_file:
                    for line in predictions_file:
                        print(line)
                        result_text += line
                        break
                # NOTE(review): result_text is built but never assigned to
                # self.result_text, so the on-screen overlay never updates —
                # confirm whether `self.result_text = result_text` was intended.
                self.box_created = False
                self.box_started = False
                self.box_finished = False
            # NOTE(review): `frame_time` is not defined in this method —
            # presumably a module-level constant (ms per frame); confirm.
            if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                break
        else:
            break
    self.cap.release()
    cv2.destroyAllWindows()
def main():
    """Demo entry point: open the sample video and launch the interactive
    crop-and-predict tool.

    FIX: dropped the unused locals ``TIME_WINDOW`` and ``result_text`` that
    the original defined but never read.
    """
    video_file_path = 'videos/IMG_4884.MOV'
    output_file = "demo_clip"
    output_folder = "trimmed_videos"
    output_label = "tossing"
    # Seek the capture to a start offset expressed in seconds (0 = beginning).
    video_start_time = 0
    fps = 30
    video_start_frame = video_start_time * fps
    cap = cv2.VideoCapture(video_file_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, video_start_frame)
    my_crop_tool = VideoCropTool(video_file_path, output_file, output_folder, 0, cap, output_label)
    my_crop_tool.crop_and_predict()


if __name__ == "__main__":
    main()
| true | true |
f72b2cffb7796783443939305fa1035e7ad944b2 | 13,043 | py | Python | cltk/tests/test_nlp/test_tag.py | mcnorton05/cltk | 80dbbd6ee378ed4a6dd1723e4405e314b25f1638 | [
"MIT"
] | 1 | 2020-05-01T08:21:22.000Z | 2020-05-01T08:21:22.000Z | cltk/tests/test_nlp/test_tag.py | ecomp-shONgit/cltk | 7bc3ffd1bbbfa5d036297395d7e51b99b25b81ea | [
"MIT"
] | null | null | null | cltk/tests/test_nlp/test_tag.py | ecomp-shONgit/cltk | 7bc3ffd1bbbfa5d036297395d7e51b99b25b81ea | [
"MIT"
] | null | null | null | """Test cltk.tag."""
import os
import shutil
import unittest
from cltk.corpus.utils.importer import CorpusImporter
from cltk.stem.latin.j_v import JVReplacer
from cltk.tag import ner
from cltk.tag.ner import NamedEntityReplacer
from cltk.tag.pos import POSTag
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase):  # pylint: disable=R0904
    """Class for unittest"""

    # NOTE(review): get_cltk_data_dir() is used throughout but never imported
    # in this module — confirm it is provided by the test environment
    # (e.g. from cltk.utils), otherwise these tests raise NameError.

    def setUp(self):
        """Clone Greek models in order to test pull function and other model
        tests later.
        """
        # Each corpus is imported once; its README is used as a cheap sentinel
        # that the clone actually landed on disk.
        corpus_importer = CorpusImporter('greek')
        corpus_importer.import_corpus('greek_models_cltk')
        file_rel = os.path.join(get_cltk_data_dir() + '/greek/model/greek_models_cltk/README.md')
        file = os.path.expanduser(file_rel)
        file_exists = os.path.isfile(file)
        self.assertTrue(file_exists)

        corpus_importer = CorpusImporter('latin')
        corpus_importer.import_corpus('latin_models_cltk')
        file_rel = os.path.join(get_cltk_data_dir() + '/latin/model/latin_models_cltk/README.md')
        file = os.path.expanduser(file_rel)
        file_exists = os.path.isfile(file)
        self.assertTrue(file_exists)

        corpus_importer = CorpusImporter('french')
        corpus_importer.import_corpus('french_data_cltk')
        file_rel = os.path.join(get_cltk_data_dir() + '/french/text/french_data_cltk/README.md')
        file = os.path.expanduser(file_rel)
        file_exists = os.path.isfile(file)
        self.assertTrue(file_exists)

        corpus_importer = CorpusImporter("old_norse")
        corpus_importer.import_corpus("old_norse_models_cltk")
        file_rel = os.path.join(get_cltk_data_dir() + '/old_norse/model/old_norse_models_cltk/README.md')
        file = os.path.expanduser(file_rel)
        file_exists = os.path.isfile(file)
        self.assertTrue(file_exists)

        corpus_importer = CorpusImporter('middle_low_german')
        corpus_importer.import_corpus('middle_low_german_models_cltk')
        file_rel = os.path.join(get_cltk_data_dir() + '/middle_low_german/model/middle_low_german_models_cltk/README.md')
        file = os.path.expanduser(file_rel)
        file_exists = os.path.isfile(file)
        self.assertTrue(file_exists)

        corpus_importer = CorpusImporter('old_english')
        corpus_importer.import_corpus('old_english_models_cltk')
        file_rel = os.path.join(get_cltk_data_dir() + '/old_english/model/old_english_models_cltk/README.md')
        file = os.path.expanduser(file_rel)
        file_exists = os.path.isfile(file)
        self.assertTrue(file_exists)

    def test_pos_unigram_greek(self):
        """Test tagging Greek POS with unigram tagger."""
        tagger = POSTag('greek')
        tagged = tagger.tag_unigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_bigram_greek(self):
        """Test tagging Greek POS with bigram tagger."""
        tagger = POSTag('greek')
        tagged = tagger.tag_bigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_trigram_greek(self):
        """Test tagging Greek POS with trigram tagger."""
        tagger = POSTag('greek')
        tagged = tagger.tag_trigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_ngram123_tagger_greek(self):
        """Test tagging Greek POS with a 1-, 2-, and 3-gram backoff tagger."""
        tagger = POSTag('greek')
        tagged = tagger.tag_ngram_123_backoff('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_tnt_tagger_greek(self):
        """Test tagging Greek POS with TnT tagger."""
        tagger = POSTag('greek')
        tagged = tagger.tag_tnt('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_unigram_latin(self):
        """Test tagging Latin POS with unigram tagger."""
        tagger = POSTag('latin')
        tagged = tagger.tag_unigram('Gallia est omnis divisa in partes tres')
        self.assertTrue(tagged)

    def test_pos_bigram_latin(self):
        """Test tagging Latin POS with bigram tagger."""
        tagger = POSTag('latin')
        tagged = tagger.tag_bigram('Gallia est omnis divisa in partes tres')
        self.assertTrue(tagged)

    def test_pos_trigram_latin(self):
        """Test tagging Latin POS with trigram tagger."""
        tagger = POSTag('latin')
        tagged = tagger.tag_trigram('Gallia est omnis divisa in partes tres')
        self.assertTrue(tagged)

    def test_pos_ngram123_tagger_latin(self):
        """Test tagging Latin POS with a 1-, 2-, and 3-gram backoff tagger."""
        tagger = POSTag('latin')
        tagged = tagger.tag_ngram_123_backoff('Gallia est omnis divisa in partes tres')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_tnt_tagger_latin(self):
        """Test tagging Latin POS with TnT tagger."""
        tagger = POSTag('latin')
        tagged = tagger.tag_tnt('Gallia est omnis divisa in partes tres')
        self.assertTrue(tagged)

    def test_pos_crf_tagger_latin(self):
        """Test tagging Latin POS with CRF tagger."""
        tagger = POSTag('latin')
        tagged = tagger.tag_crf('Gallia est omnis divisa in partes tres')
        self.assertTrue(tagged)

    def test_check_latest_latin(self):
        """Test _check_latest_data()"""
        ner._check_latest_data('latin')
        names_path = os.path.normpath(get_cltk_data_dir() + '/latin/model/latin_models_cltk/ner/proper_names.txt')
        self.assertTrue(os.path.isfile(names_path))

    # FIX: this method was originally also named test_check_latest_latin,
    # silently shadowing the method above so that it never ran. Renamed so
    # both the plain check and the delete-and-redownload check execute.
    def test_check_latest_latin_redownload(self):
        """Test _check_latest_data() after wiping the local model dir."""
        path = get_cltk_data_dir() + '/latin/model/latin_models_cltk'
        names_dir = os.path.expanduser(path)
        shutil.rmtree(names_dir, ignore_errors=True)
        ner._check_latest_data('latin')
        names_path = os.path.join(names_dir, 'ner', 'proper_names.txt')
        self.assertTrue(os.path.isfile(names_path))

    def test_tag_ner_str_list_latin(self):
        """Test make_ner(), str, list."""
        text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
        jv_replacer = JVReplacer()
        text_str_iu = jv_replacer.replace(text_str)
        tokens = ner.tag_ner('latin', input_text=text_str_iu, output_type=list)
        target = [('ut',), ('Uenus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
        self.assertEqual(tokens, target)

    def test_tag_ner_list_list_latin(self):
        """Test make_ner(), list, list."""
        text_list = ['ut', 'Venus', 'Sirius']
        jv_replacer = JVReplacer()
        text_list_iu = [jv_replacer.replace(x) for x in text_list]
        tokens = ner.tag_ner('latin', input_text=text_list_iu, output_type=list)
        target = [('ut',), ('Uenus', 'Entity'), ('Sirius', 'Entity')]
        self.assertEqual(tokens, target)

    def test_tag_ner_list_str_latin(self):
        """Test make_ner(), list, str."""
        text_list = ['ut', 'Venus', 'Sirius']
        jv_replacer = JVReplacer()
        text_list_iu = [jv_replacer.replace(x) for x in text_list]
        text = ner.tag_ner('latin', input_text=text_list_iu, output_type=str)
        target = ' ut Uenus/Entity Sirius/Entity'
        self.assertEqual(text, target)

    def test_tag_ner_str_str_latin(self):
        """Test make_ner(), str, str."""
        # FIX: the original constructed JVReplacer twice; once is enough.
        text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
        jv_replacer = JVReplacer()
        text_str_iu = jv_replacer.replace(text_str)
        text = ner.tag_ner('latin', input_text=text_str_iu, output_type=str)
        target = ' ut Uenus/Entity, ut Sirius/Entity, ut Spica/Entity, ut aliae quae primae dicuntur esse mangitudinis.'
        self.assertEqual(text, target)

    def test_tag_ner_str_list_greek(self):
        """Test make_ner(), str, list."""
        text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
        tokens = ner.tag_ner('greek', input_text=text_str, output_type=list)
        target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity'), ('Κάππαρος', 'Entity'), ('Πρωτογενείας', 'Entity'), ('Διονυσιάδες', 'Entity'), ('τὴν',)]
        self.assertEqual(tokens, target)

    def test_tag_ner_list_list_greek(self):
        """Test make_ner(), list, list."""
        text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
        tokens = ner.tag_ner('greek', input_text=text_list, output_type=list)
        target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity')]
        self.assertEqual(tokens, target)

    def test_tag_ner_list_str_greek(self):
        """Test make_ner(), list, str."""
        text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
        text = ner.tag_ner('greek', input_text=text_list, output_type=str)
        target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity'
        self.assertEqual(text, target)

    def test_tag_ner_str_str_greek(self):
        """Test make_ner(), str, str."""
        text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
        text = ner.tag_ner('greek', input_text=text_str, output_type=str)
        target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity Κάππαρος/Entity Πρωτογενείας/Entity Διονυσιάδες/Entity τὴν'
        self.assertEqual(text, target)

    def test_tag_ner_str_list_french(self):
        """Test make_ner(), str, list."""
        text_str = """Berte fu mere Charlemaine, qui pukis tint France et tot le Maine."""
        ner_replacer = NamedEntityReplacer()
        tokens = ner_replacer.tag_ner_fr(input_text=text_str, output_type=list)
        target = [[('Berte', 'entity', 'CHI')], ('fu',), ('mere',), [('Charlemaine', 'entity', 'CHI')], (',',), ('qui',), ('pukis',),
                  ('tint',), [('France', 'entity', 'LOC')], ('et',), ('tot',), ('le',), [('Maine', 'entity', 'LOC')], ('.',)]
        self.assertEqual(tokens, target)

    def test_pos_tnt_tagger_old_norse(self):
        """Test tagging Old Norse POS with TnT tagger."""
        tagger = POSTag('old_norse')
        tagged = tagger.tag_tnt('Hlióðs bið ek allar.')
        print(tagged)
        self.assertTrue(tagged)

    def test_pos_ngram12_tagger_middle_low_german(self):
        """ Test MOG POS 12-backoff tagger"""
        tagger = POSTag('middle_low_german')
        tagged = tagger.tag_ngram_12_backoff('Jck Johannes preister verwarer vnde voirs tender des Juncfrouwen kloisters to Mariendale')
        self.assertTrue(tagged)

    def test_pos_unigram_old_english(self):
        """Test tagging Old English POS with unigram tagger."""
        tagger = POSTag('old_english')
        tagged = tagger.tag_unigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
        self.assertTrue(tagged)

    def test_pos_bigram_old_english(self):
        """Test tagging Old English POS with bigram tagger."""
        tagger = POSTag('old_english')
        tagged = tagger.tag_bigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
        self.assertTrue(tagged)

    def test_pos_trigram_old_english(self):
        """Test tagging old_english POS with trigram tagger."""
        tagger = POSTag('old_english')
        tagged = tagger.tag_trigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
        self.assertTrue(tagged)

    def test_pos_ngram123_tagger_old_english(self):
        """Test tagging Old English POS with a 1-, 2-, and 3-gram backoff tagger."""
        tagger = POSTag('old_english')
        tagged = tagger.tag_ngram_123_backoff('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')  # pylint: disable=line-too-long
        self.assertTrue(tagged)

    def test_pos_crf_tagger_old_english(self):
        """Test tagging Old English POS with CRF tagger."""
        tagger = POSTag('old_english')
        tagged = tagger.tag_crf('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
        self.assertTrue(tagged)

    def test_pos_perceptron_tagger_old_english(self):
        """Test tagging Old English POS with Perceptron tagger."""
        tagger = POSTag('old_english')
        tagged = tagger.tag_perceptron('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
        self.assertTrue(tagged)
# Allow running this test module directly, in addition to test discovery.
if __name__ == '__main__':
    unittest.main()
| 47.952206 | 230 | 0.662501 |
import os
import shutil
import unittest
from cltk.corpus.utils.importer import CorpusImporter
from cltk.stem.latin.j_v import JVReplacer
from cltk.tag import ner
from cltk.tag.ner import NamedEntityReplacer
from cltk.tag.pos import POSTag
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/greek/model/greek_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('latin')
corpus_importer.import_corpus('latin_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/latin/model/latin_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('french')
corpus_importer.import_corpus('french_data_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/french/text/french_data_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter("old_norse")
corpus_importer.import_corpus("old_norse_models_cltk")
file_rel = os.path.join(get_cltk_data_dir() + '/old_norse/model/old_norse_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('middle_low_german')
corpus_importer.import_corpus('middle_low_german_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/middle_low_german/model/middle_low_german_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('old_english')
corpus_importer.import_corpus('old_english_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/old_english/model/old_english_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
def test_pos_unigram_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_unigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_bigram_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_bigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_trigram_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_trigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_ngram_123_backoff('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_tnt_tagger_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_tnt('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_unigram_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_unigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_bigram_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_bigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_trigram_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_trigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_ngram_123_backoff('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_tnt_tagger_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_tnt('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_crf_tagger_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_crf('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_check_latest_latin(self):
ner._check_latest_data('latin')
names_path = os.path.normpath(get_cltk_data_dir() + '/latin/model/latin_models_cltk/ner/proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_check_latest_latin(self):
path = get_cltk_data_dir() + '/latin/model/latin_models_cltk'
names_dir = os.path.expanduser(path)
shutil.rmtree(names_dir, ignore_errors=True)
ner._check_latest_data('latin')
names_path = os.path.join(names_dir, 'ner', 'proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_tag_ner_str_list_latin(self):
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
tokens = ner.tag_ner('latin', input_text=text_str_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_latin(self):
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
tokens = ner.tag_ner('latin', input_text=text_list_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), ('Sirius', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_latin(self):
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
text = ner.tag_ner('latin', input_text=text_list_iu, output_type=str)
target = ' ut Uenus/Entity Sirius/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_latin(self):
jv_replacer = JVReplacer()
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
text = ner.tag_ner('latin', input_text=text_str_iu, output_type=str)
target = ' ut Uenus/Entity, ut Sirius/Entity, ut Spica/Entity, ut aliae quae primae dicuntur esse mangitudinis.'
self.assertEqual(text, target)
def test_tag_ner_str_list_greek(self):
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
tokens = ner.tag_ner('greek', input_text=text_str, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity'), ('Κάππαρος', 'Entity'), ('Πρωτογενείας', 'Entity'), ('Διονυσιάδες', 'Entity'), ('τὴν',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_greek(self):
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
tokens = ner.tag_ner('greek', input_text=text_list, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_greek(self):
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
text = ner.tag_ner('greek', input_text=text_list, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_greek(self):
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
text = ner.tag_ner('greek', input_text=text_str, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity Κάππαρος/Entity Πρωτογενείας/Entity Διονυσιάδες/Entity τὴν'
self.assertEqual(text, target)
def test_tag_ner_str_list_french(self):
text_str = """Berte fu mere Charlemaine, qui pukis tint France et tot le Maine."""
ner_replacer = NamedEntityReplacer()
tokens = ner_replacer.tag_ner_fr(input_text=text_str, output_type=list)
target = [[('Berte', 'entity', 'CHI')], ('fu',), ('mere',), [('Charlemaine', 'entity', 'CHI')], (',',), ('qui',), ('pukis',),
('tint',), [('France', 'entity', 'LOC')], ('et',), ('tot',), ('le',), [('Maine', 'entity', 'LOC')], ('.',)]
self.assertEqual(tokens, target)
def test_pos_tnt_tagger_old_norse(self):
tagger = POSTag('old_norse')
tagged = tagger.tag_tnt('Hlióðs bið ek allar.')
print(tagged)
self.assertTrue(tagged)
def test_pos_ngram12_tagger_middle_low_german(self):
tagger = POSTag('middle_low_german')
tagged = tagger.tag_ngram_12_backoff('Jck Johannes preister verwarer vnde voirs tender des Juncfrouwen kloisters to Mariendale')
self.assertTrue(tagged)
def test_pos_unigram_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_unigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_bigram_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_bigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_trigram_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_trigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_ngram_123_backoff('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_crf_tagger_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_crf('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_perceptron_tagger_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_perceptron('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
if __name__ == '__main__':
unittest.main()
| true | true |
f72b2f24626e265d01ae282b3f14a253aa950b3b | 307 | py | Python | src/dataleach/__init__.py | janies/dataleach | cf8c8784f3fe44cf8f89b7174ba36cb6c56d49d7 | [
"BSD-3-Clause"
] | 1 | 2021-11-08T13:57:52.000Z | 2021-11-08T13:57:52.000Z | src/dataleach/tests/dataleach/sources/__init__.py | janies/dataleach | cf8c8784f3fe44cf8f89b7174ba36cb6c56d49d7 | [
"BSD-3-Clause"
] | null | null | null | src/dataleach/tests/dataleach/sources/__init__.py | janies/dataleach | cf8c8784f3fe44cf8f89b7174ba36cb6c56d49d7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright © 2010, RedJack, LLC.
# All rights reserved.
#
# Please see the LICENSE.txt file in this distribution for license
# details.
# ----------------------------------------------------------------------
| 34.111111 | 72 | 0.361564 | true | true | |
f72b2fafee0e530b65dccaf38409dffa74760181 | 3,545 | py | Python | hddcoin/timelord/timelord_launcher.py | JakubSido/hddcoin-blockchain | 7b9da03edee3512295c0f142c07c4759512ccbca | [
"Apache-2.0"
] | null | null | null | hddcoin/timelord/timelord_launcher.py | JakubSido/hddcoin-blockchain | 7b9da03edee3512295c0f142c07c4759512ccbca | [
"Apache-2.0"
] | null | null | null | hddcoin/timelord/timelord_launcher.py | JakubSido/hddcoin-blockchain | 7b9da03edee3512295c0f142c07c4759512ccbca | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from hddcoin.util.hddcoin_logging import initialize_logging
from hddcoin.util.config import load_config
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
from hddcoin.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
    """Mark the launcher as stopped and kill every tracked vdf_client subprocess."""
    global stopped
    global active_processes
    async with lock:
        stopped = True
        for proc in active_processes:
            try:
                proc.kill()
            except ProcessLookupError:
                # The process already exited on its own; nothing to do.
                pass
def find_vdf_client() -> pathlib.Path:
    """Locate the ``vdf_client`` executable shipped with the installed chiavdf package.

    Raises:
        FileNotFoundError: if the binary is not present next to the package.
    """
    candidate = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
    if not candidate.is_file():
        raise FileNotFoundError("can't find vdf_client binary")
    return candidate
async def spawn_process(host: str, port: int, counter: int):
    """Run one vdf_client worker in a respawn loop until the launcher stops.

    Each iteration spawns ``vdf_client <host-ip> <port> <counter>``, tracks it
    in ``active_processes`` (so ``kill_processes`` can reach it), relays its
    output to the log, and restarts it when it exits.
    """
    global stopped
    global active_processes
    vdf_client_path = find_vdf_client()
    in_grace_period = True
    launched_at = time.time()
    while not stopped:
        try:
            client_dir = vdf_client_path.parent
            client_name = vdf_client_path.name
            resolved = socket.gethostbyname(host)
            proc = await asyncio.create_subprocess_shell(
                f"{client_name} {resolved} {port} {counter}",
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                env={"PATH": client_dir},
            )
        except Exception as e:
            log.warning(f"Exception while spawning process {counter}: {(e)}")
            continue
        async with lock:
            active_processes.append(proc)
        stdout, stderr = await proc.communicate()
        if stdout:
            log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
        if stderr:
            # Suppress stderr noise during the first ten seconds after launch;
            # after that window, surface it as an error.
            if in_grace_period:
                if time.time() - launched_at > 10:
                    in_grace_period = False
            else:
                log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
        log.info(f"Process number {counter} ended.")
        async with lock:
            if proc in active_processes:
                active_processes.remove(proc)
        await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
    """Wait briefly for startup, then launch every configured vdf_client worker."""
    await asyncio.sleep(5)
    # An explicit "host" in the launcher config overrides the node hostname.
    if "host" in config:
        hostname = config["host"]
    else:
        hostname = net_config["self_hostname"]
    port = config["port"]
    workers = [spawn_process(hostname, port, index) for index in range(config["process_count"])]
    await asyncio.gather(*workers)
def main():
    """Entry point: load config, install stop-signal handlers, and run the
    vdf_client spawner until it finishes or is interrupted."""
    root_path = DEFAULT_ROOT_PATH
    setproctitle("hddcoin_timelord_launcher")
    net_config = load_config(root_path, "config.yaml")
    config = net_config["timelord_launcher"]
    initialize_logging("TLauncher", config["logging"], root_path)

    def _on_stop_signal():
        # Schedule an orderly shutdown of all child processes.
        asyncio.create_task(kill_processes())

    loop = asyncio.get_event_loop()
    try:
        loop.add_signal_handler(signal.SIGINT, _on_stop_signal)
        loop.add_signal_handler(signal.SIGTERM, _on_stop_signal)
    except NotImplementedError:
        # e.g. on Windows event loops signal handlers are unavailable.
        log.info("signal handlers unsupported")
    try:
        loop.run_until_complete(spawn_all_processes(config, net_config))
    finally:
        log.info("Launcher fully closed.")
        loop.close()


if __name__ == "__main__":
    main()
| 30.560345 | 87 | 0.655289 | import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from hddcoin.util.hddcoin_logging import initialize_logging
from hddcoin.util.config import load_config
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
from hddcoin.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
global stopped
global active_processes
async with lock:
stopped = True
for process in active_processes:
try:
process.kill()
except ProcessLookupError:
pass
def find_vdf_client() -> pathlib.Path:
p = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
if p.is_file():
return p
raise FileNotFoundError("can't find vdf_client binary")
async def spawn_process(host: str, port: int, counter: int):
global stopped
global active_processes
path_to_vdf_client = find_vdf_client()
first_10_seconds = True
start_time = time.time()
while not stopped:
try:
dirname = path_to_vdf_client.parent
basename = path_to_vdf_client.name
resolved = socket.gethostbyname(host)
proc = await asyncio.create_subprocess_shell(
f"{basename} {resolved} {port} {counter}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env={"PATH": dirname},
)
except Exception as e:
log.warning(f"Exception while spawning process {counter}: {(e)}")
continue
async with lock:
active_processes.append(proc)
stdout, stderr = await proc.communicate()
if stdout:
log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
if stderr:
if first_10_seconds:
if time.time() - start_time > 10:
first_10_seconds = False
else:
log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
log.info(f"Process number {counter} ended.")
async with lock:
if proc in active_processes:
active_processes.remove(proc)
await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
await asyncio.sleep(5)
hostname = net_config["self_hostname"] if "host" not in config else config["host"]
port = config["port"]
process_count = config["process_count"]
awaitables = [spawn_process(hostname, port, i) for i in range(process_count)]
await asyncio.gather(*awaitables)
def main():
root_path = DEFAULT_ROOT_PATH
setproctitle("hddcoin_timelord_launcher")
net_config = load_config(root_path, "config.yaml")
config = net_config["timelord_launcher"]
initialize_logging("TLauncher", config["logging"], root_path)
def signal_received():
asyncio.create_task(kill_processes())
loop = asyncio.get_event_loop()
try:
loop.add_signal_handler(signal.SIGINT, signal_received)
loop.add_signal_handler(signal.SIGTERM, signal_received)
except NotImplementedError:
log.info("signal handlers unsupported")
try:
loop.run_until_complete(spawn_all_processes(config, net_config))
finally:
log.info("Launcher fully closed.")
loop.close()
if __name__ == "__main__":
main()
| true | true |
f72b3050bfccbe4c42d8488a0a707b9ddf77dbd2 | 485 | py | Python | scripts/venv/Scripts/easy_install-3.7-script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | null | null | null | scripts/venv/Scripts/easy_install-3.7-script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | 3 | 2021-12-10T01:22:05.000Z | 2021-12-14T21:33:16.000Z | scripts/venv/Scripts/easy_install-3.7-script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | null | null | null | #!K:\2018_SS\BMW_Thesis\workspace_bmw\Thesis_KG_Agnostic_EL\scripts\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Auto-generated setuptools console-script wrapper for easy_install.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py", "-script.pyw" or ".exe" so the entry
    # point sees its canonical program name in sys.argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| 37.307692 | 91 | 0.709278 |
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| true | true |
f72b30581d8ef30df8d3b88fde755c65a6390087 | 15,737 | py | Python | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding=utf-8
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
# 配置文件
conf = Config()
class Vocabulary(object):
    """Character/word vocabulary with sequence-to-id transforms.

    The vocabulary is loaded from ``meta_file`` (one token per line; the
    line number becomes the token id).  Besides plain id sequences it can
    build BERT-style inputs (token ids, attention mask, segment ids).

    :param meta_file: path to a utf-8 vocabulary file, one token per line
    :param max_len: maximum sequence length used for truncation/padding
    :param allow_unk: if truthy, unknown tokens map to ``unk`` instead of raising
    :param unk: the unknown-token symbol (must appear in ``meta_file``)
    :param pad: the padding symbol (stored for API compatibility)
    """

    def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
        self.voc2id = {}
        self.id2voc = {}
        self.unk = unk
        self.pad = pad
        self.max_len = max_len
        self.allow_unk = allow_unk
        # -1 means "keep the full corpus vocabulary" in fit(); this attribute
        # was previously never initialized, so calling fit() raised
        # AttributeError.
        self.max_num_word = -1
        with open(meta_file, encoding='utf-8') as f:
            for i, line in enumerate(f):
                line = convert_to_unicode(line.strip("\n"))
                self.voc2id[line] = i
                self.id2voc[i] = line
        self.size = len(self.voc2id)
        self.oov_num = self.size + 1

    def fit(self, words_list):
        """
        Rebuild the vocabulary from a corpus, keeping the most frequent words.

        :param words_list: [[w11, w12, ...], [w21, w22, ...], ...]
        :return: self
        """
        # Local import: ``Counter`` was referenced without being imported
        # anywhere in this module (NameError before this fix).
        from collections import Counter
        word_lst = []
        word_lst_append = word_lst.append
        for words in words_list:
            if not isinstance(words, list):
                print(words)
                continue
            for word in words:
                word = convert_to_unicode(word)
                word_lst_append(word)
        word_counts = Counter(word_lst)
        if self.max_num_word < 0:
            self.max_num_word = len(word_counts)
        sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
        self.max_num_word = len(sorted_voc)
        self.oov_index = self.max_num_word + 1
        # id 0 is implicitly reserved (ids start at 1), matching the 0-padding
        # used by the transform methods.
        self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
        return self

    def _transform2id(self, word):
        """Return the id of ``word``; fall back to UNK or raise per ``allow_unk``."""
        word = convert_to_unicode(word)
        if word in self.voc2id:
            return self.voc2id[word]
        elif self.allow_unk:
            return self.voc2id[self.unk]
        else:
            print(word)
            raise ValueError("word:{} Not in voc2id, please check".format(word))

    def _transform_seq2id(self, words, padding=0):
        """Convert a sequence to ids, truncated to ``max_len``, 0-padded if ``padding``."""
        out_ids = []
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        for w in words:
            out_ids.append(self._transform2id(w))
        if padding and self.max_len:
            while len(out_ids) < self.max_len:
                out_ids.append(0)
        return out_ids

    def _transform_intent2ont_hot(self, words, padding=0):
        """Convert multi-label intents into a multi-hot vector of size ``self.size``.

        ``padding`` is accepted for interface symmetry but unused here.
        """
        out_ids = np.zeros(self.size, dtype=np.float32)
        words = convert_to_unicode(words)
        for w in words:
            out_ids[self._transform2id(w)] = 1.0
        return out_ids

    def _transform_seq2bert_id(self, words, padding=0):
        """Encode a single sequence as BERT input.

        :return: (token ids with leading [CLS], attention mask, segment ids, seq_len)

        NOTE(review): only [CLS] is prepended; no trailing [SEP] is appended
        for the single-sequence case — confirm the downstream model expects
        this.
        """
        out_ids, seq_len = [], 0
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        seq_len = len(words)
        out_ids.append(self._transform2id("[CLS]"))
        for w in words:
            out_ids.append(self._transform2id(w))
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            # max_len + 1 accounts for the extra [CLS] token.
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
        seg_ids = [0 for _ in out_ids]
        return out_ids, mask_ids, seg_ids, seq_len

    @staticmethod
    def _truncate_seq_pair(tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length.

        Pops one token at a time from whichever sequence is currently longer,
        so both are trimmed as evenly as possible.
        """
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
        """Encode a sequence pair as BERT input: [CLS] seq1 [SEP] seq2.

        :return: (token ids, attention mask, segment ids, seq_len)

        NOTE(review): the returned ``seq_len`` is always 0 (never updated),
        and ``seg_ids`` starts with 1 for the [CLS] position where BERT
        conventionally uses 0 — confirm callers rely on neither.
        """
        out_ids, seg_ids, seq_len = [], [1], 0
        seq1 = [x for x in convert_to_unicode(seq1)]
        seq2 = [x for x in convert_to_unicode(seq2)]
        # Truncate the pair so [CLS] and [SEP] still fit within max_len.
        self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        out_ids.append(self._transform2id("[CLS]"))
        for w in seq1:
            out_ids.append(self._transform2id(w))
            seg_ids.append(0)
        out_ids.append(self._transform2id("[SEP]"))
        seg_ids.append(0)
        for w in seq2:
            out_ids.append(self._transform2id(w))
            seg_ids.append(1)
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
                seg_ids.append(0)
        return out_ids, mask_ids, seg_ids, seq_len

    def transform(self, seq_list, is_bert=0):
        """Encode every sequence in ``seq_list`` (BERT-style if ``is_bert``)."""
        if is_bert:
            return [self._transform_seq2bert_id(seq) for seq in seq_list]
        else:
            return [self._transform_seq2id(seq) for seq in seq_list]

    def __len__(self):
        return len(self.voc2id)
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    Accepts ``str`` (returned unchanged on Python 3) or ``bytes`` (decoded
    as utf-8 with undecodable sequences dropped).  Uses ``sys.version_info``
    directly instead of the third-party ``six`` module that this function
    previously needed only for a version check; behavior is unchanged.

    :raises ValueError: if ``text`` is neither ``str`` nor ``bytes``, or the
        interpreter is neither Python 2 nor Python 3.
    """
    major = sys.version_info[0]
    if major >= 3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif major == 2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        elif isinstance(text, unicode):  # noqa: F821 -- defined on Python 2 only
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def gen_word_set(file_path, out_path='./data/words.txt'):
    """Collect the character set of prefixes and predicted queries.

    Reads the tab-separated OPPO data (prefix, query_prediction, title, tag,
    label), skips negative rows (label == '0'), and writes every distinct
    character of ``prefix`` and of each predicted query to ``out_path``,
    one per line (unordered).

    :param file_path: input dataset path
    :param out_path: destination vocabulary file
    """
    word_set = set()
    with open(file_path, encoding='utf-8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # Bug fix: the guard used to be `< 4` but the unpack below needs
            # exactly 5 fields, so 4-field lines crashed with ValueError.
            if len(spline) != 5:
                continue
            prefix, query_pred, title, tag, label = spline
            if label == '0':
                continue
            query_pred = json.loads(query_pred)
            for w in prefix:
                word_set.add(w)
            for each in query_pred:
                for w in each:
                    word_set.add(w)
    # Bug fix: this previously did open(word_set, ...), i.e. tried to open
    # the set object itself as a filename (TypeError at runtime).
    with open(out_path, 'w', encoding='utf-8') as o:
        for w in word_set:
            o.write(w + '\n')
def convert_word2id(query, vocab_map):
    """Map each character of ``query`` to its id, padded/truncated to conf.max_seq_len."""
    unk_id = vocab_map[conf.unk]
    ids = [vocab_map.get(ch, unk_id) for ch in query]
    if len(ids) < conf.max_seq_len:
        pad_id = vocab_map[conf.pad]
        ids.extend([pad_id] * (conf.max_seq_len - len(ids)))
    return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
    """Build a bag-of-words count vector of size conf.nwords; OOV chars count toward UNK."""
    bow_ids = np.zeros(conf.nwords)
    unk_id = vocab_map[conf.unk]
    for ch in query:
        bow_ids[vocab_map.get(ch, unk_id)] += 1
    return bow_ids
def get_data(file_path):
    """
    gen datasets, convert word into word ids.
    Keeps only rows with a positive label and at least 4 negative candidates.
    :param file_path:
    :return: dict with query/doc_pos/doc_neg id lists and their lengths
    """
    data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [],
                'doc_neg': [], 'doc_neg_len': []}
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            fields = line.strip().split('\t')
            if len(fields) < 4:
                continue
            prefix, query_pred, title, tag, label = fields
            if label == '0':
                continue
            neg_ids, neg_lens = [], []
            # Every predicted query except the positive title is a negative.
            for candidate in json.loads(query_pred):
                if candidate == title:
                    continue
                neg_ids.append(convert_word2id(candidate, conf.vocab_map))
                neg_lens.append(min(len(candidate), conf.max_seq_len))
            if len(neg_ids) < 4:
                continue
            data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
            data_map['query_len'].append(min(len(prefix), conf.max_seq_len))
            data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
            data_map['doc_pos_len'].append(min(len(title), conf.max_seq_len))
            data_map['doc_neg'].extend(neg_ids[:4])
            data_map['doc_neg_len'].extend(neg_lens[:4])
    return data_map
def get_data_siamese_rnn(file_path):
    """
    gen datasets, convert word into word ids.
    :param file_path:
    :return: [[prefix_ids, title_ids, label]], shape = [n, 3]
    """
    pairs = []
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            fields = line.strip().split('\t')
            if len(fields) < 4:
                continue
            prefix, _, title, tag, label = fields
            pairs.append([convert_word2id(prefix, conf.vocab_map),
                          convert_word2id(title, conf.vocab_map),
                          int(label)])
    return pairs
def get_data_bow(file_path):
    """
    gen datasets, convert each (prefix, title) pair to bag-of-words vectors.
    :param file_path:
    :return: [[prefix_bow, title_bow, label]], shape = [n, 3]
    """
    samples = []
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            fields = line.strip().split('\t')
            if len(fields) < 4:
                continue
            prefix, _, title, tag, label = fields
            samples.append([convert_seq2bow(prefix, conf.vocab_map),
                            convert_seq2bow(title, conf.vocab_map),
                            int(label)])
    return samples
def trans_lcqmc(dataset):
    """Convert LCQMC examples to [t1_ids, t1_len, t2_ids, t2_len, label] rows.

    Lengths are clipped to conf.max_seq_len; also prints corpus length stats.
    """
    out_arr, text_len = [], []
    for example in dataset:
        t1, t2, label = example.text_a, example.text_b, int(example.label)
        out_arr.append([
            convert_word2id(t1, conf.vocab_map), min(len(t1), conf.max_seq_len),
            convert_word2id(t2, conf.vocab_map), min(len(t2), conf.max_seq_len),
            label,
        ])
        text_len.append(len(t1))
        text_len.append(len(t2))
    print("max len", max(text_len), "avg len", mean(text_len),
          "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
def get_lcqmc():
    """Load the LCQMC dataset via paddlehub and convert every split to id rows."""
    splits = hub.dataset.LCQMC()
    return (trans_lcqmc(splits.train_examples),
            trans_lcqmc(splits.dev_examples),
            trans_lcqmc(splits.test_examples))
def trans_lcqmc_bert(dataset: list, vocab: Vocabulary, is_merge=0):
    """Convert LCQMC examples to BERT features.

    With ``is_merge`` the pair is encoded as one [CLS] a [SEP] b sequence,
    otherwise each text is encoded separately.  Also prints length stats.
    """
    out_arr, text_len = [], []
    for example in dataset:
        t1, t2, label = example.text_a, example.text_b, int(example.label)
        if is_merge:
            ids, mask, seg, seq_len = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            out_arr.append([ids, mask, seg, seq_len, label])
            text_len.append(len(t1) + len(t2))
        else:
            ids1, mask1, seg1, len1 = vocab._transform_seq2bert_id(t1, padding=1)
            ids2, mask2, seg2, len2 = vocab._transform_seq2bert_id(t2, padding=1)
            out_arr.append([ids1, mask1, seg1, len1, ids2, mask2, seg2, len2, label])
            text_len.append(len(t1))
            text_len.append(len(t2))
    print("max len", max(text_len), "avg len", mean(text_len),
          "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
def get_lcqmc_bert(vocab: Vocabulary, is_merge=0):
    """Load LCQMC via paddlehub and BERT-encode every split with ``vocab``."""
    splits = hub.dataset.LCQMC()
    return (trans_lcqmc_bert(splits.train_examples, vocab, is_merge),
            trans_lcqmc_bert(splits.dev_examples, vocab, is_merge),
            trans_lcqmc_bert(splits.test_examples, vocab, is_merge))
def get_test(file_: str, vocab: Vocabulary):
    """Read tab-separated query pairs and encode them as padded id sequences.

    :return: (encoded rows [t1_ids, t1_len, t2_ids, t2_len], raw pairs)
    """
    test_arr = read_file(file_, '\t')  # [[q1, q2], ...]
    out_arr = []
    for pair in test_arr:
        if len(pair) != 2:
            print('wrong line size=', len(pair))
        t1, t2 = pair
        out_arr.append([
            vocab._transform_seq2id(t1, padding=1), min(len(t1), vocab.max_len),
            vocab._transform_seq2id(t2, padding=1), min(len(t2), vocab.max_len),
        ])
    return out_arr, test_arr
def get_test_bert(file_: str, vocab: Vocabulary, is_merge=0):
    """Read tab-separated query pairs from ``file_`` and BERT-encode them."""
    pairs = read_file(file_, '\t')  # [[q1, q2], ...]
    encoded, _ = get_test_bert_by_arr(pairs, vocab, is_merge)
    return encoded, pairs
def get_test_bert_by_arr(test_arr: list, vocab: Vocabulary, is_merge=0):
    """BERT-encode an in-memory list of [q1, q2] pairs.

    With ``is_merge`` each row is one merged encoding (4 features), otherwise
    the two encodings are concatenated (8 features).  Returns (rows, test_arr).
    """
    out_arr = []
    for pair in test_arr:
        if len(pair) != 2:
            print('wrong line size=', len(pair))
        t1, t2 = pair
        if is_merge:
            features = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            out_arr.append(list(features))
        else:
            enc1 = vocab._transform_seq2bert_id(t1, padding=1)
            enc2 = vocab._transform_seq2bert_id(t2, padding=1)
            out_arr.append(list(enc1) + list(enc2))
    return out_arr, test_arr
def get_test_bert_single(file_: str, vocab: Vocabulary, is_merge=0):
    """Read one query per line from ``file_`` and BERT-encode each query."""
    queries = read_file(file_)  # [q1, ...]
    out_arr = []
    for query in queries:
        features = vocab._transform_seq2bert_id(query, padding=1)
        out_arr.append(list(features))
    return out_arr, queries
def get_batch(dataset, batch_size=None, is_test=0):
    """Yield mini-batches of ``dataset``; each batch is a zip of feature columns.

    Shuffles the dataset in place unless ``is_test``.  Hand-rolled batching
    kept instead of tf.data (see original author's note about its ergonomics).
    """
    if not batch_size:
        batch_size = 32
    if not is_test:
        random.shuffle(dataset)
    for start in range(0, len(dataset), batch_size):
        yield zip(*dataset[start:start + batch_size])
if __name__ == '__main__':
    # Smoke test for the data pipeline.
    # Raw OPPO data columns: prefix, query_prediction, title, tag, label
    # query_prediction is in JSON format.
    file_train = './data/oppo_round1_train_20180929.txt'
    file_vali = './data/oppo_round1_vali_20180929.txt'
    # data_train = get_data(file_train)
    # data_train = get_data(file_vali)
    # print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))
    # Load LCQMC and batch the first 3 dev examples to sanity-check shapes.
    dataset = get_lcqmc()
    print(dataset[1][:3])
    for each in get_batch(dataset[1][:3], batch_size=2):
        t1_ids, t1_len, t2_ids, t2_len, label = each
        print(each)
        pass
| 37.20331 | 129 | 0.599797 |
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
conf = Config()
class Vocabulary(object):
def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
self.voc2id = {}
self.id2voc = {}
self.unk = unk
self.pad = pad
self.max_len = max_len
self.allow_unk = allow_unk
with open(meta_file, encoding='utf-8') as f:
for i, line in enumerate(f):
line = convert_to_unicode(line.strip("\n"))
self.voc2id[line] = i
self.id2voc[i] = line
self.size = len(self.voc2id)
self.oov_num = self.size + 1
def fit(self, words_list):
word_lst = []
word_lst_append = word_lst.append
for words in words_list:
if not isinstance(words, list):
print(words)
continue
for word in words:
word = convert_to_unicode(word)
word_lst_append(word)
word_counts = Counter(word_lst)
if self.max_num_word < 0:
self.max_num_word = len(word_counts)
sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
self.max_num_word = len(sorted_voc)
self.oov_index = self.max_num_word + 1
self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
return self
def _transform2id(self, word):
word = convert_to_unicode(word)
if word in self.voc2id:
return self.voc2id[word]
elif self.allow_unk:
return self.voc2id[self.unk]
else:
print(word)
raise ValueError("word:{} Not in voc2id, please check".format(word))
def _transform_seq2id(self, words, padding=0):
out_ids = []
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
for w in words:
out_ids.append(self._transform2id(w))
if padding and self.max_len:
while len(out_ids) < self.max_len:
out_ids.append(0)
return out_ids
def _transform_intent2ont_hot(self, words, padding=0):
out_ids = np.zeros(self.size, dtype=np.float32)
words = convert_to_unicode(words)
for w in words:
out_ids[self._transform2id(w)] = 1.0
return out_ids
def _transform_seq2bert_id(self, words, padding=0):
out_ids, seq_len = [], 0
words = convert_to_unicode(words)
if self.max_len:
words = words[:self.max_len]
seq_len = len(words)
out_ids.append(self._transform2id("[CLS]"))
for w in words:
out_ids.append(self._transform2id(w))
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids = [0 for _ in out_ids]
return out_ids, mask_ids, seg_ids, seq_len
@staticmethod
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
out_ids, seg_ids, seq_len = [], [1], 0
seq1 = [x for x in convert_to_unicode(seq1)]
seq2 = [x for x in convert_to_unicode(seq2)]
self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
out_ids.append(self._transform2id("[CLS]"))
for w in seq1:
out_ids.append(self._transform2id(w))
seg_ids.append(0)
out_ids.append(self._transform2id("[SEP]"))
seg_ids.append(0)
for w in seq2:
out_ids.append(self._transform2id(w))
seg_ids.append(1)
mask_ids = [1 for _ in out_ids]
if padding and self.max_len:
while len(out_ids) < self.max_len + 1:
out_ids.append(0)
mask_ids.append(0)
seg_ids.append(0)
return out_ids, mask_ids, seg_ids, seq_len
def transform(self, seq_list, is_bert=0):
if is_bert:
return [self._transform_seq2bert_id(seq) for seq in seq_list]
else:
return [self._transform_seq2id(seq) for seq in seq_list]
def __len__(self):
return len(self.voc2id)
def convert_to_unicode(text):
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def gen_word_set(file_path, out_path='./data/words.txt'):
word_set = set()
with open(file_path, encoding='utf-8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr = [prefix, title]
query_pred = json.loads(query_pred)
for w in prefix:
word_set.add(w)
for each in query_pred:
for w in each:
word_set.add(w)
with open(word_set, 'w', encoding='utf-8') as o:
for w in word_set:
o.write(w + '\n')
pass
def convert_word2id(query, vocab_map):
ids = []
for w in query:
if w in vocab_map:
ids.append(vocab_map[w])
else:
ids.append(vocab_map[conf.unk])
while len(ids) < conf.max_seq_len:
ids.append(vocab_map[conf.pad])
return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
bow_ids = np.zeros(conf.nwords)
for w in query:
if w in vocab_map:
bow_ids[vocab_map[w]] += 1
else:
bow_ids[vocab_map[conf.unk]] += 1
return bow_ids
def get_data(file_path):
data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, query_pred, title, tag, label = spline
if label == '0':
continue
cur_arr, cur_len = [], []
query_pred = json.loads(query_pred)
for each in query_pred:
if each == title:
continue
cur_arr.append(convert_word2id(each, conf.vocab_map))
each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
cur_len.append(each_len)
if len(cur_arr) >= 4:
data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
data_map['doc_neg'].extend(cur_arr[:4])
data_map['doc_neg_len'].extend(cur_len[:4])
pass
return data_map
def get_data_siamese_rnn(file_path):
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_seq = convert_word2id(prefix, conf.vocab_map)
title_seq = convert_word2id(title, conf.vocab_map)
data_arr.append([prefix_seq, title_seq, int(label)])
return data_arr
def get_data_bow(file_path):
data_arr = []
with open(file_path, encoding='utf8') as f:
for line in f.readlines():
spline = line.strip().split('\t')
if len(spline) < 4:
continue
prefix, _, title, tag, label = spline
prefix_ids = convert_seq2bow(prefix, conf.vocab_map)
title_ids = convert_seq2bow(title, conf.vocab_map)
data_arr.append([prefix_ids, title_ids, int(label)])
return data_arr
def trans_lcqmc(dataset):
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
t1_ids = convert_word2id(t1, conf.vocab_map)
t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
t2_ids = convert_word2id(t2, conf.vocab_map)
t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc():
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc(dataset.train_examples)
dev_set = trans_lcqmc(dataset.dev_examples)
test_set = trans_lcqmc(dataset.test_examples)
return train_set, dev_set, test_set
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
out_arr, text_len = [], []
for each in dataset:
t1, t2, label = each.text_a, each.text_b, int(each.label)
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
text_len.extend([len(t1) + len(t2)])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])
text_len.extend([len(t1), len(t2)])
pass
print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
return out_arr
def get_lcqmc_bert(vocab:Vocabulary, is_merge=0):
dataset = hub.dataset.LCQMC()
train_set = trans_lcqmc_bert(dataset.train_examples, vocab, is_merge)
dev_set = trans_lcqmc_bert(dataset.dev_examples, vocab, is_merge)
test_set = trans_lcqmc_bert(dataset.test_examples, vocab, is_merge)
return train_set, dev_set, test_set
def get_test(file_:str, vocab:Vocabulary):
test_arr = read_file(file_, '\t')
out_arr = []
for line in test_arr:
if len(line) != 2:
print('wrong line size=', len(line))
t1, t2 = line
t1_ids = vocab._transform_seq2id(t1, padding=1)
t1_len = vocab.max_len if len(t1) > vocab.max_len else len(t1)
t2_ids = vocab._transform_seq2id(t2, padding=1)
t2_len = vocab.max_len if len(t2) > vocab.max_len else len(t2)
out_arr.append([t1_ids, t1_len, t2_ids, t2_len])
return out_arr, test_arr
def get_test_bert(file_:str, vocab:Vocabulary, is_merge=0):
test_arr = read_file(file_, '\t')
out_arr, _ = get_test_bert_by_arr(test_arr, vocab, is_merge)
return out_arr, test_arr
def get_test_bert_by_arr(test_arr:list, vocab:Vocabulary, is_merge=0):
]
for line in test_arr:
if len(line) != 2:
print('wrong line size=', len(line))
t1, t2 = line
if is_merge:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
else:
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2])
return out_arr, test_arr
def get_test_bert_single(file_:str, vocab:Vocabulary, is_merge=0):
test_arr = read_file(file_)
out_arr = []
for line in test_arr:
t1 = line
out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
return out_arr, test_arr
def get_batch(dataset, batch_size=None, is_test=0):
if not batch_size:
batch_size = 32
if not is_test:
random.shuffle(dataset)
steps = int(math.ceil(float(len(dataset)) / batch_size))
for i in range(steps):
idx = i * batch_size
cur_set = dataset[idx: idx + batch_size]
cur_set = zip(*cur_set)
yield cur_set
if __name__ == '__main__':
file_train = './data/oppo_round1_train_20180929.txt'
file_vali = './data/oppo_round1_vali_20180929.txt'
dataset = get_lcqmc()
print(dataset[1][:3])
for each in get_batch(dataset[1][:3], batch_size=2):
t1_ids, t1_len, t2_ids, t2_len, label = each
print(each)
pass
| true | true |
f72b319c6f56785827dd2160e2b9d041dde23ada | 5,281 | py | Python | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=5001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
# num_gaussians=1,
),
qf_kwargs=dict(
hidden_sizes=[256, 256, ],
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
brac=True,
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
save_video=True,
image_env_kwargs=dict(
imsize=84,
init_camera=None, # the environment initializes the camera already
transpose=True,
normalize=True,
recompute_reward=False,
non_presampled_goal_img_is_garbage=True, # do not set_to_goal
),
dump_video_kwargs=dict(
exploration_goal_image_key="image_observation",
evaluation_goal_image_key="image_observation",
image_format="CWH",
),
# renderer_kwargs=dict(
# # width=84,
# # height=84,
# init_camera=None, # the environment initializes the camera already
# # transpose=True,
# create_image_format="HWC",
# output_image_format="CHW",
# # normalize=True,
# ),
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["relocate-binary-old-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
'trainer_kwargs.clip_score': [2, ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0, ],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
| 30.883041 | 80 | 0.566938 |
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=5001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
),
qf_kwargs=dict(
hidden_sizes=[256, 256, ],
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
brac=True,
reward_transform_kwargs=None,
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
save_video=True,
image_env_kwargs=dict(
imsize=84,
init_camera=None, # the environment initializes the camera already
transpose=True,
normalize=True,
recompute_reward=False,
non_presampled_goal_img_is_garbage=True, # do not set_to_goal
),
dump_video_kwargs=dict(
exploration_goal_image_key="image_observation",
evaluation_goal_image_key="image_observation",
image_format="CWH",
),
# renderer_kwargs=dict(
# # width=84,
# # height=84,
# init_camera=None, # the environment initializes the camera already
# # transpose=True,
# create_image_format="HWC",
# output_image_format="CHW",
# # normalize=True,
# ),
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["relocate-binary-old-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
'trainer_kwargs.clip_score': [2, ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0, ],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
| true | true |
f72b32a4095f35d7bed6ab5e19378d3c4f4d06be | 1,876 | py | Python | tests/test_airconditioning.py | izumi-system-arai/builelib | ae7c36df1ef7477e9a0356559b2694aabff11bb3 | [
"MIT"
] | 5 | 2020-09-04T13:56:45.000Z | 2022-03-06T05:46:55.000Z | tests/test_airconditioning.py | izumi-system-arai/builelib | ae7c36df1ef7477e9a0356559b2694aabff11bb3 | [
"MIT"
] | 1 | 2021-08-17T07:11:42.000Z | 2021-08-17T07:11:42.000Z | tests/test_airconditioning.py | izumi-system-arai/builelib | ae7c36df1ef7477e9a0356559b2694aabff11bb3 | [
"MIT"
] | 2 | 2021-07-06T09:41:20.000Z | 2021-08-02T08:47:13.000Z | import pandas as pd
import csv
from builelib import airconditioning
import pytest
import json
import xlrd
### テストファイル名 ###
# 辞書型 テスト名とファイル名
testcase_dict = {
"AHU_basic": "./tests/airconditioning/★空調設備テストケース一覧.xlsx",
}
def convert2number(x, default):
'''
空欄にデフォルト値を代入する
'''
if x == "":
x = default
else:
x = float(x)
return x
def read_testcasefile(filename):
'''
テストケースファイルを読み込む関数
'''
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name("Sheet1")
testdata = [sheet.row_values(row) for row in range(sheet.nrows)]
return testdata
#### テストケースファイルの読み込み
test_to_try = [] # テスト用入力ファイルと期待値のリスト
testcase_id = [] # テスト名称のリスト
for case_name in testcase_dict:
# テストファイルの読み込み
testfiledata = read_testcasefile(testcase_dict[case_name])
# ヘッダーの削除
testfiledata.pop(0)
# テストケース(行)に対するループ
for testdata in testfiledata:
filename = "./tests/airconditioning/ACtest_" + testdata[0] + ".json"
# 入力データの作成
with open(filename, 'r', encoding='utf-8') as f:
inputdata = json.load(f)
# 期待値
expectedvalue = (testdata[4])
# テストケースの集約
test_to_try.append( (inputdata, expectedvalue) )
# テストケース名
testcase_id.append(case_name + testdata[0])
# テストの実施
@pytest.mark.parametrize('inputdata, expectedvalue', test_to_try, ids=testcase_id)
def test_calc(inputdata, expectedvalue):
# 検証用
with open("inputdata.json",'w', encoding='utf-8') as fw:
json.dump(inputdata, fw, indent=4, ensure_ascii=False)
# 計算実行
resultJson = airconditioning.calc_energy(inputdata)
diff_Eac = (abs(resultJson["E_airconditioning"] - expectedvalue)) / abs( expectedvalue )
# 比較(0.01%まで)
assert diff_Eac < 0.0001
if __name__ == '__main__':
print('--- test_airconditioning.py ---')
| 21.563218 | 92 | 0.647122 | import pandas as pd
import csv
from builelib import airconditioning
import pytest
import json
import xlrd
": "./tests/airconditioning/★空調設備テストケース一覧.xlsx",
}
def convert2number(x, default):
if x == "":
x = default
else:
x = float(x)
return x
def read_testcasefile(filename):
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name("Sheet1")
testdata = [sheet.row_values(row) for row in range(sheet.nrows)]
return testdata
me in testcase_dict:
testfiledata = read_testcasefile(testcase_dict[case_name])
testfiledata.pop(0)
for testdata in testfiledata:
filename = "./tests/airconditioning/ACtest_" + testdata[0] + ".json"
with open(filename, 'r', encoding='utf-8') as f:
inputdata = json.load(f)
expectedvalue = (testdata[4])
test_to_try.append( (inputdata, expectedvalue) )
testcase_id.append(case_name + testdata[0])
@pytest.mark.parametrize('inputdata, expectedvalue', test_to_try, ids=testcase_id)
def test_calc(inputdata, expectedvalue):
with open("inputdata.json",'w', encoding='utf-8') as fw:
json.dump(inputdata, fw, indent=4, ensure_ascii=False)
resultJson = airconditioning.calc_energy(inputdata)
diff_Eac = (abs(resultJson["E_airconditioning"] - expectedvalue)) / abs( expectedvalue )
assert diff_Eac < 0.0001
if __name__ == '__main__':
print('--- test_airconditioning.py ---')
| true | true |
f72b338ae3488cd29a445fe80006558b89a53eb0 | 3,209 | py | Python | API/moviepiapi/utils.py | theoarmengou/MoviePi | b889ed1609e3db096b86452e3ca608822edcdb1a | [
"MIT"
] | 1 | 2020-01-08T12:09:14.000Z | 2020-01-08T12:09:14.000Z | API/moviepiapi/utils.py | theoarmengou/MoviePi | b889ed1609e3db096b86452e3ca608822edcdb1a | [
"MIT"
] | null | null | null | API/moviepiapi/utils.py | theoarmengou/MoviePi | b889ed1609e3db096b86452e3ca608822edcdb1a | [
"MIT"
] | 1 | 2020-10-30T10:33:19.000Z | 2020-10-30T10:33:19.000Z | ##
# EPITECH PROJECT, 2019
# MoviePi
# File description:
# utils.py
##
import datetime
import jwt
from moviepiapi.dbHelper import dbHelper
from moviepiapi.userHelper import userHelper
ret_packet = {'responseStatus': 0, 'message': "", 'data': any}
Key = 'MoviePiTheoAudreyHicham'
LEN_MAX_USER = 255
db = dbHelper('moviepi_api', 'moviepi_api', 'moviepi', '51.75.141.254')
userH = userHelper(db, LEN_MAX_USER)
def fill_return_packet(iswork, typeoferror, data):
ret_packet['responseStatus'] = iswork
ret_packet['message'] = typeoferror
ret_packet['data'] = data
return ret_packet
def encode_auth_token(user_id):
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
Key,
algorithm='HS256'
).decode('utf-8')
except Exception as e:
return e
def check_auth_token(request):
auth_headers = request.headers.get('Authorization', '').split()
if len(auth_headers) != 2:
return None
try:
payload = jwt.decode(auth_headers[1], Key)
return payload['sub']
except jwt.ExpiredSignatureError:
return False
except jwt.InvalidTokenError:
return False
return False
def make_average_weight(list):
result = 0.0
if not list:
return -1
for i in range(len(list)):
result = result + float(list[i])
result = result / len(list)
print(result)
return(result)
def adjust_weight_user(film_id, note, id_user):
weight_list = []
idgenre_list = []
already_genre = []
all_genre_user = []
new_weight = []
result = db.request(
"SELECT fk_genres FROM films_genres WHERE fk_films=%s", str(film_id))
if not result:
return fill_return_packet(0, "Pas de genre trouvés pour ce film", None)
idgenre_list = result[0]['fk_genres'].split(',')
for i in range(len(idgenre_list)):
idgenre_list[i] = int(idgenre_list[i])
result_user = db.request(
"SELECT fk_genres, weight FROM users_genres WHERE fk_users=%s", str(id_user))
if not result_user:
return False
for i in range(len(result_user)):
already_genre.append(int(result_user[i]['fk_genres']))
final_list = list(set(idgenre_list).union(set(already_genre)))
for i in range(len(final_list)):
for y in range(len(result)):
if final_list[i] == result_user[y]['fk_genres']:
new_weight.append(
(int(result_user[y]['weight']) / len(final_list)) * int(note))
else:
new_weight.append(1)
for i in range(len(new_weight)):
print(id_user, final_list[i], new_weight[i])
if final_list[i] in already_genre:
db.request("UPDATE users_genres SET weight = %s WHERE fk_users = %s AND fk_genres = %s",
new_weight[i], id_user, final_list[i])
else:
db.insert("INSERT INTO users_genres (fk_users, fk_genres, weight) VALUES (%s, %s, %s)",
id_user, final_list[i], new_weight[i])
return True
| 30.561905 | 100 | 0.621377 |
import datetime
import jwt
from moviepiapi.dbHelper import dbHelper
from moviepiapi.userHelper import userHelper
ret_packet = {'responseStatus': 0, 'message': "", 'data': any}
Key = 'MoviePiTheoAudreyHicham'
LEN_MAX_USER = 255
db = dbHelper('moviepi_api', 'moviepi_api', 'moviepi', '51.75.141.254')
userH = userHelper(db, LEN_MAX_USER)
def fill_return_packet(iswork, typeoferror, data):
ret_packet['responseStatus'] = iswork
ret_packet['message'] = typeoferror
ret_packet['data'] = data
return ret_packet
def encode_auth_token(user_id):
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
Key,
algorithm='HS256'
).decode('utf-8')
except Exception as e:
return e
def check_auth_token(request):
auth_headers = request.headers.get('Authorization', '').split()
if len(auth_headers) != 2:
return None
try:
payload = jwt.decode(auth_headers[1], Key)
return payload['sub']
except jwt.ExpiredSignatureError:
return False
except jwt.InvalidTokenError:
return False
return False
def make_average_weight(list):
result = 0.0
if not list:
return -1
for i in range(len(list)):
result = result + float(list[i])
result = result / len(list)
print(result)
return(result)
def adjust_weight_user(film_id, note, id_user):
weight_list = []
idgenre_list = []
already_genre = []
all_genre_user = []
new_weight = []
result = db.request(
"SELECT fk_genres FROM films_genres WHERE fk_films=%s", str(film_id))
if not result:
return fill_return_packet(0, "Pas de genre trouvés pour ce film", None)
idgenre_list = result[0]['fk_genres'].split(',')
for i in range(len(idgenre_list)):
idgenre_list[i] = int(idgenre_list[i])
result_user = db.request(
"SELECT fk_genres, weight FROM users_genres WHERE fk_users=%s", str(id_user))
if not result_user:
return False
for i in range(len(result_user)):
already_genre.append(int(result_user[i]['fk_genres']))
final_list = list(set(idgenre_list).union(set(already_genre)))
for i in range(len(final_list)):
for y in range(len(result)):
if final_list[i] == result_user[y]['fk_genres']:
new_weight.append(
(int(result_user[y]['weight']) / len(final_list)) * int(note))
else:
new_weight.append(1)
for i in range(len(new_weight)):
print(id_user, final_list[i], new_weight[i])
if final_list[i] in already_genre:
db.request("UPDATE users_genres SET weight = %s WHERE fk_users = %s AND fk_genres = %s",
new_weight[i], id_user, final_list[i])
else:
db.insert("INSERT INTO users_genres (fk_users, fk_genres, weight) VALUES (%s, %s, %s)",
id_user, final_list[i], new_weight[i])
return True
| true | true |
f72b33a87fd87b89f914f36a973b364e5a397d6d | 471 | py | Python | basics/src/simple_action_client.py | jescasany/rosbook | a79258e7fa80eb4f8745850125d6b2e462a62dee | [
"Apache-2.0"
] | null | null | null | basics/src/simple_action_client.py | jescasany/rosbook | a79258e7fa80eb4f8745850125d6b2e462a62dee | [
"Apache-2.0"
] | null | null | null | basics/src/simple_action_client.py | jescasany/rosbook | a79258e7fa80eb4f8745850125d6b2e462a62dee | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import roslib; roslib.load_manifest('basics')
import rospy
import actionlib
from basics.msg import TimerAction, TimerGoal, TimerResult
rospy.init_node('timer_action_client')
client = actionlib.SimpleActionClient('timer', TimerAction)
client.wait_for_server()
goal = TimerGoal()
goal.time_to_wait = rospy.Duration.from_sec(5.0)
client.send_goal(goal)
client.wait_for_result()
print('Time elapsed: %f'%(client.get_result().time_elapsed.to_sec()))
| 27.705882 | 69 | 0.794055 |
import roslib; roslib.load_manifest('basics')
import rospy
import actionlib
from basics.msg import TimerAction, TimerGoal, TimerResult
rospy.init_node('timer_action_client')
client = actionlib.SimpleActionClient('timer', TimerAction)
client.wait_for_server()
goal = TimerGoal()
goal.time_to_wait = rospy.Duration.from_sec(5.0)
client.send_goal(goal)
client.wait_for_result()
print('Time elapsed: %f'%(client.get_result().time_elapsed.to_sec()))
| true | true |
f72b34ac6ea7004cf31e6dccd1805b12ef0d95bf | 2,106 | py | Python | gmprocess/waveform_processing/clipping/clipping_check.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | null | null | null | gmprocess/waveform_processing/clipping/clipping_check.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | null | null | null | gmprocess/waveform_processing/clipping/clipping_check.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from obspy.geodetics.base import gps2dist_azimuth
from gmprocess.waveform_processing.clipping.clipping_ann import clipNet
from gmprocess.waveform_processing.clipping.max_amp import Max_Amp
from gmprocess.waveform_processing.clipping.histogram import Histogram
from gmprocess.waveform_processing.clipping.ping import Ping
M_TO_KM = 1.0 / 1000
def check_clipping(st, origin, threshold=0.2):
"""Apply clicking check.
Lower thresholds will pass fewer streams but will give less false negatives
(i.e., streams in which clipping actually occurred but were missed).
Args:
st (StationStream):
Trace of data.
origin (ScalarEvent):
ScalarEvent object.
threshold (float):
Threshold probability.
Returns:
StationStream checked for clipping.
"""
# Don't bother with test for strong motion instruments
chan_code = st.get_id().split(".")[2]
if chan_code[1] == "N":
return st
# Don't bother with test if it has already failed
if not st.passed:
return st
event_mag = origin.magnitude
event_lon = origin.longitude
event_lat = origin.latitude
dist = (
gps2dist_azimuth(
lat1=event_lat,
lon1=event_lon,
lat2=st[0].stats["coordinates"]["latitude"],
lon2=st[0].stats["coordinates"]["longitude"],
)[0]
* M_TO_KM
)
# Clip mag/dist to range of training dataset
event_mag = np.clip(event_mag, 4.0, 8.8)
dist = np.clip(dist, 0.0, 445.0)
clip_nnet = clipNet()
max_amp_method = Max_Amp(st, max_amp_thresh=6e6)
hist_method = Histogram(st)
ping_method = Ping(st)
inputs = [
event_mag,
dist,
max_amp_method.is_clipped,
hist_method.is_clipped,
ping_method.is_clipped,
]
prob_clip = clip_nnet.evaluate(inputs)[0][0]
if prob_clip >= threshold:
for tr in st:
tr.fail(f"Failed clipping check: prob_clip = {prob_clip:.2f}.")
return st
| 26.658228 | 79 | 0.646724 |
import numpy as np
from obspy.geodetics.base import gps2dist_azimuth
from gmprocess.waveform_processing.clipping.clipping_ann import clipNet
from gmprocess.waveform_processing.clipping.max_amp import Max_Amp
from gmprocess.waveform_processing.clipping.histogram import Histogram
from gmprocess.waveform_processing.clipping.ping import Ping
M_TO_KM = 1.0 / 1000
def check_clipping(st, origin, threshold=0.2):
chan_code = st.get_id().split(".")[2]
if chan_code[1] == "N":
return st
# Don't bother with test if it has already failed
if not st.passed:
return st
event_mag = origin.magnitude
event_lon = origin.longitude
event_lat = origin.latitude
dist = (
gps2dist_azimuth(
lat1=event_lat,
lon1=event_lon,
lat2=st[0].stats["coordinates"]["latitude"],
lon2=st[0].stats["coordinates"]["longitude"],
)[0]
* M_TO_KM
)
event_mag = np.clip(event_mag, 4.0, 8.8)
dist = np.clip(dist, 0.0, 445.0)
clip_nnet = clipNet()
max_amp_method = Max_Amp(st, max_amp_thresh=6e6)
hist_method = Histogram(st)
ping_method = Ping(st)
inputs = [
event_mag,
dist,
max_amp_method.is_clipped,
hist_method.is_clipped,
ping_method.is_clipped,
]
prob_clip = clip_nnet.evaluate(inputs)[0][0]
if prob_clip >= threshold:
for tr in st:
tr.fail(f"Failed clipping check: prob_clip = {prob_clip:.2f}.")
return st
| true | true |
f72b36d5fb0cf98eeb2d459b179cfde55b038f13 | 2,311 | py | Python | model.py | bhardwajRahul/RestaurantAPI | 28d7fcd3fbe0524750321102625d8475515f54ed | [
"MIT"
] | 15 | 2018-06-03T16:35:16.000Z | 2022-02-13T16:36:37.000Z | model.py | bhardwajRahul/RestaurantAPI | 28d7fcd3fbe0524750321102625d8475515f54ed | [
"MIT"
] | 2 | 2019-02-11T07:03:09.000Z | 2021-02-25T09:16:15.000Z | model.py | navi25/RestaurantAPI | 28d7fcd3fbe0524750321102625d8475515f54ed | [
"MIT"
] | 9 | 2019-02-08T11:17:34.000Z | 2022-01-29T00:27:14.000Z | from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
ma = Marshmallow()
db = SQLAlchemy()
redis_cache = FlaskRedis()
class FoodModel(db.Model):
__tablename__ = 'foods'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
description = db.Column(db.String(250))
creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel', backref=db.backref('foods', lazy='dynamic' ))
menu_id = db.Column(db.Integer, db.ForeignKey('menus.id', ondelete='CASCADE'), nullable=False)
menu = db.relationship('MenuModel')
def __init__(self, name, description, restaurant_id, menu_id):
self.name = name
self.description = description
self.restaurant_id = restaurant_id
self.menu_id = menu_id
class MenuModel(db.Model):
__tablename__ = 'menus'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel')
def __init__(self, name, restaurant_id):
self.name = name
self.restaurant_id = restaurant_id
class RestaurantModel(db.Model):
__tablename__ = 'restaurants'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(150), unique=True, nullable=False)
def __init__(self, name):
self.name = name
class RestaurantSchema(ma.Schema):
id = fields.Integer()
name = fields.String(required=True)
class MenuSchema(ma.Schema):
id = fields.Integer()
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True)
class FoodSchema(ma.Schema):
id = fields.Integer(dump_only=True)
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True, validate=validate.Length(1))
description = fields.String()
creation_date = fields.DateTime()
| 35.553846 | 110 | 0.719169 | from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
ma = Marshmallow()
db = SQLAlchemy()
redis_cache = FlaskRedis()
class FoodModel(db.Model):
__tablename__ = 'foods'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
description = db.Column(db.String(250))
creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel', backref=db.backref('foods', lazy='dynamic' ))
menu_id = db.Column(db.Integer, db.ForeignKey('menus.id', ondelete='CASCADE'), nullable=False)
menu = db.relationship('MenuModel')
def __init__(self, name, description, restaurant_id, menu_id):
self.name = name
self.description = description
self.restaurant_id = restaurant_id
self.menu_id = menu_id
class MenuModel(db.Model):
__tablename__ = 'menus'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel')
def __init__(self, name, restaurant_id):
self.name = name
self.restaurant_id = restaurant_id
class RestaurantModel(db.Model):
__tablename__ = 'restaurants'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(150), unique=True, nullable=False)
def __init__(self, name):
self.name = name
class RestaurantSchema(ma.Schema):
id = fields.Integer()
name = fields.String(required=True)
class MenuSchema(ma.Schema):
id = fields.Integer()
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True)
class FoodSchema(ma.Schema):
id = fields.Integer(dump_only=True)
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True, validate=validate.Length(1))
description = fields.String()
creation_date = fields.DateTime()
| true | true |
f72b36f1c01c85d1f6f16819bc764c32780c7fb6 | 22,006 | py | Python | sdk/python/pulumi_azure_native/databoxedge/v20200501preview/share.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20200501preview/share.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20200501preview/share.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ShareArgs', 'Share']
@pulumi.input_type
class ShareArgs:
def __init__(__self__, *,
access_protocol: pulumi.Input[Union[str, 'ShareAccessProtocol']],
device_name: pulumi.Input[str],
monitoring_status: pulumi.Input[Union[str, 'MonitoringStatus']],
resource_group_name: pulumi.Input[str],
share_status: pulumi.Input[Union[str, 'ShareStatus']],
azure_container_info: Optional[pulumi.Input['AzureContainerInfoArgs']] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input['RefreshDetailsArgs']] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]] = None):
"""
The set of arguments for constructing a Share resource.
:param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
:param pulumi.Input['AzureContainerInfoArgs'] azure_container_info: Azure container mapping for the share.
:param pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]] client_access_rights: List of IP addresses and corresponding access rights on the share(required for NFS protocol).
:param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
:param pulumi.Input[str] description: Description for the share.
:param pulumi.Input[str] name: The share name.
:param pulumi.Input['RefreshDetailsArgs'] refresh_details: Details of the refresh job on this share.
:param pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
pulumi.set(__self__, "access_protocol", access_protocol)
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "monitoring_status", monitoring_status)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "share_status", share_status)
if azure_container_info is not None:
pulumi.set(__self__, "azure_container_info", azure_container_info)
if client_access_rights is not None:
pulumi.set(__self__, "client_access_rights", client_access_rights)
if data_policy is not None:
pulumi.set(__self__, "data_policy", data_policy)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if refresh_details is not None:
pulumi.set(__self__, "refresh_details", refresh_details)
if user_access_rights is not None:
pulumi.set(__self__, "user_access_rights", user_access_rights)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Input[Union[str, 'ShareAccessProtocol']]:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@access_protocol.setter
def access_protocol(self, value: pulumi.Input[Union[str, 'ShareAccessProtocol']]):
pulumi.set(self, "access_protocol", value)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
"""
The device name.
"""
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Input[Union[str, 'MonitoringStatus']]:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@monitoring_status.setter
def monitoring_status(self, value: pulumi.Input[Union[str, 'MonitoringStatus']]):
pulumi.set(self, "monitoring_status", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Input[Union[str, 'ShareStatus']]:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@share_status.setter
def share_status(self, value: pulumi.Input[Union[str, 'ShareStatus']]):
pulumi.set(self, "share_status", value)
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> Optional[pulumi.Input['AzureContainerInfoArgs']]:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@azure_container_info.setter
def azure_container_info(self, value: Optional[pulumi.Input['AzureContainerInfoArgs']]):
pulumi.set(self, "azure_container_info", value)
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]:
"""
List of IP addresses and corresponding access rights on the share(required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@client_access_rights.setter
def client_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]):
pulumi.set(self, "client_access_rights", value)
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> Optional[pulumi.Input[Union[str, 'DataPolicy']]]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@data_policy.setter
def data_policy(self, value: Optional[pulumi.Input[Union[str, 'DataPolicy']]]):
pulumi.set(self, "data_policy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The share name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> Optional[pulumi.Input['RefreshDetailsArgs']]:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@refresh_details.setter
def refresh_details(self, value: Optional[pulumi.Input['RefreshDetailsArgs']]):
pulumi.set(self, "refresh_details", value)
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
@user_access_rights.setter
def user_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]):
pulumi.set(self, "user_access_rights", value)
class Share(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
"""
Represents a share on the Data Box Edge/Gateway device.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
:param pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']] azure_container_info: Azure container mapping for the share.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]] client_access_rights: List of IP addresses and corresponding access rights on the share(required for NFS protocol).
:param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
:param pulumi.Input[str] description: Description for the share.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
:param pulumi.Input[str] name: The share name.
:param pulumi.Input[pulumi.InputType['RefreshDetailsArgs']] refresh_details: Details of the refresh job on this share.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ShareArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a share on the Data Box Edge/Gateway device.
:param str resource_name: The name of the resource.
:param ShareArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ShareArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ShareArgs.__new__(ShareArgs)
if access_protocol is None and not opts.urn:
raise TypeError("Missing required property 'access_protocol'")
__props__.__dict__["access_protocol"] = access_protocol
__props__.__dict__["azure_container_info"] = azure_container_info
__props__.__dict__["client_access_rights"] = client_access_rights
__props__.__dict__["data_policy"] = data_policy
__props__.__dict__["description"] = description
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if monitoring_status is None and not opts.urn:
raise TypeError("Missing required property 'monitoring_status'")
__props__.__dict__["monitoring_status"] = monitoring_status
__props__.__dict__["name"] = name
__props__.__dict__["refresh_details"] = refresh_details
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_status is None and not opts.urn:
raise TypeError("Missing required property 'share_status'")
__props__.__dict__["share_status"] = share_status
__props__.__dict__["user_access_rights"] = user_access_rights
__props__.__dict__["share_mappings"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Share"), pulumi.Alias(type_="azure-native:databoxedge:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:Share")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Share, __self__).__init__(
'azure-native:databoxedge/v20200501preview:Share',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Share':
"""
Get an existing Share resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ShareArgs.__new__(ShareArgs)
__props__.__dict__["access_protocol"] = None
__props__.__dict__["azure_container_info"] = None
__props__.__dict__["client_access_rights"] = None
__props__.__dict__["data_policy"] = None
__props__.__dict__["description"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["refresh_details"] = None
__props__.__dict__["share_mappings"] = None
__props__.__dict__["share_status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_access_rights"] = None
return Share(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Output[str]:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> pulumi.Output[Optional['outputs.AzureContainerInfoResponse']]:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.ClientAccessRightResponse']]]:
"""
List of IP addresses and corresponding access rights on the share(required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> pulumi.Output[Optional[str]]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> pulumi.Output[Optional['outputs.RefreshDetailsResponse']]:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> pulumi.Output[Sequence['outputs.MountPointMapResponse']]:
"""
Share mount point to the role.
"""
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Output[str]:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccessRightResponse']]]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
| 48.578366 | 1,294 | 0.668045 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ShareArgs', 'Share']
@pulumi.input_type
class ShareArgs:
def __init__(__self__, *,
access_protocol: pulumi.Input[Union[str, 'ShareAccessProtocol']],
device_name: pulumi.Input[str],
monitoring_status: pulumi.Input[Union[str, 'MonitoringStatus']],
resource_group_name: pulumi.Input[str],
share_status: pulumi.Input[Union[str, 'ShareStatus']],
azure_container_info: Optional[pulumi.Input['AzureContainerInfoArgs']] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input['RefreshDetailsArgs']] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]] = None):
pulumi.set(__self__, "access_protocol", access_protocol)
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "monitoring_status", monitoring_status)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "share_status", share_status)
if azure_container_info is not None:
pulumi.set(__self__, "azure_container_info", azure_container_info)
if client_access_rights is not None:
pulumi.set(__self__, "client_access_rights", client_access_rights)
if data_policy is not None:
pulumi.set(__self__, "data_policy", data_policy)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if refresh_details is not None:
pulumi.set(__self__, "refresh_details", refresh_details)
if user_access_rights is not None:
pulumi.set(__self__, "user_access_rights", user_access_rights)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Input[Union[str, 'ShareAccessProtocol']]:
return pulumi.get(self, "access_protocol")
@access_protocol.setter
def access_protocol(self, value: pulumi.Input[Union[str, 'ShareAccessProtocol']]):
pulumi.set(self, "access_protocol", value)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Input[Union[str, 'MonitoringStatus']]:
return pulumi.get(self, "monitoring_status")
@monitoring_status.setter
def monitoring_status(self, value: pulumi.Input[Union[str, 'MonitoringStatus']]):
pulumi.set(self, "monitoring_status", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Input[Union[str, 'ShareStatus']]:
return pulumi.get(self, "share_status")
@share_status.setter
def share_status(self, value: pulumi.Input[Union[str, 'ShareStatus']]):
pulumi.set(self, "share_status", value)
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> Optional[pulumi.Input['AzureContainerInfoArgs']]:
return pulumi.get(self, "azure_container_info")
@azure_container_info.setter
def azure_container_info(self, value: Optional[pulumi.Input['AzureContainerInfoArgs']]):
pulumi.set(self, "azure_container_info", value)
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]:
return pulumi.get(self, "client_access_rights")
@client_access_rights.setter
def client_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]):
pulumi.set(self, "client_access_rights", value)
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> Optional[pulumi.Input[Union[str, 'DataPolicy']]]:
return pulumi.get(self, "data_policy")
@data_policy.setter
def data_policy(self, value: Optional[pulumi.Input[Union[str, 'DataPolicy']]]):
pulumi.set(self, "data_policy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> Optional[pulumi.Input['RefreshDetailsArgs']]:
return pulumi.get(self, "refresh_details")
@refresh_details.setter
def refresh_details(self, value: Optional[pulumi.Input['RefreshDetailsArgs']]):
pulumi.set(self, "refresh_details", value)
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]:
return pulumi.get(self, "user_access_rights")
@user_access_rights.setter
def user_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]):
pulumi.set(self, "user_access_rights", value)
class Share(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: ShareArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ShareArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ShareArgs.__new__(ShareArgs)
if access_protocol is None and not opts.urn:
raise TypeError("Missing required property 'access_protocol'")
__props__.__dict__["access_protocol"] = access_protocol
__props__.__dict__["azure_container_info"] = azure_container_info
__props__.__dict__["client_access_rights"] = client_access_rights
__props__.__dict__["data_policy"] = data_policy
__props__.__dict__["description"] = description
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if monitoring_status is None and not opts.urn:
raise TypeError("Missing required property 'monitoring_status'")
__props__.__dict__["monitoring_status"] = monitoring_status
__props__.__dict__["name"] = name
__props__.__dict__["refresh_details"] = refresh_details
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_status is None and not opts.urn:
raise TypeError("Missing required property 'share_status'")
__props__.__dict__["share_status"] = share_status
__props__.__dict__["user_access_rights"] = user_access_rights
__props__.__dict__["share_mappings"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Share"), pulumi.Alias(type_="azure-native:databoxedge:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:Share")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Share, __self__).__init__(
'azure-native:databoxedge/v20200501preview:Share',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Share':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ShareArgs.__new__(ShareArgs)
__props__.__dict__["access_protocol"] = None
__props__.__dict__["azure_container_info"] = None
__props__.__dict__["client_access_rights"] = None
__props__.__dict__["data_policy"] = None
__props__.__dict__["description"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["refresh_details"] = None
__props__.__dict__["share_mappings"] = None
__props__.__dict__["share_status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_access_rights"] = None
return Share(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Output[str]:
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> pulumi.Output[Optional['outputs.AzureContainerInfoResponse']]:
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.ClientAccessRightResponse']]]:
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> pulumi.Output[Optional['outputs.RefreshDetailsResponse']]:
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> pulumi.Output[Sequence['outputs.MountPointMapResponse']]:
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "share_status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccessRightResponse']]]:
return pulumi.get(self, "user_access_rights")
| true | true |
f72b36f52912edb8de8bb2207281239f45df89b6 | 2,134 | py | Python | demo/orm.py | 1987539447/start-python | 06ee5eb30e7395cd8432e8e33d7209fa855f4ad9 | [
"Apache-2.0"
] | null | null | null | demo/orm.py | 1987539447/start-python | 06ee5eb30e7395cd8432e8e33d7209fa855f4ad9 | [
"Apache-2.0"
] | null | null | null | demo/orm.py | 1987539447/start-python | 06ee5eb30e7395cd8432e8e33d7209fa855f4ad9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# FileName:orm.py
# -*- coding: utf-8 -*-
""" A simple ORM framework implemented via a metaclass. """
class Field(object):
    """Describe a single mapped column: an attribute name plus its SQL type."""

    def __init__(self, name, column_type):
        """Record the column *name* and its SQL *column_type* (e.g. 'bigint')."""
        self.name = name
        self.column_type = column_type

    def __str__(self):
        """Render as '<ClassName:column_name>' for debugging output."""
        cls_name = type(self).__name__
        return '<{}:{}>'.format(cls_name, self.name)
class IntegerField(Field):
    """A column stored as a SQL ``bigint``."""

    def __init__(self, name):
        # Delegate to Field with the type fixed to 'bigint'.
        super().__init__(name, 'bigint')
class StringField(Field):
    """A column stored as a SQL ``varchar(100)``."""

    def __init__(self, name):
        # Delegate to Field with the type fixed to 'varchar(100)'.
        super().__init__(name, 'varchar(100)')
class ModelMetaClass(type):
    """Metaclass that collects ``Field`` attributes into an ORM mapping.

    Every class built with this metaclass (except the ``Model`` base class
    itself) has its Field-valued attributes removed from the class body and
    gathered into ``__mapping__``; the class name is recorded as
    ``__table__``.
    """

    def __new__(cls, name, base, attrs):
        # The abstract base class carries no column mapping of its own.
        if name == 'Model':
            return type.__new__(cls, name, base, attrs)
        print('Found Model: %s' % name)
        # Collect every attribute that describes a column.
        column_map = {}
        for key, value in attrs.items():
            if isinstance(value, Field):
                print('Found mapping %s ==> %s' % (key, value))
                column_map[key] = value
        # Drop the raw Field objects so instance attribute access falls
        # through to Model.__getattr__ (dict lookup) instead of hitting
        # the class attribute.
        for key in column_map:
            attrs.pop(key)
        attrs['__mapping__'] = column_map
        attrs['__table__'] = name
        return type.__new__(cls, name, base, attrs)
class Model(dict, metaclass=ModelMetaClass):
    """Base class for ORM models: a dict whose keys double as attributes.

    Subclasses declare columns as ``Field`` class attributes; the
    ``ModelMetaClass`` moves them into ``__mapping__`` and records the
    table name in ``__table__``.
    """

    def __init__(self, **kw):
        """Initialise the row from keyword arguments (column=value)."""
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        """Expose dict keys as attributes; raise AttributeError when absent."""
        try:
            return self[key]
        except KeyError:
            # Fixed message grammar (was "do not has attribute"); `from None`
            # hides the internal KeyError from the traceback chain.
            raise AttributeError(
                "'Model' object has no attribute '%s'" % key) from None

    def __setattr__(self, key, value):
        """Store attribute assignments in the underlying dict."""
        self[key] = value

    def save(self):
        """Build and print the INSERT statement and its bound arguments.

        A real ORM would hand *sql* and *args* to a DB-API cursor; this
        demo only prints them.
        """
        fields = []
        params = []
        args = []
        for attr, field in self.__mapping__.items():
            fields.append(field.name)
            params.append('?')  # DB-API 'qmark' placeholder
            args.append(getattr(self, attr, None))
        sql = 'insert into %s (%s) values(%s)' % (
            self.__table__, ','.join(fields), ','.join(params))
        print('SQL: %s' % sql)
        print('ARGS: %s' % str(args))
# test code
class User(Model):
    # Column declarations; ModelMetaClass moves these Field objects into
    # User.__mapping__, so instances read them via Model.__getattr__.
    id = IntegerField('id')
    name = StringField('username')  # attribute 'name' maps to column 'username'
    email = StringField('email')
    password = StringField('password')
# Demo run: builds and prints the generated INSERT statement at import time.
u = User(id=123, name='Michel', email='abc@jd.com', password='pass')
u.save()
| 27.012658 | 101 | 0.56701 |
class Field(object):
    """Describe one mapped column: an attribute name plus its SQL type."""
    def __init__(self, name, column_type):
        # e.g. name='id', column_type='bigint'
        self.name = name
        self.column_type = column_type
    def __str__(self):
        # Debug representation, e.g. '<IntegerField:id>'.
        return '<%s:%s>' % (self.__class__.__name__, self.name)
class IntegerField(Field):
    """A column stored as a SQL ``bigint``."""
    def __init__(self, name):
        super(IntegerField, self).__init__(name, 'bigint')
class StringField(Field):
    """A column stored as a SQL ``varchar(100)``."""
    def __init__(self, name):
        super(StringField, self).__init__(name, 'varchar(100)')
class ModelMetaClass(type):
    """Metaclass that collects Field attributes of a model class.

    Field-valued class attributes are removed from the class body and
    gathered into ``__mapping__``; the class name is recorded as
    ``__table__``. The ``Model`` base class itself is left untouched.
    """
    def __new__(cls, name, base, attrs):
        # The abstract base class carries no column mapping.
        if name == 'Model':
            return type.__new__(cls, name, base, attrs)
        print('Found Model: %s' % name)
        mapping = dict()
        for k, v in attrs.items():
            if isinstance(v, Field):
                print('Found mapping %s ==> %s' % (k, v))
                mapping[k] = v
        # Drop the raw Field objects so instance attribute access falls
        # through to Model.__getattr__ (dict lookup) instead.
        for k in mapping.keys():
            attrs.pop(k)
        attrs['__mapping__'] = mapping
        attrs['__table__'] = name
        return type.__new__(cls, name, base, attrs)
class Model(dict, metaclass=ModelMetaClass):
    """Base ORM model: a dict whose keys are exposed as attributes."""
    def __init__(self, **kw):
        # Rows are constructed from column=value keyword arguments.
        super(Model, self).__init__(**kw)
    def __getattr__(self, key):
        # Missing keys surface as AttributeError, as the attribute
        # protocol requires.
        try:
            return self[key]
        except KeyError:
            # NOTE(review): message grammar ("do not has") — fix upstream.
            raise AttributeError(r"'Model' object do not has attribute %s" % key)
    def __setattr__(self, key, value):
        # Attribute assignment writes straight into the underlying dict.
        self[key] = value
    def save(self):
        """Build and print the INSERT statement and its bound arguments."""
        fields = []
        params = []
        args = []
        for k, v in self.__mapping__.items():
            fields.append(v.name)
            params.append('?')  # DB-API 'qmark' placeholder
            args.append(getattr(self, k, None))
        sql = 'insert into %s (%s) values(%s)' % (self.__table__, ','.join(fields), ','.join(params))
        print('SQL: %s' % sql)
        print('ARGS: %s' % str(args))
class User(Model):
    # Column declarations collected into User.__mapping__ by the metaclass.
    id = IntegerField('id')
    name = StringField('username')  # attribute 'name' maps to column 'username'
    email = StringField('email')
    password = StringField('password')
# Demo run: builds and prints the INSERT statement at import time.
u = User(id=123, name='Michel', email='abc@jd.com', password='pass')
u.save()
| true | true |
f72b389f11090c3291e690acc9bf38811dce7cae | 1,162 | py | Python | src/tests/ftest/util/oclass_utils.py | cibervicho/daos | 3868c758c696d0a7973ac2b93b56d986fabfc6e0 | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/util/oclass_utils.py | cibervicho/daos | 3868c758c696d0a7973ac2b93b56d986fabfc6e0 | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/util/oclass_utils.py | cibervicho/daos | 3868c758c696d0a7973ac2b93b56d986fabfc6e0 | [
"BSD-2-Clause-Patent"
] | null | null | null | #!/usr/bin/env python3
"""
(C) Copyright 2018-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
def extract_redundancy_factor(oclass):
    """Extract the redundancy factor from an object class.

    Args:
        oclass (str): the object class, e.g. "EC_2P1GX" or "RP_3G1".

    Returns:
        int: the redundancy factor. For erasure code (EC_xPy) this is the
            parity count y; for replication (RP_n) it is n - 1; 0 for
            classes without redundancy (e.g. "SX").
    """
    # Erasure code: the parity count is the redundancy factor.
    # "([0-9]+)" captures the whole parity count; the previous "([0-9])+"
    # kept only the last digit of a multi-digit count.
    match = re.search("EC_[0-9]+P([0-9]+)", oclass)
    if match:
        return int(match.group(1))
    # Replication: n replicas tolerate n - 1 failures.
    match = re.search("RP_([0-9]+)", oclass)
    if match:
        return int(match.group(1)) - 1
    return 0
def calculate_min_servers(oclass):
    """Calculate the minimum number of required servers for an object class.

    Args:
        oclass (str): the object class, e.g. "EC_2P1GX" or "RP_3G1".

    Returns:
        int: the minimum number of required servers: data + parity counts
            for erasure code, the replica count for replication, and 1 for
            classes without redundancy.
    """
    patterns = [
        # Data and parity counts each need a server.
        # "([0-9]+)" (not "([0-9])+") so multi-digit counts are kept whole.
        "EC_([0-9]+)P([0-9]+)",
        # One server per replica.
        "RP_([0-9]+)",
    ]
    for pattern in patterns:
        match = re.search(pattern, oclass)
        if match:
            # Sum every captured count. The previous findall-based version
            # iterated the characters of a single-group match string, which
            # summed the digits of a multi-digit replica count instead of
            # using the count itself.
            return sum(int(count) for count in match.groups())
    return 1
| 23.714286 | 76 | 0.593804 |
import re
def extract_redundancy_factor(oclass):
    """Extract the redundancy factor from an object class.

    Returns the parity count for erasure code classes (EC_xPy), the
    replica count minus one for replicated classes (RP_n), else 0.
    """
    # NOTE(review): "([0-9])+" captures only the LAST parity digit, so a
    # multi-digit parity count would be truncated — "([0-9]+)" intended?
    match = re.search("EC_[0-9]+P([0-9])+", oclass)
    if match:
        return int(match.group(1))
    # n replicas tolerate n - 1 failures.
    match = re.search("RP_([0-9]+)", oclass)
    if match:
        return int(match.group(1)) - 1
    return 0
def calculate_min_servers(oclass):
    """Calculate the minimum number of servers an object class requires.

    Sums data + parity counts for EC classes, uses the replica count for
    RP classes, and falls back to 1 for everything else.
    """
    patterns = [
        "EC_([0-9]+)P([0-9])+",
        "RP_([0-9]+)"
    ]
    for pattern in patterns:
        # findall returns tuples of groups for multi-group patterns but
        # plain strings for the single-group RP pattern.
        match = re.findall(pattern, oclass)
        if match:
            # NOTE(review): for RP_n, match[0] is a string, so this sums
            # the DIGITS of n — wrong for multi-digit replica counts.
            return sum(int(n) for n in match[0])
    return 1
| true | true |
f72b3967e92f28affb77f904b54628581c7af2bf | 7,799 | py | Python | galaxy/api/v2/tests/test_collection_views.py | bmclaughlin/galaxy | 3f57e3684c27cb88d45881eaec16dc3095ac4e6d | [
"Apache-2.0"
] | 904 | 2016-10-11T13:35:19.000Z | 2022-03-25T09:29:09.000Z | galaxy/api/v2/tests/test_collection_views.py | bmclaughlin/galaxy | 3f57e3684c27cb88d45881eaec16dc3095ac4e6d | [
"Apache-2.0"
] | 1,866 | 2016-10-15T21:28:09.000Z | 2022-03-29T18:09:20.000Z | galaxy/api/v2/tests/test_collection_views.py | bmclaughlin/galaxy | 3f57e3684c27cb88d45881eaec16dc3095ac4e6d | [
"Apache-2.0"
] | 368 | 2016-10-11T13:44:08.000Z | 2022-03-30T02:23:12.000Z | # (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import hashlib
from unittest import mock
import os
import tempfile
import shutil
import tarfile
from contextlib import contextmanager
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase
from rest_framework import status as http_codes
from galaxy.main import models
UserModel = get_user_model()
@contextmanager
def tar_archive_available():
dir = tempfile.mkdtemp()
file_path = os.path.join(dir, 'mynamespace-mycollection-1.2.3.tar.gz')
with open(file_path, 'wb') as fp:
tar = tarfile.open(fileobj=fp, mode='w')
tar.close()
yield file_path
shutil.rmtree(dir)
class TestCollectionListView(APITestCase):
url = '/api/v2/collections/'
def setUp(self):
super().setUp()
self.user = UserModel.objects.create_user(
username='testuser', password='secret')
self.namespace = models.Namespace.objects.create(name='mynamespace')
self.namespace.owners.set([self.user])
self.client.login(username=self.user.username, password='secret')
patcher = mock.patch('galaxy.common.tasking.create_task')
self.create_task_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_upload(self):
task_obj = models.ImportTask(id=42)
self.create_task_mock.return_value = task_obj
with tar_archive_available() as file_path:
with open(file_path, 'r') as fp:
response = self.client.post(self.url, data={
'file': fp,
})
self.create_task_mock.assert_called_once()
assert response.status_code == http_codes.HTTP_202_ACCEPTED
assert response.json() == {
'task': 'http://testserver/api/v2/collection-imports/42/'}
def test_upload_w_sha(self):
task_obj = models.ImportTask(id=42)
self.create_task_mock.return_value = task_obj
with tar_archive_available() as file_path:
with open(file_path, 'rb') as fp:
bytes = fp.read()
file_sha256 = hashlib.sha256(bytes).hexdigest()
with open(file_path, 'r') as fp:
response = self.client.post(self.url, data={
'file': fp,
'sha256': file_sha256,
})
assert response.status_code == http_codes.HTTP_202_ACCEPTED
assert response.json() == {
'task': 'http://testserver/api/v2/collection-imports/42/'}
self.create_task_mock.assert_called_once()
def test_upload_w_invalid_sha(self):
with tar_archive_available() as file_path:
with open(file_path, 'rb') as fp:
bytes = fp.read()
file_sha256 = hashlib.sha256(bytes + b'x').hexdigest()
with open(file_path, 'r') as fp:
response = self.client.post(self.url, data={
'file': fp,
'sha256': file_sha256,
})
assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
assert response.json() == {
'code': 'invalid',
'message': 'The sha256 checksum did not match.'
}
def test_upload_invalid_namespace(self):
open_mock = mock.mock_open(read_data='Test data')
with open_mock() as fp:
fp.name = 'wrongnamespace-mycollection-1.2.3.tar.gz'
response = self.client.post(self.url, data={
'file': fp,
})
assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
assert response.json() == {
'code': 'invalid',
'message': 'Namespace "wrongnamespace" does not exist.'
}
def test_upload_version_conflict(self):
collection = models.Collection.objects.create(
namespace=self.namespace, name='mycollection')
models.CollectionVersion.objects.create(
collection=collection, version='1.2.3')
open_mock = mock.mock_open(read_data=b'Test data')
with open_mock() as fp:
fp.name = 'mynamespace-mycollection-1.2.3.tar.gz'
response = self.client.post(self.url, data={
'file': fp
})
assert response.status_code == http_codes.HTTP_409_CONFLICT
assert response.json() == {
'code': 'conflict.collection_exists',
'message': 'Collection "mynamespace-mycollection-1.2.3"'
' already exists.'
}
def test_get_collection_list(self):
self.collection = models.Collection.objects.create(
namespace=self.namespace,
name='mycollection')
response = self.client.get(self.url)
result = response.json()
assert response.status_code == http_codes.HTTP_200_OK
assert result['count'] == 1
assert result['results'][0]['name'] == 'mycollection'
def test_fail_method_not_allowed(self):
for method in ['PUT', 'PATCH', 'DELETE']:
response = self.client.generic(method, self.url)
assert (response.status_code
== http_codes.HTTP_405_METHOD_NOT_ALLOWED)
class TestCollectionDetailView(APITestCase):
url_id = '/api/v2/collections/{pk}/'
url_name = '/api/v2/collections/{ns}/{name}/'
def setUp(self):
super().setUp()
self.namespace = models.Namespace.objects.create(
name='mynamespace')
self.collection = models.Collection.objects.create(
namespace=self.namespace,
name='mycollection')
self.version1 = models.CollectionVersion.objects.create(
collection=self.collection, version='1.1.2')
self.version2 = models.CollectionVersion.objects.create(
collection=self.collection, version='1.1.12')
self.version3 = models.CollectionVersion.objects.create(
collection=self.collection, version='1.0.1')
self.collection.latest_version = self.version2
self.collection.save()
def test_view_success(self):
urls = [
self.url_id.format(pk=self.collection.pk),
self.url_name.format(
ns=self.namespace.name,
name=self.collection.name,
),
]
for url in urls:
response = self.client.get(url)
assert response.status_code == http_codes.HTTP_200_OK
result = response.json()
assert result['id'] == self.collection.pk
assert result['href'] == f'http://testserver{urls[1]}'
assert result['name'] == self.collection.name
assert result['namespace']['name'] == self.namespace.name
assert result['versions_url'] == \
f'http://testserver{urls[1]}versions/'
assert (result['latest_version']['version'] ==
self.version2.version)
assert result['deprecated'] is False
def test_view_404(self):
response = self.client.get(self.url_id.format(pk=self.collection.pk+1))
assert response.status_code == http_codes.HTTP_404_NOT_FOUND
| 36.443925 | 79 | 0.62149 |
import hashlib
from unittest import mock
import os
import tempfile
import shutil
import tarfile
from contextlib import contextmanager
from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase
from rest_framework import status as http_codes
from galaxy.main import models
UserModel = get_user_model()
@contextmanager
def tar_archive_available():
dir = tempfile.mkdtemp()
file_path = os.path.join(dir, 'mynamespace-mycollection-1.2.3.tar.gz')
with open(file_path, 'wb') as fp:
tar = tarfile.open(fileobj=fp, mode='w')
tar.close()
yield file_path
shutil.rmtree(dir)
class TestCollectionListView(APITestCase):
url = '/api/v2/collections/'
def setUp(self):
super().setUp()
self.user = UserModel.objects.create_user(
username='testuser', password='secret')
self.namespace = models.Namespace.objects.create(name='mynamespace')
self.namespace.owners.set([self.user])
self.client.login(username=self.user.username, password='secret')
patcher = mock.patch('galaxy.common.tasking.create_task')
self.create_task_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_upload(self):
task_obj = models.ImportTask(id=42)
self.create_task_mock.return_value = task_obj
with tar_archive_available() as file_path:
with open(file_path, 'r') as fp:
response = self.client.post(self.url, data={
'file': fp,
})
self.create_task_mock.assert_called_once()
assert response.status_code == http_codes.HTTP_202_ACCEPTED
assert response.json() == {
'task': 'http://testserver/api/v2/collection-imports/42/'}
def test_upload_w_sha(self):
task_obj = models.ImportTask(id=42)
self.create_task_mock.return_value = task_obj
with tar_archive_available() as file_path:
with open(file_path, 'rb') as fp:
bytes = fp.read()
file_sha256 = hashlib.sha256(bytes).hexdigest()
with open(file_path, 'r') as fp:
response = self.client.post(self.url, data={
'file': fp,
'sha256': file_sha256,
})
assert response.status_code == http_codes.HTTP_202_ACCEPTED
assert response.json() == {
'task': 'http://testserver/api/v2/collection-imports/42/'}
self.create_task_mock.assert_called_once()
def test_upload_w_invalid_sha(self):
with tar_archive_available() as file_path:
with open(file_path, 'rb') as fp:
bytes = fp.read()
file_sha256 = hashlib.sha256(bytes + b'x').hexdigest()
with open(file_path, 'r') as fp:
response = self.client.post(self.url, data={
'file': fp,
'sha256': file_sha256,
})
assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
assert response.json() == {
'code': 'invalid',
'message': 'The sha256 checksum did not match.'
}
def test_upload_invalid_namespace(self):
open_mock = mock.mock_open(read_data='Test data')
with open_mock() as fp:
fp.name = 'wrongnamespace-mycollection-1.2.3.tar.gz'
response = self.client.post(self.url, data={
'file': fp,
})
assert response.status_code == http_codes.HTTP_400_BAD_REQUEST
assert response.json() == {
'code': 'invalid',
'message': 'Namespace "wrongnamespace" does not exist.'
}
def test_upload_version_conflict(self):
collection = models.Collection.objects.create(
namespace=self.namespace, name='mycollection')
models.CollectionVersion.objects.create(
collection=collection, version='1.2.3')
open_mock = mock.mock_open(read_data=b'Test data')
with open_mock() as fp:
fp.name = 'mynamespace-mycollection-1.2.3.tar.gz'
response = self.client.post(self.url, data={
'file': fp
})
assert response.status_code == http_codes.HTTP_409_CONFLICT
assert response.json() == {
'code': 'conflict.collection_exists',
'message': 'Collection "mynamespace-mycollection-1.2.3"'
' already exists.'
}
def test_get_collection_list(self):
self.collection = models.Collection.objects.create(
namespace=self.namespace,
name='mycollection')
response = self.client.get(self.url)
result = response.json()
assert response.status_code == http_codes.HTTP_200_OK
assert result['count'] == 1
assert result['results'][0]['name'] == 'mycollection'
def test_fail_method_not_allowed(self):
for method in ['PUT', 'PATCH', 'DELETE']:
response = self.client.generic(method, self.url)
assert (response.status_code
== http_codes.HTTP_405_METHOD_NOT_ALLOWED)
class TestCollectionDetailView(APITestCase):
url_id = '/api/v2/collections/{pk}/'
url_name = '/api/v2/collections/{ns}/{name}/'
def setUp(self):
super().setUp()
self.namespace = models.Namespace.objects.create(
name='mynamespace')
self.collection = models.Collection.objects.create(
namespace=self.namespace,
name='mycollection')
self.version1 = models.CollectionVersion.objects.create(
collection=self.collection, version='1.1.2')
self.version2 = models.CollectionVersion.objects.create(
collection=self.collection, version='1.1.12')
self.version3 = models.CollectionVersion.objects.create(
collection=self.collection, version='1.0.1')
self.collection.latest_version = self.version2
self.collection.save()
def test_view_success(self):
urls = [
self.url_id.format(pk=self.collection.pk),
self.url_name.format(
ns=self.namespace.name,
name=self.collection.name,
),
]
for url in urls:
response = self.client.get(url)
assert response.status_code == http_codes.HTTP_200_OK
result = response.json()
assert result['id'] == self.collection.pk
assert result['href'] == f'http://testserver{urls[1]}'
assert result['name'] == self.collection.name
assert result['namespace']['name'] == self.namespace.name
assert result['versions_url'] == \
f'http://testserver{urls[1]}versions/'
assert (result['latest_version']['version'] ==
self.version2.version)
assert result['deprecated'] is False
def test_view_404(self):
response = self.client.get(self.url_id.format(pk=self.collection.pk+1))
assert response.status_code == http_codes.HTTP_404_NOT_FOUND
| true | true |
f72b39da0ed6829e91b76ba1b8864ebef44e2299 | 1,043 | py | Python | cortex/options.py | lembert1990/pycortex | f1cd6abb5e193d0b7ec120107268dbbbf3a002fc | [
"BSD-2-Clause"
] | null | null | null | cortex/options.py | lembert1990/pycortex | f1cd6abb5e193d0b7ec120107268dbbbf3a002fc | [
"BSD-2-Clause"
] | null | null | null | cortex/options.py | lembert1990/pycortex | f1cd6abb5e193d0b7ec120107268dbbbf3a002fc | [
"BSD-2-Clause"
] | 1 | 2019-03-04T02:45:59.000Z | 2019-03-04T02:45:59.000Z | import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
from . import appdirs
cwd = os.path.split(os.path.abspath(__file__))[0]
userdir = appdirs.user_data_dir("pycortex", "JamesGao")
usercfg = os.path.join(userdir, "options.cfg")
# Read defaults from pycortex repo
config = configparser.ConfigParser()
config.read(os.path.join(cwd, 'defaults.cfg'))
# Update defaults with user-sepecifed values in user config
files_successfully_read = config.read(usercfg)
# If user config doesn't exist, create it
if len(files_successfully_read) == 0:
os.makedirs(userdir, exist_ok=True)
with open(usercfg, 'w') as fp:
config.write(fp)
#set default path in case the module is imported from the source code directory
if not config.has_option("basic", "filestore"):
config.set("basic", "filestore", os.path.join(cwd, os.pardir, "filestore/db"))
if not config.has_option("webgl", "colormaps"):
config.set("webgl", "colormaps", os.path.join(cwd, os.pardir, "filestore/colormaps"))
| 33.645161 | 89 | 0.731544 | import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
from . import appdirs
cwd = os.path.split(os.path.abspath(__file__))[0]
userdir = appdirs.user_data_dir("pycortex", "JamesGao")
usercfg = os.path.join(userdir, "options.cfg")
config = configparser.ConfigParser()
config.read(os.path.join(cwd, 'defaults.cfg'))
files_successfully_read = config.read(usercfg)
if len(files_successfully_read) == 0:
os.makedirs(userdir, exist_ok=True)
with open(usercfg, 'w') as fp:
config.write(fp)
#set default path in case the module is imported from the source code directory
if not config.has_option("basic", "filestore"):
config.set("basic", "filestore", os.path.join(cwd, os.pardir, "filestore/db"))
if not config.has_option("webgl", "colormaps"):
config.set("webgl", "colormaps", os.path.join(cwd, os.pardir, "filestore/colormaps"))
| true | true |
f72b39f30657384a75e0bf6bea346fedfc2a5b53 | 784 | py | Python | users/migrations/0002_auto_20200807_1424.py | Hogwarts250/lesson-discussion | 42aa0d5d4e4a8cb10c99ff7558e9d7a5f2f3e470 | [
"Apache-2.0"
] | null | null | null | users/migrations/0002_auto_20200807_1424.py | Hogwarts250/lesson-discussion | 42aa0d5d4e4a8cb10c99ff7558e9d7a5f2f3e470 | [
"Apache-2.0"
] | 4 | 2021-04-08T19:49:25.000Z | 2021-06-10T20:08:37.000Z | users/migrations/0002_auto_20200807_1424.py | Hogwarts250/lesson-discussion | 42aa0d5d4e4a8cb10c99ff7558e9d7a5f2f3e470 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.9 on 2020-08-07 21:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='buyer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='buyer', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='transaction',
name='seller',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL),
),
]
| 30.153846 | 144 | 0.653061 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='buyer',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='buyer', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='transaction',
name='seller',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f72b3a24ac71bb8d2a6c614a0679f5609ec4ff65 | 1,051 | py | Python | fedex/services/tracking.py | jzempel/fedex | 9c617767799217a80669c1ecca0c3a7667f82d71 | [
"BSD-3-Clause"
] | 19 | 2015-02-02T03:00:54.000Z | 2021-09-06T02:22:22.000Z | fedex/services/tracking.py | hongzhou-liu/fedex | 9c617767799217a80669c1ecca0c3a7667f82d71 | [
"BSD-3-Clause"
] | 2 | 2015-08-14T22:05:17.000Z | 2017-03-01T18:54:40.000Z | fedex/services/tracking.py | hongzhou-liu/fedex | 9c617767799217a80669c1ecca0c3a7667f82d71 | [
"BSD-3-Clause"
] | 53 | 2015-03-31T14:46:30.000Z | 2022-01-02T15:06:38.000Z | # -*- coding: utf-8 -*-
"""
fedex.services.tracking
~~~~~~~~~~~~~~~~~~~~~~~
FedEx tracking web services.
:copyright: 2014 by Jonathan Zempel.
:license: BSD, see LICENSE for more details.
"""
from .commons import BaseService
class TrackingService(BaseService):
"""Tracking service.
:param configuration: API configuration.
:param wsdl_version: Default ``10``.
"""
def __init__(self, configuration, wsdl_version=10):
super(TrackingService, self).__init__(configuration, "Track",
wsdl_version, "trck")
def create_selection_details(self):
"""Create a new selection details object.
"""
return self.create("TrackSelectionDetail")
def track(self, selection_details, **kwargs):
"""Track a package.
:param selection_details: Details to select the package to track.
:param kwargs: Additional service keyword arguments.
"""
kwargs["SelectionDetails"] = selection_details
return self.call("track", **kwargs)
| 26.275 | 73 | 0.636537 |
from .commons import BaseService
class TrackingService(BaseService):
def __init__(self, configuration, wsdl_version=10):
super(TrackingService, self).__init__(configuration, "Track",
wsdl_version, "trck")
def create_selection_details(self):
return self.create("TrackSelectionDetail")
def track(self, selection_details, **kwargs):
kwargs["SelectionDetails"] = selection_details
return self.call("track", **kwargs)
| true | true |
f72b3aaaffce70e47700f8e60f660a0f8a235566 | 810 | py | Python | utils/logger.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | 2 | 2021-09-26T07:03:54.000Z | 2022-02-21T15:46:30.000Z | utils/logger.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | null | null | null | utils/logger.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | 15f9011c1b10f1e0c068f90ed46e72b3bc343310 | [
"MIT"
] | 1 | 2021-04-16T06:11:41.000Z | 2021-04-16T06:11:41.000Z | import logging
import time
import os
import sys
def create_logger(final_output_path, description=None):
if description is None:
log_file = '{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'))
else:
log_file = '{}_{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'), description)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=os.path.join(final_output_path, log_file),
format=head)
clogger = logging.getLogger()
clogger.setLevel(logging.INFO)
# add handler
# print to stdout and log file
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
clogger.addHandler(ch)
return clogger | 33.75 | 89 | 0.653086 | import logging
import time
import os
import sys
def create_logger(final_output_path, description=None):
if description is None:
log_file = '{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'))
else:
log_file = '{}_{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'), description)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=os.path.join(final_output_path, log_file),
format=head)
clogger = logging.getLogger()
clogger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
clogger.addHandler(ch)
return clogger | true | true |
f72b3aca2cee2eefced9efcef5337ee9bdadcbda | 719 | py | Python | arrays/height_checker.py | ChristianChiarulli/leetcode | 6920cea51b61feae9038b185c0b1172a93f6316a | [
"MIT"
] | 4 | 2020-12-09T03:53:21.000Z | 2021-03-30T12:28:21.000Z | .old/arrays/height_checker.py | ChristianChiarulli/data_structures_and_algorithms | 05c5c7c9db0cc0a15e83ba20d5bf4f6534b08fc1 | [
"MIT"
] | null | null | null | .old/arrays/height_checker.py | ChristianChiarulli/data_structures_and_algorithms | 05c5c7c9db0cc0a15e83ba20d5bf4f6534b08fc1 | [
"MIT"
] | 2 | 2020-06-12T17:00:07.000Z | 2020-07-13T20:13:56.000Z | # Students are asked to stand in non-decreasing
# order of heights for an annual photo.
# Return the minimum number of students that must
# move in order for all students to be standing in
# non-decreasing order of height.
# Notice that when a group of students is selected
# they can reorder in any possible way between themselves
# and the non selected students remain on their seats.
def heightchecker(heights):
swap_count = 0
sorted_heights = sorted(heights)
if heights == sorted_heights:
return swap_count
for i in range(len(heights)):
if heights[i] != sorted_heights[i]:
swap_count += 1
return swap_count
heights = [1, 1, 4, 2, 1, 3]
heightchecker(heights)
| 25.678571 | 57 | 0.707928 |
def heightchecker(heights):
swap_count = 0
sorted_heights = sorted(heights)
if heights == sorted_heights:
return swap_count
for i in range(len(heights)):
if heights[i] != sorted_heights[i]:
swap_count += 1
return swap_count
heights = [1, 1, 4, 2, 1, 3]
heightchecker(heights)
| true | true |
f72b3ae733f2a861d7932c76a99b7695e4c0bcce | 842 | py | Python | lstmcpipe/scripts/script_merge_utils_move_dir.py | cta-observatory/lst-i-rf | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 2 | 2021-02-01T17:30:46.000Z | 2021-02-22T13:59:49.000Z | lstmcpipe/scripts/script_merge_utils_move_dir.py | cta-observatory/lst-i-rf | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 106 | 2021-04-16T21:15:20.000Z | 2022-03-31T23:02:50.000Z | lstmcpipe/scripts/script_merge_utils_move_dir.py | cta-observatory/lstmcpipe | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 3 | 2022-03-02T09:23:09.000Z | 2022-03-03T16:00:25.000Z | #!/usr/bin/env python
import argparse
from lstmcpipe.io.data_management import (
move_dir_content,
check_and_make_dir_without_verification,
)
parser = argparse.ArgumentParser(
description="Script to move a directory and its content after creating the destination"
" directory."
)
parser.add_argument(
"--source",
"-s",
type=str,
dest="source",
help="source argument of move_dir_content",
)
parser.add_argument(
"--destination",
"-d",
type=str,
dest="dest",
help="destination argument of move_dir_content",
)
def main():
args = parser.parse_args()
# check_and_make_dir(args.dest) CANNOT be used because if demands user interaction.
check_and_make_dir_without_verification(args.dest)
move_dir_content(args.source, args.dest)
if __name__ == "__main__":
main()
| 20.536585 | 91 | 0.704276 |
import argparse
from lstmcpipe.io.data_management import (
move_dir_content,
check_and_make_dir_without_verification,
)
parser = argparse.ArgumentParser(
description="Script to move a directory and its content after creating the destination"
" directory."
)
parser.add_argument(
"--source",
"-s",
type=str,
dest="source",
help="source argument of move_dir_content",
)
parser.add_argument(
"--destination",
"-d",
type=str,
dest="dest",
help="destination argument of move_dir_content",
)
def main():
args = parser.parse_args()
check_and_make_dir_without_verification(args.dest)
move_dir_content(args.source, args.dest)
if __name__ == "__main__":
main()
| true | true |
f72b3cc7e9db8fe13e7966a92394dc17106664c7 | 82,459 | py | Python | fpn/symbols/resnet_v1_101_fpn_rcnn.py | YAMLONG/Deformable-ConvNets | ea937451e103ba1fbf4fdcbd08ef3ca1ca832ef4 | [
"Apache-2.0"
] | 2 | 2018-10-24T01:47:49.000Z | 2020-05-30T15:23:02.000Z | fpn/symbols/resnet_v1_101_fpn_rcnn.py | YAMLONG/Deformable-ConvNets | ea937451e103ba1fbf4fdcbd08ef3ca1ca832ef4 | [
"Apache-2.0"
] | null | null | null | fpn/symbols/resnet_v1_101_fpn_rcnn.py | YAMLONG/Deformable-ConvNets | ea937451e103ba1fbf4fdcbd08ef3ca1ca832ef4 | [
"Apache-2.0"
] | 1 | 2018-03-29T11:47:01.000Z | 2018-03-29T11:47:01.000Z | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Haozhi Qi
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_rcnn(Symbol):
def __init__(self):
"""
Use __init__ to define parameter network needs
"""
self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
# res5a-bottleneck
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
# res5a-shortcut
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
# res5b-bottleneck
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
# res5b-shortcut
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
# res5c-bottleneck
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
# res5c-shortcut
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
# top-down connection
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
# n x (2*A) x H x W => n x 2 x (A*H*W)
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_symbol(self, cfg, is_train=True):
# config alias for convenient
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
res2, res3, res4, res5 = self.get_resnet_backbone(data)
fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
'rpn_cls_prob_stride32': rpn_prob_p5,
'rpn_cls_prob_stride16': rpn_prob_p4,
'rpn_cls_prob_stride8': rpn_prob_p3,
'rpn_cls_prob_stride4': rpn_prob_p2,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
}
arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
# RPN classification loss
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
# bounding box regression
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
# ROI proposal
rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling')
# 2 fc
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
# cls_score/bbox_pred
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
# group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, mx.sym.BlockGrad(cls_prob), mx.sym.BlockGrad(bbox_loss), mx.sym.BlockGrad(rcnn_label)])
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
for name in self.shared_param_list:
arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_weight_rcnn(cfg, arg_params, aux_params)
self.init_weight_fpn(cfg, arg_params, aux_params)
| 85.009278 | 180 | 0.613323 |
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_rcnn(Symbol):
def __init__(self):
self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
    """Build the FPN top-down pyramid (P2..P6) from backbone stages C2..C5.

    Each lateral 1x1 conv projects a backbone stage to ``feature_dim``
    channels; the top-down path upsamples the coarser level by 2 and adds
    the lateral; a 3x3 conv smooths each merged map.  P6 is a stride-2
    3x3 conv applied directly to C5.

    Returns (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6), finest first.
    """
    # Lateral 1x1 projections, keyed by pyramid level.
    lateral = {}
    for level, stage in zip((2, 3, 4, 5), (c2, c3, c4, c5)):
        lateral[level] = mx.symbol.Convolution(data=stage, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                                               num_filter=feature_dim, name='fpn_p%d_1x1' % level)
    # Top-down pathway: start from the coarsest level and merge downwards.
    merged = {5: lateral[5]}
    for level in (4, 3, 2):
        upsampled = mx.symbol.UpSampling(merged[level + 1], scale=2, sample_type='nearest',
                                         name='fpn_p%d_upsample' % (level + 1))
        merged[level] = mx.sym.ElementWiseSum(*[upsampled, lateral[level]], name='fpn_p%d_sum' % level)
    # Extra coarse level for large anchors.
    fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2),
                                num_filter=feature_dim, name='fpn_p6')
    # 3x3 smoothing convs reduce upsampling aliasing on each merged map.
    smoothed = {}
    for level in (2, 3, 4, 5):
        smoothed[level] = mx.symbol.Convolution(data=merged[level], kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                                                num_filter=feature_dim, name='fpn_p%d' % level)
    return smoothed[2], smoothed[3], smoothed[4], smoothed[5], fpn_p6
def get_rpn_subnet(self, data, num_anchors, suffix):
    """Build the RPN head for one pyramid level.

    Weights/biases come from ``self.shared_param_dict`` so the head is
    shared across levels; ``suffix`` only disambiguates symbol names.

    Returns a 4-tuple:
      - classification scores reshaped to (batch, 2, -1) for the loss,
      - softmax objectness probabilities reshaped for the proposal op,
      - bbox deltas flattened to (batch, C, -1) for the loss,
      - raw bbox delta map for the proposal op.
    """
    shared = self.shared_param_dict
    conv3x3 = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512,
                                 name='rpn_conv_' + suffix,
                                 weight=shared['rpn_conv_weight'], bias=shared['rpn_conv_bias'])
    conv3x3_relu = mx.sym.Activation(data=conv3x3, act_type='relu', name='rpn_relu_' + suffix)
    # Two sibling 1x1 convs: objectness (2 per anchor) and box regression (4 per anchor).
    cls_score = mx.sym.Convolution(data=conv3x3_relu, kernel=(1, 1), pad=(0, 0),
                                   num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
                                   weight=shared['rpn_cls_score_weight'], bias=shared['rpn_cls_score_bias'])
    bbox_pred = mx.sym.Convolution(data=conv3x3_relu, kernel=(1, 1), pad=(0, 0),
                                   num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
                                   weight=shared['rpn_bbox_pred_weight'], bias=shared['rpn_bbox_pred_bias'])
    # Expose the fg/bg pair as its own axis so channel softmax normalises over it.
    cls_score_pairs = mx.sym.Reshape(data=cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
    cls_score_flat = mx.sym.Reshape(data=cls_score_pairs, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
    cls_prob = mx.sym.SoftmaxActivation(data=cls_score_pairs, mode='channel', name='rpn_cls_prob_' + suffix)
    cls_prob_map = mx.sym.Reshape(data=cls_prob, shape=(0, 2 * num_anchors, -1, 0),
                                  name='rpn_cls_prob_t_' + suffix)
    bbox_pred_flat = mx.sym.Reshape(data=bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
    return cls_score_flat, cls_prob_map, bbox_pred_flat, bbox_pred
def get_symbol(self, cfg, is_train=True):
    """Assemble the end-to-end FPN Faster R-CNN symbol.

    Builds backbone -> FPN -> shared per-level RPN heads -> pyramid
    proposal op -> FPN RoI pooling -> 2-fc RCNN head.  In training mode
    the returned group carries the RPN and RCNN losses; in test mode it
    carries (rois, cls_prob, bbox_pred).

    :param cfg: experiment config (dataset, network, TRAIN/TEST sections)
    :param is_train: build the training graph (losses, targets) if True
    :return: mx.sym.Group of outputs (also stored on ``self.sym``)
    """
    num_classes = cfg.dataset.NUM_CLASSES
    # class-agnostic regression predicts 2 "classes" (bg/fg) instead of one box per class
    num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
    data = mx.sym.Variable(name="data")
    im_info = mx.sym.Variable(name="im_info")
    # Backbone stages C2..C5, then FPN levels P2..P6 (finest to coarsest).
    res2, res3, res4, res5 = self.get_resnet_backbone(data)
    fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
    # One shared-parameter RPN head per pyramid level.
    rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
    rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
    rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
    rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
    rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
    # Keyword inputs for the pyramid_proposal Custom op, keyed by feature stride.
    rpn_cls_prob_dict = {
        'rpn_cls_prob_stride64': rpn_prob_p6,
        'rpn_cls_prob_stride32': rpn_prob_p5,
        'rpn_cls_prob_stride16': rpn_prob_p4,
        'rpn_cls_prob_stride8': rpn_prob_p3,
        'rpn_cls_prob_stride4': rpn_prob_p2,
    }
    rpn_bbox_pred_dict = {
        'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
        'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
        'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
        'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
        'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
    }
    # NOTE: dict_items concatenation with '+' is Python 2 only (as is cPickle below).
    arg_dict = dict(rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items())
    if is_train:
        rpn_label = mx.sym.Variable(name='label')
        rpn_bbox_target = mx.sym.Variable(name='bbox_target')
        rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
        gt_boxes = mx.sym.Variable(name="gt_boxes")
        # Concatenate per-level outputs along the anchor axis so one loss covers all levels.
        rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
        rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
        # RPN objectness loss; ignore_label=-1 masks anchors excluded from sampling.
        rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
                                              use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
        # Smooth-L1 box loss, masked by bbox_weight (zero for non-positive anchors).
        rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
        rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
        # Proposal generation over the whole pyramid (train-time NMS settings).
        aux_dict = {
            'op_type': 'pyramid_proposal', 'name': 'rois',
            'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
            'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
            'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
            'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
        }
        rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
        gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
        # Sample proposals and compute RCNN classification/regression targets.
        rois, label, bbox_target, bbox_weight \
            = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
                            batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
    else:
        # Test-time proposals only (no targets).
        aux_dict = {
            'op_type': 'pyramid_proposal', 'name': 'rois',
            'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
            'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
            'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
            'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
        }
        rois = mx.sym.Custom(**dict(arg_dict.items() + aux_dict.items()))
    # Level-aware RoI pooling: each RoI is pooled from the matching pyramid level.
    roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
                                rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling')
    # RCNN head: two fully-connected layers, then class scores and box deltas.
    fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
    fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
    fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
    fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
    if is_train:
        if cfg.TRAIN.ENABLE_OHEM:
            # Online hard example mining: keep only the highest-loss RoIs.
            labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
                                                           num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
                                                           cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
                                                           bbox_targets=bbox_target, bbox_weights=bbox_weight)
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
            bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
            rcnn_label = labels_ohem
        else:
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
            bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
            rcnn_label = label
        # Reshape to (batch, rois, ...) for the metric code.
        rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
        bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
        # BlockGrad: labels are output for metrics only, no gradient flows back.
        group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
    else:
        cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
        bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
        group = mx.sym.Group([rois, cls_prob, bbox_pred])
    self.sym = group
    return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
    """Randomly initialise the RCNN head layers that are not in the
    pretrained backbone: weights ~ N(0, 0.01), biases zero.
    Shapes are taken from ``self.arg_shape_dict``.
    """
    for layer in ('fc_new_1', 'fc_new_2', 'cls_score', 'bbox_pred'):
        arg_params[layer + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[layer + '_weight'])
        arg_params[layer + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[layer + '_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
    """Randomly initialise the FPN convolutions (lateral 1x1 projections,
    3x3 smoothing convs and the P6 conv): weights ~ N(0, 0.01), biases zero.
    Shapes are taken from ``self.arg_shape_dict``.
    """
    fpn_layers = ('fpn_p6', 'fpn_p5', 'fpn_p4', 'fpn_p3', 'fpn_p2',
                  'fpn_p5_1x1', 'fpn_p4_1x1', 'fpn_p3_1x1', 'fpn_p2_1x1')
    for layer in fpn_layers:
        arg_params[layer + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[layer + '_weight'])
        arg_params[layer + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[layer + '_bias'])
def init_weight(self, cfg, arg_params, aux_params):
    """Initialise every parameter that is not loaded from the pretrained
    model: the shared RPN head parameters, then the RCNN head and the FPN
    convolutions (delegated to the dedicated helpers).
    """
    # Shared RPN parameters: one copy reused by every pyramid level.
    for param in self.shared_param_list:
        arg_params['{}_weight'.format(param)] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['{}_weight'.format(param)])
        arg_params['{}_bias'.format(param)] = mx.nd.zeros(shape=self.arg_shape_dict['{}_bias'.format(param)])
    self.init_weight_rcnn(cfg, arg_params, aux_params)
    self.init_weight_fpn(cfg, arg_params, aux_params)
| true | true |
f72b3fd1a230b5adaf1f4d92129a73ad00db5a0e | 618 | py | Python | spider/main.py | ly-520/cbec-toolbox | e60dac81bec0403cf2e7cfd3ae216a8fc9cad343 | [
"Apache-2.0"
] | null | null | null | spider/main.py | ly-520/cbec-toolbox | e60dac81bec0403cf2e7cfd3ae216a8fc9cad343 | [
"Apache-2.0"
] | null | null | null | spider/main.py | ly-520/cbec-toolbox | e60dac81bec0403cf2e7cfd3ae216a8fc9cad343 | [
"Apache-2.0"
] | 1 | 2021-04-26T08:20:43.000Z | 2021-04-26T08:20:43.000Z | import logging
import flask
from controller.goods_controller import *
from controller.order_controller import *
from common.exception_advice import *
app = flask.Flask(__name__)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def main():
    """Register the application blueprints and start the Flask server."""
    # Mount each blueprint under its URL prefix.
    for blueprint, prefix in ((exception_advice, "/"), (order, '/order'), (goods, '/goods')):
        app.register_blueprint(blueprint, url_prefix=prefix)
    # Bind on all interfaces; threaded so concurrent requests are served.
    app.run(host="0.0.0.0", port=33023, debug=False, threaded=True)
if __name__ == '__main__':
main()
| 25.75 | 102 | 0.708738 | import logging
import flask
from controller.goods_controller import *
from controller.order_controller import *
from common.exception_advice import *
app = flask.Flask(__name__)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def main():
app.register_blueprint(exception_advice, url_prefix="/")
app.register_blueprint(order, url_prefix='/order')
app.register_blueprint(goods, url_prefix='/goods')
app.run(host="0.0.0.0", port=33023, debug=False, threaded=True)
if __name__ == '__main__':
main()
| true | true |
f72b4000190522de1de32f143323c9268b7fabb6 | 658 | py | Python | test/test_add_contact.py | olesya-sharafislamova/python_training | 10369f4988261005451f47d5f4242521a0de6b69 | [
"Apache-2.0"
] | 1 | 2019-06-03T15:24:53.000Z | 2019-06-03T15:24:53.000Z | test/test_add_contact.py | olesya-sharafislamova/python_training | 10369f4988261005451f47d5f4242521a0de6b69 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | olesya-sharafislamova/python_training | 10369f4988261005451f47d5f4242521a0de6b69 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.contact import Contact
import pytest
def test_add_contact(app, db, json_contacts):
contact = json_contacts
with pytest.allure.step('Given a Contact list'):
old_contacts = db.get_contact_list()
with pytest.allure.step('When I add a contact %s to the list' % contact):
app.contact.create(contact)
with pytest.allure.step('Then the contact list is equal to the old list with the added contact'):
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
| 27.416667 | 105 | 0.702128 |
from model.contact import Contact
import pytest
def test_add_contact(app, db, json_contacts):
contact = json_contacts
with pytest.allure.step('Given a Contact list'):
old_contacts = db.get_contact_list()
with pytest.allure.step('When I add a contact %s to the list' % contact):
app.contact.create(contact)
with pytest.allure.step('Then the contact list is equal to the old list with the added contact'):
new_contacts = db.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
| true | true |
f72b41cfb72a15d0999c4d3c54c0b25cae7907ac | 1,379 | py | Python | cnf/tests/conftest.py | nyck33/my_cnf | 9ca2b169efec4e84fa63c3b8d68a78e4c3a27ea7 | [
"Apache-2.0"
] | null | null | null | cnf/tests/conftest.py | nyck33/my_cnf | 9ca2b169efec4e84fa63c3b8d68a78e4c3a27ea7 | [
"Apache-2.0"
] | null | null | null | cnf/tests/conftest.py | nyck33/my_cnf | 9ca2b169efec4e84fa63c3b8d68a78e4c3a27ea7 | [
"Apache-2.0"
] | null | null | null | """
conftest.py pytest_fixtures can be accessed by multiple test files
test function has fixture func name as param, then fixture func called and result
passed to test func
added localhost.localdomain to /etc/hosts
"""
import pytest
from cnf.main import setup_app
import pymongo
config_name = 'testing'
the_app = setup_app(config_name, dict(
TESTING=True,
LOGIN_DISABLED=False,
MAIL_SUPPRESS_SEND=True,
SERVER_NAME='localhost.localdomain',
WTF_CSRF_ENABLED=False,
))
# the_app = setup_app()
the_app.app_context().push()
@pytest.fixture(scope='session')
def app():
"""Makes app parameter available to test funcs"""
return the_app
@pytest.fixture(scope='session', autouse=True)
def db():
"""Create a test copy of cnf for session"""
client = pymongo.MongoClient("localhost", 27017)
if not client['cnf_test']:
client.admin.command('copydb', fromdb='cnf',
todb='cnf_test')
db = client['cnf_test']
#delete example_user from user collection
user_coll = db.users
myquery = {"username": "example_user"}
user_coll.delete_one(myquery)
return db
@pytest.fixture(scope='function')
def data():
pass
@pytest.fixture(scope='session')
def client(app):
return app.test_client()
| 24.192982 | 81 | 0.648296 | import pytest
from cnf.main import setup_app
import pymongo
config_name = 'testing'
the_app = setup_app(config_name, dict(
TESTING=True,
LOGIN_DISABLED=False,
MAIL_SUPPRESS_SEND=True,
SERVER_NAME='localhost.localdomain',
WTF_CSRF_ENABLED=False,
))
the_app.app_context().push()
@pytest.fixture(scope='session')
def app():
return the_app
@pytest.fixture(scope='session', autouse=True)
def db():
client = pymongo.MongoClient("localhost", 27017)
if not client['cnf_test']:
client.admin.command('copydb', fromdb='cnf',
todb='cnf_test')
db = client['cnf_test']
user_coll = db.users
myquery = {"username": "example_user"}
user_coll.delete_one(myquery)
return db
@pytest.fixture(scope='function')
def data():
pass
@pytest.fixture(scope='session')
def client(app):
return app.test_client()
| true | true |
f72b41deca8203786bd3da834a3d9db8b40ffee7 | 14,866 | py | Python | tensorflow_data_validation/statistics/stats_options.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/stats_options.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | tensorflow_data_validation/statistics/stats_options.py | Mikehem/tfx | e803ea6778d8550ec77dcc92bc8172f1a3a90f38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Statistics generation options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import types as python_types
from typing import Dict, List, Optional, Text
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import example_weight_map
from google.protobuf import json_format
from tensorflow_metadata.proto.v0 import schema_pb2
_SCHEMA_JSON_KEY = 'schema_json'
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY = 'per_feature_weight_override_json'
# TODO(b/68277922): Currently we use a single epsilon (error tolerance)
# parameter for all histograms. Set this parameter specific to each
# histogram based on the number of buckets.
# TODO(b/118833241): Set MI default configs when MI is a default generator
class StatsOptions(object):
"""Options for generating statistics."""
def __init__(
self,
generators: Optional[List[stats_generator.StatsGenerator]] = None,
feature_whitelist: Optional[List[types.FeatureName]] = None,
schema: Optional[schema_pb2.Schema] = None,
label_feature: Optional[types.FeatureName] = None,
weight_feature: Optional[types.FeatureName] = None,
slice_functions: Optional[List[types.SliceFunction]] = None,
sample_rate: Optional[float] = None,
num_top_values: int = 20,
frequency_threshold: int = 1,
weighted_frequency_threshold: float = 1.0,
num_rank_histogram_buckets: int = 1000,
num_values_histogram_buckets: int = 10,
num_histogram_buckets: int = 10,
num_quantiles_histogram_buckets: int = 10,
epsilon: float = 0.01,
infer_type_from_schema: bool = False,
desired_batch_size: Optional[int] = None,
enable_semantic_domain_stats: bool = False,
semantic_domain_stats_sample_rate: Optional[float] = None,
per_feature_weight_override: Optional[Dict[types.FeaturePath,
types.FeatureName]] = None):
"""Initializes statistics options.
Args:
generators: An optional list of statistics generators. A statistics
generator must extend either CombinerStatsGenerator or
TransformStatsGenerator.
feature_whitelist: An optional list of names of the features to calculate
statistics for.
schema: An optional tensorflow_metadata Schema proto. Currently we use the
schema to infer categorical and bytes features.
label_feature: An optional feature name which represents the label.
weight_feature: An optional feature name whose numeric value represents
the weight of an example.
slice_functions: An optional list of functions that generate slice keys
for each example. Each slice function should take an example dict as
input and return a list of zero or more slice keys.
sample_rate: An optional sampling rate. If specified, statistics is
computed over the sample.
num_top_values: An optional number of most frequent feature values to keep
for string features.
frequency_threshold: An optional minimum number of examples the most
frequent values must be present in.
weighted_frequency_threshold: An optional minimum weighted number of
examples the most frequent weighted values must be present in. This
option is only relevant when a weight_feature is specified.
num_rank_histogram_buckets: An optional number of buckets in the rank
histogram for string features.
num_values_histogram_buckets: An optional number of buckets in a quantiles
histogram for the number of values per Feature, which is stored in
CommonStatistics.num_values_histogram.
num_histogram_buckets: An optional number of buckets in a standard
NumericStatistics.histogram with equal-width buckets.
num_quantiles_histogram_buckets: An optional number of buckets in a
quantiles NumericStatistics.histogram.
epsilon: An optional error tolerance for the computation of quantiles,
typically a small fraction close to zero (e.g. 0.01). Higher values of
epsilon increase the quantile approximation, and hence result in more
unequal buckets, but could improve performance, and resource
consumption.
infer_type_from_schema: A boolean to indicate whether the feature types
should be inferred from the schema. If set to True, an input schema
must be provided. This flag is used only when generating statistics
on CSV data.
desired_batch_size: An optional number of examples to include in each
batch that is passed to the statistics generators.
enable_semantic_domain_stats: If True statistics for semantic domains are
generated (e.g: image, text domains).
semantic_domain_stats_sample_rate: An optional sampling rate for semantic
domain statistics. If specified, semantic domain statistics is computed
over a sample.
per_feature_weight_override: If specified, the "example weight" paired
with a feature will be first looked up in this map and if not found,
fall back to `weight_feature`.
"""
self.generators = generators
self.feature_whitelist = feature_whitelist
self.schema = schema
self.label_feature = label_feature
self.weight_feature = weight_feature
self.slice_functions = slice_functions
self.sample_rate = sample_rate
self.num_top_values = num_top_values
self.frequency_threshold = frequency_threshold
self.weighted_frequency_threshold = weighted_frequency_threshold
self.num_rank_histogram_buckets = num_rank_histogram_buckets
self.num_values_histogram_buckets = num_values_histogram_buckets
self.num_histogram_buckets = num_histogram_buckets
self.num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
self.epsilon = epsilon
self.infer_type_from_schema = infer_type_from_schema
self.desired_batch_size = desired_batch_size
self.enable_semantic_domain_stats = enable_semantic_domain_stats
self.semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
self._per_feature_weight_override = per_feature_weight_override
def to_json(self) -> Text:
"""Convert from an object to JSON representation of the __dict__ attribute.
Custom generators and slice_functions are skipped, meaning that they will
not be used when running TFDV in a setting where the stats options have been
json-serialized, first. This will happen in the case where TFDV is run as a
TFX component. The schema proto will be json_encoded.
Returns:
A JSON representation of a filtered version of __dict__.
"""
options_dict = copy.copy(self.__dict__)
options_dict['_slice_functions'] = None
options_dict['_generators'] = None
if self.schema is not None:
del options_dict['_schema']
options_dict[_SCHEMA_JSON_KEY] = json_format.MessageToJson(self.schema)
if self._per_feature_weight_override is not None:
del options_dict['_per_feature_weight_override']
options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY] = {
k.to_json(): v for k, v in self._per_feature_weight_override.items()
}
return json.dumps(options_dict)
@classmethod
def from_json(cls, options_json: Text) -> 'StatsOptions':
"""Construct an instance of stats options from a JSON representation.
Args:
options_json: A JSON representation of the __dict__ attribute of a
StatsOptions instance.
Returns:
A StatsOptions instance constructed by setting the __dict__ attribute to
the deserialized value of options_json.
"""
options_dict = json.loads(options_json)
if _SCHEMA_JSON_KEY in options_dict:
options_dict['_schema'] = json_format.Parse(
options_dict[_SCHEMA_JSON_KEY], schema_pb2.Schema())
del options_dict[_SCHEMA_JSON_KEY]
per_feature_weight_override_json = options_dict.get(
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY)
if per_feature_weight_override_json is not None:
options_dict['_per_feature_weight_override'] = {
types.FeaturePath.from_json(k): v
for k, v in per_feature_weight_override_json.items()
}
del options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY]
options = cls()
options.__dict__ = options_dict
return options
@property
def generators(self) -> Optional[List[stats_generator.StatsGenerator]]:
return self._generators
@generators.setter
def generators(
self, generators: Optional[List[stats_generator.StatsGenerator]]) -> None:
if generators is not None:
if not isinstance(generators, list):
raise TypeError('generators is of type %s, should be a list.' %
type(generators).__name__)
for generator in generators:
if not isinstance(generator, (
stats_generator.CombinerStatsGenerator,
stats_generator.TransformStatsGenerator,
stats_generator.CombinerFeatureStatsGenerator,
)):
raise TypeError(
'Statistics generator must extend one of '
'CombinerStatsGenerator, TransformStatsGenerator, or '
'CombinerFeatureStatsGenerator found object of type %s.' %
generator.__class__.__name__)
self._generators = generators
@property
def feature_whitelist(self) -> Optional[List[types.FeatureName]]:
return self._feature_whitelist
@feature_whitelist.setter
def feature_whitelist(
self, feature_whitelist: Optional[List[types.FeatureName]]) -> None:
if feature_whitelist is not None and not isinstance(feature_whitelist,
list):
raise TypeError('feature_whitelist is of type %s, should be a list.' %
type(feature_whitelist).__name__)
self._feature_whitelist = feature_whitelist
@property
def schema(self) -> Optional[schema_pb2.Schema]:
return self._schema
@schema.setter
def schema(self, schema: Optional[schema_pb2.Schema]) -> None:
if schema is not None and not isinstance(schema, schema_pb2.Schema):
raise TypeError('schema is of type %s, should be a Schema proto.' %
type(schema).__name__)
self._schema = schema
@property
def slice_functions(self) -> Optional[List[types.SliceFunction]]:
return self._slice_functions
@slice_functions.setter
def slice_functions(
self, slice_functions: Optional[List[types.SliceFunction]]) -> None:
if slice_functions is not None:
if not isinstance(slice_functions, list):
raise TypeError('slice_functions is of type %s, should be a list.' %
type(slice_functions).__name__)
for slice_function in slice_functions:
if not isinstance(slice_function, python_types.FunctionType):
raise TypeError('slice_functions must contain functions only.')
self._slice_functions = slice_functions
@property
def sample_rate(self) -> Optional[float]:
return self._sample_rate
@sample_rate.setter
def sample_rate(self, sample_rate: Optional[float]):
if sample_rate is not None:
if not 0 < sample_rate <= 1:
raise ValueError('Invalid sample_rate %f' % sample_rate)
self._sample_rate = sample_rate
@property
def num_values_histogram_buckets(self) -> int:
return self._num_values_histogram_buckets
@num_values_histogram_buckets.setter
def num_values_histogram_buckets(self,
num_values_histogram_buckets: int) -> None:
# TODO(b/120164508): Disallow num_values_histogram_buckets = 1 because it
# causes the underlying quantile op to fail. If the quantile op is modified
# to support num_quantiles = 1, then allow num_values_histogram_buckets = 1.
if num_values_histogram_buckets <= 1:
raise ValueError('Invalid num_values_histogram_buckets %d' %
num_values_histogram_buckets)
self._num_values_histogram_buckets = num_values_histogram_buckets
@property
def num_histogram_buckets(self) -> int:
return self._num_histogram_buckets
@num_histogram_buckets.setter
def num_histogram_buckets(self, num_histogram_buckets: int) -> None:
if num_histogram_buckets < 1:
raise ValueError(
'Invalid num_histogram_buckets %d' % num_histogram_buckets)
self._num_histogram_buckets = num_histogram_buckets
@property
def num_quantiles_histogram_buckets(self) -> int:
return self._num_quantiles_histogram_buckets
@num_quantiles_histogram_buckets.setter
def num_quantiles_histogram_buckets(
self, num_quantiles_histogram_buckets: int) -> None:
if num_quantiles_histogram_buckets < 1:
raise ValueError('Invalid num_quantiles_histogram_buckets %d' %
num_quantiles_histogram_buckets)
self._num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
@property
def desired_batch_size(self) -> Optional[int]:
return self._desired_batch_size
@desired_batch_size.setter
def desired_batch_size(self, desired_batch_size: Optional[int]) -> None:
if desired_batch_size is not None and desired_batch_size < 1:
raise ValueError('Invalid desired_batch_size %d' %
desired_batch_size)
self._desired_batch_size = desired_batch_size
@property
def semantic_domain_stats_sample_rate(self) -> Optional[float]:
return self._semantic_domain_stats_sample_rate
@semantic_domain_stats_sample_rate.setter
def semantic_domain_stats_sample_rate(
self, semantic_domain_stats_sample_rate: Optional[float]):
if semantic_domain_stats_sample_rate is not None:
if not 0 < semantic_domain_stats_sample_rate <= 1:
raise ValueError('Invalid semantic_domain_stats_sample_rate %f'
% semantic_domain_stats_sample_rate)
self._semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
@property
def example_weight_map(self):
return example_weight_map.ExampleWeightMap(
self.weight_feature, self._per_feature_weight_override)
| 43.852507 | 80 | 0.735907 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import types as python_types
from typing import Dict, List, Optional, Text
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import example_weight_map
from google.protobuf import json_format
from tensorflow_metadata.proto.v0 import schema_pb2
_SCHEMA_JSON_KEY = 'schema_json'
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY = 'per_feature_weight_override_json'
class StatsOptions(object):
def __init__(
self,
generators: Optional[List[stats_generator.StatsGenerator]] = None,
feature_whitelist: Optional[List[types.FeatureName]] = None,
schema: Optional[schema_pb2.Schema] = None,
label_feature: Optional[types.FeatureName] = None,
weight_feature: Optional[types.FeatureName] = None,
slice_functions: Optional[List[types.SliceFunction]] = None,
sample_rate: Optional[float] = None,
num_top_values: int = 20,
frequency_threshold: int = 1,
weighted_frequency_threshold: float = 1.0,
num_rank_histogram_buckets: int = 1000,
num_values_histogram_buckets: int = 10,
num_histogram_buckets: int = 10,
num_quantiles_histogram_buckets: int = 10,
epsilon: float = 0.01,
infer_type_from_schema: bool = False,
desired_batch_size: Optional[int] = None,
enable_semantic_domain_stats: bool = False,
semantic_domain_stats_sample_rate: Optional[float] = None,
per_feature_weight_override: Optional[Dict[types.FeaturePath,
types.FeatureName]] = None):
self.generators = generators
self.feature_whitelist = feature_whitelist
self.schema = schema
self.label_feature = label_feature
self.weight_feature = weight_feature
self.slice_functions = slice_functions
self.sample_rate = sample_rate
self.num_top_values = num_top_values
self.frequency_threshold = frequency_threshold
self.weighted_frequency_threshold = weighted_frequency_threshold
self.num_rank_histogram_buckets = num_rank_histogram_buckets
self.num_values_histogram_buckets = num_values_histogram_buckets
self.num_histogram_buckets = num_histogram_buckets
self.num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
self.epsilon = epsilon
self.infer_type_from_schema = infer_type_from_schema
self.desired_batch_size = desired_batch_size
self.enable_semantic_domain_stats = enable_semantic_domain_stats
self.semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
self._per_feature_weight_override = per_feature_weight_override
def to_json(self) -> Text:
options_dict = copy.copy(self.__dict__)
options_dict['_slice_functions'] = None
options_dict['_generators'] = None
if self.schema is not None:
del options_dict['_schema']
options_dict[_SCHEMA_JSON_KEY] = json_format.MessageToJson(self.schema)
if self._per_feature_weight_override is not None:
del options_dict['_per_feature_weight_override']
options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY] = {
k.to_json(): v for k, v in self._per_feature_weight_override.items()
}
return json.dumps(options_dict)
@classmethod
def from_json(cls, options_json: Text) -> 'StatsOptions':
options_dict = json.loads(options_json)
if _SCHEMA_JSON_KEY in options_dict:
options_dict['_schema'] = json_format.Parse(
options_dict[_SCHEMA_JSON_KEY], schema_pb2.Schema())
del options_dict[_SCHEMA_JSON_KEY]
per_feature_weight_override_json = options_dict.get(
_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY)
if per_feature_weight_override_json is not None:
options_dict['_per_feature_weight_override'] = {
types.FeaturePath.from_json(k): v
for k, v in per_feature_weight_override_json.items()
}
del options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY]
options = cls()
options.__dict__ = options_dict
return options
@property
def generators(self) -> Optional[List[stats_generator.StatsGenerator]]:
return self._generators
@generators.setter
def generators(
self, generators: Optional[List[stats_generator.StatsGenerator]]) -> None:
if generators is not None:
if not isinstance(generators, list):
raise TypeError('generators is of type %s, should be a list.' %
type(generators).__name__)
for generator in generators:
if not isinstance(generator, (
stats_generator.CombinerStatsGenerator,
stats_generator.TransformStatsGenerator,
stats_generator.CombinerFeatureStatsGenerator,
)):
raise TypeError(
'Statistics generator must extend one of '
'CombinerStatsGenerator, TransformStatsGenerator, or '
'CombinerFeatureStatsGenerator found object of type %s.' %
generator.__class__.__name__)
self._generators = generators
@property
def feature_whitelist(self) -> Optional[List[types.FeatureName]]:
return self._feature_whitelist
@feature_whitelist.setter
def feature_whitelist(
self, feature_whitelist: Optional[List[types.FeatureName]]) -> None:
if feature_whitelist is not None and not isinstance(feature_whitelist,
list):
raise TypeError('feature_whitelist is of type %s, should be a list.' %
type(feature_whitelist).__name__)
self._feature_whitelist = feature_whitelist
@property
def schema(self) -> Optional[schema_pb2.Schema]:
return self._schema
@schema.setter
def schema(self, schema: Optional[schema_pb2.Schema]) -> None:
if schema is not None and not isinstance(schema, schema_pb2.Schema):
raise TypeError('schema is of type %s, should be a Schema proto.' %
type(schema).__name__)
self._schema = schema
@property
def slice_functions(self) -> Optional[List[types.SliceFunction]]:
return self._slice_functions
@slice_functions.setter
def slice_functions(
self, slice_functions: Optional[List[types.SliceFunction]]) -> None:
if slice_functions is not None:
if not isinstance(slice_functions, list):
raise TypeError('slice_functions is of type %s, should be a list.' %
type(slice_functions).__name__)
for slice_function in slice_functions:
if not isinstance(slice_function, python_types.FunctionType):
raise TypeError('slice_functions must contain functions only.')
self._slice_functions = slice_functions
@property
def sample_rate(self) -> Optional[float]:
return self._sample_rate
@sample_rate.setter
def sample_rate(self, sample_rate: Optional[float]):
if sample_rate is not None:
if not 0 < sample_rate <= 1:
raise ValueError('Invalid sample_rate %f' % sample_rate)
self._sample_rate = sample_rate
@property
def num_values_histogram_buckets(self) -> int:
return self._num_values_histogram_buckets
@num_values_histogram_buckets.setter
def num_values_histogram_buckets(self,
num_values_histogram_buckets: int) -> None:
if num_values_histogram_buckets <= 1:
raise ValueError('Invalid num_values_histogram_buckets %d' %
num_values_histogram_buckets)
self._num_values_histogram_buckets = num_values_histogram_buckets
@property
def num_histogram_buckets(self) -> int:
return self._num_histogram_buckets
@num_histogram_buckets.setter
def num_histogram_buckets(self, num_histogram_buckets: int) -> None:
if num_histogram_buckets < 1:
raise ValueError(
'Invalid num_histogram_buckets %d' % num_histogram_buckets)
self._num_histogram_buckets = num_histogram_buckets
@property
def num_quantiles_histogram_buckets(self) -> int:
return self._num_quantiles_histogram_buckets
@num_quantiles_histogram_buckets.setter
def num_quantiles_histogram_buckets(
self, num_quantiles_histogram_buckets: int) -> None:
if num_quantiles_histogram_buckets < 1:
raise ValueError('Invalid num_quantiles_histogram_buckets %d' %
num_quantiles_histogram_buckets)
self._num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
@property
def desired_batch_size(self) -> Optional[int]:
return self._desired_batch_size
@desired_batch_size.setter
def desired_batch_size(self, desired_batch_size: Optional[int]) -> None:
if desired_batch_size is not None and desired_batch_size < 1:
raise ValueError('Invalid desired_batch_size %d' %
desired_batch_size)
self._desired_batch_size = desired_batch_size
@property
def semantic_domain_stats_sample_rate(self) -> Optional[float]:
return self._semantic_domain_stats_sample_rate
@semantic_domain_stats_sample_rate.setter
def semantic_domain_stats_sample_rate(
self, semantic_domain_stats_sample_rate: Optional[float]):
if semantic_domain_stats_sample_rate is not None:
if not 0 < semantic_domain_stats_sample_rate <= 1:
raise ValueError('Invalid semantic_domain_stats_sample_rate %f'
% semantic_domain_stats_sample_rate)
self._semantic_domain_stats_sample_rate = semantic_domain_stats_sample_rate
@property
def example_weight_map(self):
return example_weight_map.ExampleWeightMap(
self.weight_feature, self._per_feature_weight_override)
| true | true |
f72b42d5c27b87418372e74671846c4f0f9ab98a | 3,825 | py | Python | tests/extension/thread_/stream_reduce_arg_max/thread_stream_reduce_arg_max.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | null | null | null | tests/extension/thread_/stream_reduce_arg_max/thread_stream_reduce_arg_max.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | null | null | null | tests/extension/thread_/stream_reduce_arg_max/thread_stream_reduce_arg_max.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
strm = vthread.Stream(m, 'mystream', clk, rst)
a = strm.source('a')
size = strm.parameter('size')
index, _max, argmax_valid = strm.ReduceArgMaxValid(a, size)
strm.sink(index, 'index', when=argmax_valid, when_name='argmax_valid')
def comp_stream(size, offset):
strm.set_source('a', ram_a, offset, size)
strm.set_parameter('size', size)
strm.set_sink('index', ram_b, offset, 1)
strm.run()
strm.join()
def comp_sequential(size, offset):
index = 0
_max = 0
for i in range(size):
a = ram_a.read(i + offset)
if a > _max:
index = i
_max = a
ram_b.write(offset, index)
def check(size, offset_stream, offset_seq):
all_ok = True
for i in range(size):
st = ram_b.read(i + offset_stream)
sq = ram_b.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok = False
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
ram_a.write(offset + 3, -100)
ram_a.write(offset + 7, 200)
comp_stream(size, offset)
myaxi.dma_write(ram_b, offset, 1024, 1)
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
ram_a.write(offset + 3, -100)
ram_a.write(offset + 7, 200)
comp_sequential(size, offset)
myaxi.dma_write(ram_b, offset, 1024 * 2, 1)
check(1, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(32)
return m
def mkTest(memimg_name=None):
m = Module('test')
# target instance
led = mkLed()
# copy paras and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
#simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
| 27.12766 | 87 | 0.602092 | from __future__ import absolute_import
from __future__ import print_function
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
strm = vthread.Stream(m, 'mystream', clk, rst)
a = strm.source('a')
size = strm.parameter('size')
index, _max, argmax_valid = strm.ReduceArgMaxValid(a, size)
strm.sink(index, 'index', when=argmax_valid, when_name='argmax_valid')
def comp_stream(size, offset):
strm.set_source('a', ram_a, offset, size)
strm.set_parameter('size', size)
strm.set_sink('index', ram_b, offset, 1)
strm.run()
strm.join()
def comp_sequential(size, offset):
index = 0
_max = 0
for i in range(size):
a = ram_a.read(i + offset)
if a > _max:
index = i
_max = a
ram_b.write(offset, index)
def check(size, offset_stream, offset_seq):
all_ok = True
for i in range(size):
st = ram_b.read(i + offset_stream)
sq = ram_b.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok = False
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
ram_a.write(offset + 3, -100)
ram_a.write(offset + 7, 200)
comp_stream(size, offset)
myaxi.dma_write(ram_b, offset, 1024, 1)
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
ram_a.write(offset + 3, -100)
ram_a.write(offset + 7, 200)
comp_sequential(size, offset)
myaxi.dma_write(ram_b, offset, 1024 * 2, 1)
check(1, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(32)
return m
def mkTest(memimg_name=None):
m = Module('test')
led = mkLed()
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
    """Elaborate the testbench, optionally dump it as Verilog, simulate it,
    and return the simulator transcript.

    The trailing status line is stripped where the simulator appends one:
    always for iverilog, and for verilator only when the last line starts
    with '-'.
    """
    if outputfile is None:
        base = os.path.splitext(os.path.basename(__file__))[0]
        outputfile = base + '.out'
    test = mkTest(memimg_name='memimg_' + outputfile)
    if filename is not None:
        test.to_verilog(filename)
    simulator = simulation.Simulator(test, sim=simtype)
    rslt = simulator.run(outputfile=outputfile)
    lines = rslt.splitlines()
    strip_last = (simtype == 'iverilog'
                  or (simtype == 'verilator' and lines[-1].startswith('-')))
    if strip_last:
        rslt = '\n'.join(lines[:-1])
    return rslt
if __name__ == '__main__':
    # standalone entry: emit tmp.v and print the simulation transcript
    rslt = run(filename='tmp.v')
    print(rslt)
| true | true |
f72b4311c1c935e36ff12b3275ecdd0411d9a83c | 2,457 | py | Python | solfasol/shop/migrations/0004_auto_20201004_2111.py | rekognize/solfasol | c960c3364c753d75161242eccac4f085d800c843 | [
"MIT"
] | null | null | null | solfasol/shop/migrations/0004_auto_20201004_2111.py | rekognize/solfasol | c960c3364c753d75161242eccac4f085d800c843 | [
"MIT"
] | 1 | 2020-06-18T13:08:47.000Z | 2020-06-18T13:08:47.000Z | solfasol/shop/migrations/0004_auto_20201004_2111.py | Solfasol/solfasol | c960c3364c753d75161242eccac4f085d800c843 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-10-04 18:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds cart support to the shop app:
    #  - Cart, with ManyToMany links to shop.Item and issues.Issue through
    #    the CartItem / CartIssue tables (each carrying a per-row 'count'),
    #  - a nullable Order.cart foreign key (SET_NULL preserves orders when
    #    their cart is deleted).

    dependencies = [
        ('issues', '0009_auto_20200918_0020'),
        ('shop', '0003_auto_20201004_2109'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'cart',
                'verbose_name_plural': 'carts',
            },
        ),
        migrations.CreateModel(
            name='CartItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.PositiveSmallIntegerField(default=1)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.item', verbose_name='item')),
            ],
        ),
        migrations.CreateModel(
            name='CartIssue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.PositiveSmallIntegerField(default=1)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
                ('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.issue', verbose_name='issue')),
            ],
        ),
        migrations.AddField(
            model_name='cart',
            name='issues',
            field=models.ManyToManyField(blank=True, through='shop.CartIssue', to='issues.Issue'),
        ),
        migrations.AddField(
            model_name='cart',
            name='items',
            field=models.ManyToManyField(blank=True, through='shop.CartItem', to='shop.Item'),
        ),
        migrations.AddField(
            model_name='order',
            name='cart',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.cart', verbose_name='cart'),
        ),
    ]
| 40.95 | 142 | 0.580383 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issues', '0009_auto_20200918_0020'),
('shop', '0003_auto_20201004_2109'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name': 'cart',
'verbose_name_plural': 'carts',
},
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveSmallIntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.item', verbose_name='item')),
],
),
migrations.CreateModel(
name='CartIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.PositiveSmallIntegerField(default=1)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.cart', verbose_name='cart')),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='issues.issue', verbose_name='issue')),
],
),
migrations.AddField(
model_name='cart',
name='issues',
field=models.ManyToManyField(blank=True, through='shop.CartIssue', to='issues.Issue'),
),
migrations.AddField(
model_name='cart',
name='items',
field=models.ManyToManyField(blank=True, through='shop.CartItem', to='shop.Item'),
),
migrations.AddField(
model_name='order',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='shop.cart', verbose_name='cart'),
),
]
| true | true |
f72b433198e89063d5e6f1584dc14c04ef6a68fb | 2,261 | py | Python | tests/test_mixture_transitions.py | probcomp/cgpm2 | 280ab5bf3dd0d7c61196deaff7cb590692fc412a | [
"Apache-2.0"
] | 3 | 2019-01-20T08:55:06.000Z | 2019-12-02T05:59:26.000Z | tests/test_mixture_transitions.py | probcomp/cgpm2 | 280ab5bf3dd0d7c61196deaff7cb590692fc412a | [
"Apache-2.0"
] | null | null | null | tests/test_mixture_transitions.py | probcomp/cgpm2 | 280ab5bf3dd0d7c61196deaff7cb590692fc412a | [
"Apache-2.0"
] | 3 | 2019-08-06T07:27:34.000Z | 2019-09-28T23:26:57.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018 MIT Probabilistic Computing Project.
# Released under Apache 2.0; refer to LICENSE.txt.
from collections import Counter
import numpy as np
from cgpm.utils.general import get_prng
from cgpm2.crp import CRP
from cgpm2.normal import Normal
from cgpm2.flexible_rowmix import FlexibleRowMixture
from cgpm2.transition_hypers import transition_hyper_grids
from cgpm2.transition_hypers import transition_hypers
from cgpm2.transition_rows import transition_rows
from cgpm2.walks import get_cgpms_by_output_index
def test_transition_crp_mixture():
    """Smoke-test CRP-mixture inference: three well-separated Gaussian
    clusters (means 0, +30, -30) should be recovered as (mostly) homogeneous
    row-cluster assignments after 50 Gibbs sweeps."""
    prng = get_prng(2)
    data = np.concatenate((
        prng.normal(loc=0, scale=2, size=20),
        prng.normal(loc=30, scale=1, size=20),
        prng.normal(loc=-30, scale=1, size=20),
    ))
    infinite_mixture = FlexibleRowMixture(
        cgpm_row_divide=CRP([1], [], rng=prng),
        cgpm_components_base=Normal([0], [], rng=prng),
        rng=prng
    )
    for rowid, value in enumerate(data):
        infinite_mixture.observe(rowid, {0: value})
    # cgpms/grids keyed by output index: 0 = data column, 1 = CRP assignment.
    cgpms = {
        0 : get_cgpms_by_output_index(infinite_mixture, 0),
        1 : get_cgpms_by_output_index(infinite_mixture, 1),
    }
    grids = {
        0 : transition_hyper_grids(cgpms[0], 30),
        1 : transition_hyper_grids(cgpms[1], 30),
    }
    # Gibbs sweeps over row assignments and hyperparameters.
    # (was 'xrange': use 'range' for Python 2/3 compatibility)
    for _step in range(50):
        rowids = prng.permutation(range(len(data)))
        for rowid in rowids:
            transition_rows(infinite_mixture, rowid, prng)
        for output in infinite_mixture.outputs:
            transition_hypers(cgpms[output], grids[output], prng)
    rowids = range(60)
    assignments0 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[0:20]]
    assignments1 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[20:40]]
    assignments2 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[40:60]]
    mode0 = Counter(assignments0).most_common(1)[0][0]
    mode1 = Counter(assignments1).most_common(1)[0][0]
    mode2 = Counter(assignments2).most_common(1)[0][0]
    # At least 95% of each cluster's rows must share that cluster's modal assignment.
    assert sum(a==mode0 for a in assignments0) > int(0.95*len(assignments0))
    assert sum(a==mode1 for a in assignments1) > int(0.95*len(assignments1))
    assert sum(a==mode2 for a in assignments2) > int(0.95*len(assignments2))
| 37.065574 | 80 | 0.689518 |
from collections import Counter
import numpy as np
from cgpm.utils.general import get_prng
from cgpm2.crp import CRP
from cgpm2.normal import Normal
from cgpm2.flexible_rowmix import FlexibleRowMixture
from cgpm2.transition_hypers import transition_hyper_grids
from cgpm2.transition_hypers import transition_hypers
from cgpm2.transition_rows import transition_rows
from cgpm2.walks import get_cgpms_by_output_index
def test_transition_crp_mixture():
prng = get_prng(2)
data = np.concatenate((
prng.normal(loc=0, scale=2, size=20),
prng.normal(loc=30, scale=1, size=20),
prng.normal(loc=-30, scale=1, size=20),
))
infinite_mixture = FlexibleRowMixture(
cgpm_row_divide=CRP([1], [], rng=prng),
cgpm_components_base=Normal([0], [], rng=prng),
rng=prng
)
for rowid, value in enumerate(data):
infinite_mixture.observe(rowid, {0: value})
cgpms = {
0 : get_cgpms_by_output_index(infinite_mixture, 0),
1 : get_cgpms_by_output_index(infinite_mixture, 1),
}
grids = {
0 : transition_hyper_grids(cgpms[0], 30),
1 : transition_hyper_grids(cgpms[1], 30),
}
for _step in xrange(50):
rowids = prng.permutation(range(len(data)))
for rowid in rowids:
transition_rows(infinite_mixture, rowid, prng)
for output in infinite_mixture.outputs:
transition_hypers(cgpms[output], grids[output], prng)
rowids = range(60)
assignments0 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[00:20]]
assignments1 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[20:40]]
assignments2 = [infinite_mixture.simulate(r, [1])[1] for r in rowids[40:60]]
mode0 = Counter(assignments0).most_common(1)[0][0]
mode1 = Counter(assignments1).most_common(1)[0][0]
mode2 = Counter(assignments2).most_common(1)[0][0]
assert sum(a==mode0 for a in assignments0) > int(0.95*len(assignments0))
assert sum(a==mode1 for a in assignments1) > int(0.95*len(assignments1))
assert sum(a==mode2 for a in assignments2) > int(0.95*len(assignments2))
| true | true |
f72b440bbd74e2d435413383ae7761a669cd513a | 68,958 | py | Python | arelle/plugin/xbrlDB/XbrlSemanticSqlDB.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 1 | 2021-07-01T17:52:12.000Z | 2021-07-01T17:52:12.000Z | arelle/plugin/xbrlDB/XbrlSemanticSqlDB.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 3 | 2021-01-07T23:36:40.000Z | 2021-12-13T20:43:27.000Z | arelle/plugin/xbrlDB/XbrlSemanticSqlDB.py | DataFinnovation/Arelle | d4bf45f56fc9249f75ab22e6217dbe55f0510841 | [
"Apache-2.0"
] | 2 | 2020-02-24T16:06:12.000Z | 2021-02-24T00:21:32.000Z | '''
XbrlSemanticSqlDB.py implements an SQL database interface for Arelle, based
on a concrete realization of the Abstract Model PWD 2.0 layer. This is a semantic
representation of XBRL information.
This module may save directly to a Postgres, MySQL, SQLite, MSSQL, or Oracle server.
This module provides the execution context for saving a dts and instances in
XBRL SQL database. It may be loaded by Arelle's RSS feed, or by individual
DTS and instances opened by interactive or command line/web service mode.
Example dialog or command line parameters for operation:
host: the supporting host for SQL Server
port: the host port of server
user, password: if needed for server
database: the top level path segment for the SQL Server
timeout:
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
to use from command line:
linux
# be sure plugin is installed
arelleCmdLine --plugin '+xbrlDB|show'
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB 'myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds'
windows
rem be sure plugin is installed
arelleCmdLine --plugin "xbrlDB"
arelleCmdLine -f http://sec.org/somewhere/some.rss -v --store-to-XBRL-DB "myserver.com,portnumber,pguser,pgpasswd,database,timeoutseconds"
'''
import os, time, datetime, logging
from arelle.ModelDocument import Type
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelDocument import ModelDocument
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlValidate import UNVALIDATED, VALID
from arelle.XmlUtil import elementChildSequence
from arelle import XbrlConst
from arelle.UrlUtil import authority, ensureUrl
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
from .tableFacts import tableFacts
from .entityInformation import loadEntityInformation
from .primaryDocumentFacts import loadPrimaryDocumentFacts
from collections import defaultdict
def insertIntoDB(modelXbrl, 
                 user=None, password=None, host=None, port=None, database=None, timeout=None,
                 product=None, entrypoint=None, rssItem=None, **kwargs):
    """Open a semantic-SQL connection for modelXbrl and store the model.

    With kwargs["rssObject"] present, only initializes batch processing
    (marking already-stored RSS items); otherwise verifies the schema and
    stores the DTS/instance. On any error the connection is rolled back and
    closed best-effort, and the original exception is re-raised.
    """
    conn = None
    try:
        conn = XbrlSqlDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
        if "rssObject" in kwargs: # initialize batch
            conn.initializeBatch(kwargs["rssObject"])
        else:
            conn.verifyTables()
            conn.insertXbrl(entrypoint, rssItem)
        conn.close()
    except Exception:
        if conn is not None:
            try:
                conn.close(rollback=True)
            except Exception:
                pass  # best-effort cleanup; the original error takes precedence
        raise  # reraise original exception with original traceback
def isDBPort(host, port, timeout=10, product="postgres"):
    # Probe whether an SQL server answers at host:port within 'timeout'.
    # 'product' is accepted for interface symmetry with other xbrlDB
    # backends but is not used by the probe itself.
    return isSqlConnection(host, port, timeout)
# Table names expected in a fully initialized semantic-model schema; used by
# verifyTables to decide whether the product-specific DDL must be (re)run.
XBRLDBTABLES = {
                "filing", "report",
                "document", "referenced_documents",
                "aspect", "data_type", "role_type", "arcrole_type",
                "resource", "relationship_set", "root", "relationship",
                "data_point", "entity", "period", "unit", "unit_measure", "aspect_value_selection",
                "message", "message_reference",
                "industry", "industry_level", "industry_structure",
                }
class XbrlSqlDatabaseConnection(SqlDbConnection):
def verifyTables(self):
missingTables = XBRLDBTABLES - self.tablesInDB()
# if no tables, initialize database
if missingTables == XBRLDBTABLES:
self.create(os.path.join("sql", "semantic", {"mssql": "xbrlSemanticMSSqlDB.sql",
"mysql": "xbrlSemanticMySqlDB.ddl",
"sqlite": "xbrlSemanticSQLiteDB.ddl",
"orcl": "xbrlSemanticOracleDB.sql",
"postgres": "xbrlSemanticPostgresDB.ddl"}[self.product]))
missingTables = XBRLDBTABLES - self.tablesInDB()
if missingTables and missingTables != {"sequences"}:
raise XPDBException("sqlDB:MissingTables",
_("The following tables are missing: %(missingTableNames)s"),
missingTableNames=', '.join(t for t in sorted(missingTables)))
    def insertXbrl(self, entrypoint, rssItem):
        """Store the loaded model into the database in one transaction:
        filing/report rows, documents, aspects, role types, resources, data
        points, relationships and validation results, in that order (the
        ordering matters: e.g. footnote relationships need data points).

        Raises XPDBException when no XBRL document is loaded; any failure is
        re-raised after a status message (the caller rolls back)."""
        try:
            # must also have default dimensions loaded
            from arelle import ValidateXbrlDimensions
            ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
            # get logging entries (needed to find which aspects to identify)
            self.loggingEntries = []
            for handler in logging.getLogger("arelle").handlers:
                if hasattr(handler, "dbHandlerLogEntries"):
                    self.loggingEntries = handler.dbHandlerLogEntries()
                    break
            # must have a valid XBRL instance or document
            if self.modelXbrl.modelDocument is None:
                raise XPDBException("xpgDB:MissingXbrlDocument",
                                    _("No XBRL instance or schema loaded for this filing."))
            # obtain supplemental entity information
            self.entityInformation = loadEntityInformation(self.modelXbrl, entrypoint, rssItem)
            # identify table facts (table datapoints) (prior to locked database transaction)
            self.tableFacts = tableFacts(self.modelXbrl)  # for EFM & HMRC this is ( (roleType, table_code, fact) )
            loadPrimaryDocumentFacts(self.modelXbrl, rssItem, self.entityInformation) # load primary document facts for SEC filing
            self.identifyTaxonomyRelSetsOwner()
            # at this point we determine what's in the database and provide new tables
            # requires locking most of the table structure
            self.lockTables(('entity', 'filing', 'report', 'document', 'referenced_documents'),
                            isSessionTransaction=True) # lock for whole transaction
            # find pre-existing documents in server database
            self.identifyPreexistingDocuments()
            self.identifyAspectsUsed()
            self.dropTemporaryTable()
            startedAt = time.time()
            self.syncSequences = True # for data base types that don't explicity handle sequences
            self.insertFiling(rssItem)
            self.modelXbrl.profileStat(_("XbrlSqlDB: Filing insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertDocuments()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Documents insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertAspects()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Aspects insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertArcroleTypes()
            self.insertRoleTypes()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Role Types insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertResources()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Resources insertion"), time.time() - startedAt)
            startedAt = time.time()
            # self.modelXbrl.profileStat(_("XbrlSqlDB: DTS insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertDataPoints()
            self.modelXbrl.profileStat(_("XbrlSqlDB: instance insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertRelationships() # must follow data points for footnote relationships
            self.modelXbrl.profileStat(_("XbrlSqlDB: Relationships insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.insertValidationResults()
            self.modelXbrl.profileStat(_("XbrlSqlDB: Validation results insertion"), time.time() - startedAt)
            startedAt = time.time()
            self.showStatus("Committing entries")
            self.commit()
            self.modelXbrl.profileStat(_("XbrlSqlDB: insertion committed"), time.time() - startedAt)
            self.showStatus("DB insertion completed", clearAfter=5000)
        except Exception as ex:
            self.showStatus("DB insertion failed due to exception", clearAfter=5000)
            raise
def identifyTaxonomyRelSetsOwner(self):
# walk down referenced document set from instance to find 'lowest' taxonomy relationship set ownership
instanceReferencedDocuments = set()
instanceDocuments = set()
inlineXbrlDocSet = None
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instanceDocuments.add(mdlDoc)
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include"):
instanceReferencedDocuments.add(refDoc)
elif mdlDoc.type == Type.INLINEXBRLDOCUMENTSET:
inlineXbrlDocSet = mdlDoc
if len(instanceReferencedDocuments) > 1:
# filing must own the taxonomy set
if len(instanceDocuments) == 1:
self.taxonomyRelSetsOwner = instanceDocuments.pop()
elif inlineXbrlDocSet is not None: # manifest for inline docs can own the rel sets
self.taxonomyRelSetsOwner = inlineXbrlDocSet
else: # no single instance, pick the entry poin doct
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument # entry document (instance or inline doc set)
elif len(instanceReferencedDocuments) == 1:
self.taxonomyRelSetsOwner = instanceReferencedDocuments.pop()
elif self.modelXbrl.modelDocument.type == Type.SCHEMA:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
else:
self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
instanceReferencedDocuments.clear() # dereference
instanceDocuments.clear()
# check whether relationship_set is completely in instance or part/all in taxonomy
self.arcroleInInstance = {}
self.arcroleHasResource = {}
for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-"):
inInstance = False
hasResource = False
for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
if (not inInstance and
rel.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL) and
any(isinstance(tgtObj, ModelObject) and tgtObj.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL)
for tgtObj in (rel.fromModelObject, rel.toModelObject))):
inInstance = True
if not hasResource and any(isinstance(resource, ModelResource)
for resource in (rel.fromModelObject, rel.toModelObject)):
hasResource = True
if inInstance and hasResource:
break;
self.arcroleInInstance[arcrole] = inInstance
self.arcroleHasResource[arcrole] = hasResource
def initializeBatch(self, rssObject):
results = self.execute("SELECT filing_number, accepted_timestamp FROM filing")
existingFilings = dict((filingNumber, timestamp)
for filingNumber, timestamp in results) # timestamp is a string
for rssItem in rssObject.rssItems:
if (rssItem.accessionNumber in existingFilings and
rssItem.acceptanceDatetime == existingFilings[rssItem.accessionNumber]):
rssItem.skipRssItem = True
    def insertFiling(self, rssItem):
        """Insert (or match) the entity, former_entity, filing and report rows
        for this filing; sets self.entityId, self.filingId, self.reportId and
        the corresponding *PreviouslyInDB flags.

        Field values come from the RSS item when present, falling back to
        self.entityInformation (and finally to time-based surrogates for the
        NOT NULL keys)."""
        now = datetime.datetime.now()
        entityInfo = self.entityInformation
        def rssItemGet(propertyName):
            # RSS attribute access tolerant of a missing rssItem or attribute
            if rssItem is not None:
                return getattr(rssItem, propertyName, None)
            return None
        self.showStatus("insert entity")
        LEI = None  # legal entity identifier is not collected yet, so the
                    # comparator below always falls back to file_number only
        entity_comparator = ('legal_entity_number', 'file_number') if LEI else ('file_number',)
        table = self.getTable('entity', 'entity_id',
                              ('legal_entity_number',
                               'file_number',
                               'reference_number', # CIK
                               'tax_number',
                               'standard_industry_code',
                               'name',
                               'legal_state',
                               'phone',
                               'phys_addr1', 'phys_addr2', 'phys_city', 'phys_state', 'phys_zip', 'phys_country',
                               'mail_addr1', 'mail_addr2', 'mail_city', 'mail_state', 'mail_zip', 'mail_country',
                               'fiscal_year_end',
                               'filer_category',
                               'public_float',
                               'trading_symbol'),
                              entity_comparator, # cannot compare None = None if LEI is absent, always False
                              ((LEI,
                                rssItemGet("fileNumber") or entityInfo.get("file-number") or str(int(time.time())),
                                rssItemGet("cikNumber") or entityInfo.get("cik"),
                                entityInfo.get("irs-number"),
                                rssItemGet("assignedSic") or entityInfo.get("assigned-sic") or -1,
                                rssItemGet("companyName") or entityInfo.get("conformed-name"),
                                entityInfo.get("state-of-incorporation"),
                                entityInfo.get("business-address.phone"),
                                entityInfo.get("business-address.street1"),
                                entityInfo.get("business-address.street2"),
                                entityInfo.get("business-address.city"),
                                entityInfo.get("business-address.state"),
                                entityInfo.get("business-address.zip"),
                                countryOfState.get(entityInfo.get("business-address.state")),
                                entityInfo.get("mail-address.street1"),
                                entityInfo.get("mail-address.street2"),
                                entityInfo.get("mail-address.city"),
                                entityInfo.get("mail-address.state"),
                                entityInfo.get("mail-address.zip"),
                                countryOfState.get(entityInfo.get("mail-address.state")),
                                rssItemGet("fiscalYearEnd") or entityInfo.get("fiscal-year-end"),
                                entityInfo.get("filer-category"),
                                entityInfo.get("public-float"),
                                entityInfo.get("trading-symbol")
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        # result row shape depends on whether LEI participated in the comparator
        if LEI:
            for id, _LEI, filing_number, existenceStatus in table:
                self.entityId = id
                self.entityPreviouslyInDB = existenceStatus
                break
        else:
            for id, filing_number, existenceStatus in table:
                self.entityId = id
                self.entityPreviouslyInDB = existenceStatus
                break
        # record name-change history when the SEC header supplies it
        if any ('former-conformed-name' in key for key in entityInfo.keys()):
            self.getTable('former_entity', None,
                          ('entity_id', 'former_name', 'date_changed'),
                          ('entity_id', 'former_name', 'date_changed'),
                          ((self.entityId,
                            entityInfo.get(keyPrefix + '.former-conformed-name'),
                            entityInfo.get(keyPrefix + '.date-changed'))
                           for key in entityInfo.keys() if 'former-conformed-name' in key
                           for keyPrefix in (key.partition('.')[0],)),
                          checkIfExisting=True)
        self.showStatus("insert filing")
        table = self.getTable('filing', 'filing_id',
                              ('filing_number', 'form_type', 'entity_id', 'reference_number',
                               'accepted_timestamp', 'is_most_current', 'filing_date',
                               'creation_software',
                               'authority_html_url', 'entry_url', ),
                              ('filing_number',),
                              ((rssItemGet("accessionNumber") or entityInfo.get("accession-number") or str(int(time.time())), # NOT NULL
                                rssItemGet("formType") or entityInfo.get("form-type"),
                                self.entityId,
                                rssItemGet("cikNumber") or entityInfo.get("cik"),
                                rssItemGet("acceptanceDatetime") or entityInfo.get("acceptance-datetime") or now,
                                True,
                                rssItemGet("filingDate") or entityInfo.get("filing-date") or now, # NOT NULL
                                self.modelXbrl.modelDocument.creationSoftware,
                                rssItemGet("htmlUrl") or entityInfo.get("primary-document-url"),
                                rssItemGet("url") or entityInfo.get("instance-url")
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        for id, filing_number, existenceStatus in table:
            self.filingId = id
            self.filingPreviouslyInDB = existenceStatus
            break
        self.showStatus("insert report")
        table = self.getTable('report', 'report_id',
                              ('filing_id', ),
                              ('filing_id',),
                              ((self.filingId,
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        for id, foundFilingId, existenceStatus in table:
            self.reportId = id
            self.filingPreviouslyInDB = existenceStatus
            break
def isSemanticDocument(self, modelDocument):
if modelDocument.type == Type.SCHEMA:
# must include document items taxonomy even if not in DTS
return modelDocument.inDTS or modelDocument.targetNamespace == "http://arelle.org/doc/2014-01-31"
return modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.LINKBASE)
    def identifyPreexistingDocuments(self):
        """Query the database for documents of this filing that are already
        stored.

        Fills self.urlDocs (normalized url -> ModelDocument) and
        self.existingDocumentIds (ModelDocument -> document_id), and sets
        self.isExistingTaxonomyRelSetsOwner for the owner found by
        identifyTaxonomyRelSetsOwner (must run first)."""
        self.existingDocumentIds = {}
        self.urlDocs = {}
        docUris = set()
        for modelDocument in self.modelXbrl.urlDocs.values():
            url = ensureUrl(modelDocument.uri)
            self.urlDocs[url] = modelDocument
            if self.isSemanticDocument(modelDocument):
                docUris.add(self.dbStr(url))  # dbStr quotes/escapes for the IN clause below
        if docUris:
            results = self.execute("SELECT document_id, document_url FROM {} WHERE document_url IN ({})"
                                   .format(self.dbTableName("document"),
                                           ', '.join(docUris)))
            self.existingDocumentIds = dict((self.urlDocs[self.pyStrFromDbStr(docUrl)],docId)
                                            for docId, docUrl in results)

        # identify whether taxonomyRelsSetsOwner is existing
        self.isExistingTaxonomyRelSetsOwner = (
            self.taxonomyRelSetsOwner.type not in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and
            self.taxonomyRelSetsOwner in self.existingDocumentIds)
    def identifyAspectsUsed(self):
        """Collect into self.aspectsUsed every concept referenced by the model
        (facts, context dimensions and their defaults, relationship endpoints,
        built-in xbrli concepts, role/arcrole usedOn, logging references, and
        substitution groups), and into self.typesUsed the transitive closure of
        their data types. Also records self.relationshipSets (keys of the DTS's
        base sets with a real ELR)."""
        # relationshipSets are a dts property
        self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
                                 for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                 if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
        aspectsUsed = set(f.concept
                          for f in self.modelXbrl.factsInInstance)
        # dimensions and members (explicit or typed) of every context
        for cntx in self.modelXbrl.contexts.values():
            for dim in cntx.qnameDims.values():
                aspectsUsed.add(dim.dimension)
                if dim.isExplicit:
                    aspectsUsed.add(dim.member)
                else:
                    aspectsUsed.add(self.modelXbrl.qnameConcepts[dim.typedMember.qname])
        for defaultDimQn, defaultDimMemberQn in self.modelXbrl.qnameDimensionDefaults.items():
            aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimQn])
            aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimMemberQn])
        # endpoints of every relationship in the recorded relationship sets
        for relationshipSetKey in self.relationshipSets:
            relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
            for rel in relationshipSet.modelRelationships:
                if isinstance(rel.fromModelObject, ModelConcept):
                    aspectsUsed.add(rel.fromModelObject)
                if isinstance(rel.toModelObject, ModelConcept):
                    aspectsUsed.add(rel.toModelObject)
        try:
            for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
                aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
        except KeyError:
            pass # no DTS
        for roleTypes in (self.modelXbrl.roleTypes.values(), self.modelXbrl.arcroleTypes.values()):
            for roleUriTypes in roleTypes:
                for roleType in roleUriTypes:
                    for qn in roleType.usedOns:
                        if qn in self.modelXbrl.qnameConcepts: # qname may be undefined or invalid and still 2.1 legal
                            aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
        # add aspects referenced by logging entries
        for logEntry in self.loggingEntries:
            for ref in logEntry['refs']:
                modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
                if isinstance(modelObject, ModelConcept) and modelObject.modelDocument.inDTS:
                    aspectsUsed.add(modelObject)
        # add substitution groups
        aspectsUsed |= set(aspect.substitutionGroup
                           for aspect in aspectsUsed
                           if aspect is not None)
        aspectsUsed -= {None}  # remove None if in aspectsUsed
        self.aspectsUsed = aspectsUsed
        typesUsed = set()
        def typeUsed(modelType):
            # recursively record modelType and everything it derives from
            if modelType is not None and modelType.modelDocument.inDTS: # exclude nonDTS types (schema, etc)
                typesUsed.add(modelType)
                typesDerivedFrom = modelType.typeDerivedFrom
                if isinstance(typesDerivedFrom, list): # union derivation
                    for typeDerivedFrom in typesDerivedFrom:
                        if typeDerivedFrom not in typesUsed:
                            typeUsed(typeDerivedFrom)
                else: # single derivation
                    if typesDerivedFrom is not None and typesDerivedFrom not in typesUsed:
                        typeUsed(typesDerivedFrom)
        for aspect in aspectsUsed:
            modelType = aspect.type
            if modelType is not None:
                if modelType not in typesUsed:
                    typeUsed(modelType)
        self.typesUsed = typesUsed
def insertDocuments(self):
self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_url', 'document_type', 'namespace'),
('document_url',),
set((ensureUrl(docUrl),
Type.typeName[mdlDoc.type],
mdlDoc.targetNamespace)
for docUrl, mdlDoc in self.modelXbrl.urlDocs.items()
if mdlDoc not in self.existingDocumentIds and
self.isSemanticDocument(mdlDoc)),
checkIfExisting=True)
self.documentIds = dict((self.urlDocs[self.pyStrFromDbStr(url)], id)
for id, url in table)
self.documentIds.update(self.existingDocumentIds)
referencedDocuments = set()
# instance documents are filing references
# update report with document references
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
referencedDocuments.add( (self.documentIds[mdlDoc], self.documentIds[refDoc] ))
table = self.getTable('referenced_documents',
None, # no id column in this table
('object_id','document_id'),
('object_id','document_id'),
referencedDocuments,
checkIfExisting=True)
instDocId = instSchemaDocId = agencySchemaDocId = stdSchemaDocId = None
mdlDoc = self.modelXbrl.modelDocument
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instDocId = self.documentIds[mdlDoc]
# referenced doc may be extension schema
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType == "href" and refDoc in self.documentIds:
instSchemaDocId = self.documentIds[refDoc]
break
elif mdlDoc.type == Type.SCHEMA:
instDocSchemaDocId = self.documentIds[mdlDoc]
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
if refDoc.type == Type.SCHEMA:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if ((nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap") or
(nsAuthority == "xbrl.ifrs.org" and nsPath[-1] in ("ifrs", "ifrs-full", "ifrs-smes"))):
stdSchemaDocId = self.documentIds[refDoc]
elif (nsAuthority == "xbrl.sec.gov" and nsPath[-2] == "rr"):
agencySchemaDocId = self.documentIds[refDoc]
self.updateTable("report",
("report_id", "report_data_doc_id", "report_schema_doc_id", "agency_schema_doc_id", "standard_schema_doc_id"),
((self.reportId, instDocId, instSchemaDocId, agencySchemaDocId, stdSchemaDocId),)
)
def insertAspects(self):
    """Insert concepts (aspects) and their data types into the database.

    Separates concepts/types of newly filed documents (inserted) from
    those of pre-existing documents (only looked up), then patches
    derived-from and substitution-group references among the newly
    inserted rows in follow-up UPDATE passes (needed because those
    references may point at rows that did not yet exist on first insert).
    """
    self.showStatus("insert aspects")
    # determine new filing documents and types they use
    filingDocumentAspects = set()
    existingDocumentUsedAspects = set()
    for concept in self.modelXbrl.qnameConcepts.values():
        if concept.modelDocument not in self.existingDocumentIds:
            filingDocumentAspects.add(concept)
            filingDocumentAspectType = concept.type
            if filingDocumentAspectType is not None and filingDocumentAspectType not in self.typesUsed:
                self.typesUsed.add(filingDocumentAspectType)
        elif concept in self.aspectsUsed:
            existingDocumentUsedAspects.add(concept)
    filingDocumentTypes = set()
    existingDocumentUsedTypes = set()
    for modelType in self.modelXbrl.qnameTypes.values():
        if modelType.modelDocument not in self.existingDocumentIds:
            filingDocumentTypes.add(modelType)
        elif modelType in self.typesUsed:
            existingDocumentUsedTypes.add(modelType)
    # get existing element IDs
    self.typeQnameId = {}
    if existingDocumentUsedTypes:
        typeQnameIds = []  # NOTE(review): never used below -- appears vestigial
        # look up (do not insert) data types already stored for pre-existing documents
        table = self.getTable('data_type', 'data_type_id',
                              ('document_id', 'qname',),
                              ('document_id', 'qname',),
                              tuple((self.documentIds[modelType.modelDocument],
                                     modelType.qname.clarkNotation)
                                    for modelType in existingDocumentUsedTypes
                                    if modelType.modelDocument in self.documentIds),
                              checkIfExisting=True,
                              insertIfNotMatched=False)
        for typeId, docId, qn in table:
            self.typeQnameId[qname(qn)] = typeId
    # insert data types of newly filed documents
    table = self.getTable('data_type', 'data_type_id',
                          ('document_id', 'xml_id', 'xml_child_seq',
                           'qname', 'name', 'base_type', 'derived_from_type_id'),
                          ('document_id', 'qname',),
                          tuple((self.documentIds[modelType.modelDocument],
                                 modelType.id,
                                 elementChildSequence(modelType),
                                 modelType.qname.clarkNotation,
                                 modelType.name,
                                 modelType.baseXsdType,
                                 # NOTE(review): typeQnameId is keyed by QName (see loops
                                 # over table results) but a ModelType object is passed
                                 # here, so this .get() presumably always yields None and
                                 # derived_from_type_id is only filled by the UPDATE pass
                                 # below -- confirm whether .typeDerivedFrom.qname was meant
                                 self.typeQnameId.get(modelType.typeDerivedFrom)
                                 if isinstance(modelType.typeDerivedFrom, ModelType) else None)
                                for modelType in filingDocumentTypes
                                if modelType.modelDocument in self.documentIds)
                          )
    for typeId, docId, qn in table:
        self.typeQnameId[qname(qn)] = typeId
    # second pass: link newly inserted types to their (also new) base types
    updatesToDerivedFrom = set()
    for modelType in filingDocumentTypes:
        if isinstance(modelType.typeDerivedFrom, ModelType):
            typeDerivedFrom = modelType.typeDerivedFrom
            if (typeDerivedFrom in filingDocumentTypes and
                modelType.qname in self.typeQnameId and
                typeDerivedFrom.qname in self.typeQnameId):
                updatesToDerivedFrom.add( (self.typeQnameId[modelType.qname],
                                           self.typeQnameId[typeDerivedFrom.qname]) )
    # update derivedFrom's of newly added types
    if updatesToDerivedFrom:
        self.updateTable('data_type',
                         ('data_type_id', 'derived_from_type_id'),
                         updatesToDerivedFrom)
    existingDocumentUsedTypes.clear() # dereference
    filingDocumentTypes.clear() # dereference
    self.aspectQnameId = {}
    # get existing element IDs (aspects of pre-existing documents, lookup only)
    if existingDocumentUsedAspects:
        table = self.getTable('aspect', 'aspect_id',
                              ('document_id', 'qname',),
                              ('document_id', 'qname',),
                              tuple((self.documentIds[concept.modelDocument],
                                     concept.qname.clarkNotation)
                                    for concept in existingDocumentUsedAspects
                                    if concept.modelDocument in self.documentIds),
                              checkIfExisting=True,
                              insertIfNotMatched=False)
        for aspectId, docId, qn in table:
            self.aspectQnameId[qname(qn)] = aspectId
    # build rows for aspects (concepts) of newly filed documents
    aspects = []
    for concept in filingDocumentAspects:
        niceType = concept.niceType
        if niceType is not None and len(niceType) > 128:
            niceType = niceType[:128]  # column limit: truncate to 128 characters
        if concept.modelDocument in self.documentIds:
            aspects.append((self.documentIds[concept.modelDocument],
                            concept.id,
                            elementChildSequence(concept),
                            concept.qname.clarkNotation,
                            concept.name,
                            self.typeQnameId.get(concept.typeQname),
                            # already truncated above; the slice here is a harmless no-op
                            niceType[:128] if niceType is not None else None,
                            self.aspectQnameId.get(concept.substitutionGroupQname),
                            concept.balance,
                            concept.periodType,
                            concept.isAbstract,
                            concept.isNillable,
                            concept.isNumeric,
                            concept.isMonetary,
                            concept.isTextBlock))
    table = self.getTable('aspect', 'aspect_id',
                          ('document_id', 'xml_id', 'xml_child_seq',
                           'qname', 'name', 'datatype_id', 'base_type', 'substitution_group_aspect_id',
                           'balance', 'period_type', 'abstract', 'nillable',
                           'is_numeric', 'is_monetary', 'is_text_block'),
                          ('document_id', 'qname'),
                          aspects
                          )
    for aspectId, docId, qn in table:
        self.aspectQnameId[qname(qn)] = aspectId
    # second pass: link newly inserted aspects to their (also new) substitution groups
    updatesToSubstitutionGroup = set()
    for concept in filingDocumentAspects:
        if concept.substitutionGroup in filingDocumentAspects and concept.modelDocument in self.documentIds:
            updatesToSubstitutionGroup.add( (self.aspectQnameId[concept.qname],
                                             self.aspectQnameId.get(concept.substitutionGroupQname)) )
    # update substitution groups of newly added aspects
    if updatesToSubstitutionGroup:
        self.updateTable('aspect',
                         ('aspect_id', 'substitution_group_aspect_id'),
                         updatesToSubstitutionGroup)
    filingDocumentAspects.clear() # dereference
    existingDocumentUsedAspects.clear() # dereference
def insertArcroleTypes(self):
    """Insert arcrole types and their used-on aspect references.

    Arcrole types of pre-existing documents are looked up only (not
    re-inserted); arcrole types of newly filed documents are inserted,
    then linked in the used_on table to the aspects they may be used on.
    """
    self.showStatus("insert arcrole types")
    # key ids of arcrole types in pre-existing documents
    # (fix: key on arcroleURI -- arcroleType elements carry an arcroleURI
    # attribute, not roleURI; the previous arcroleType.roleURI yielded None
    # so existing arcrole types never matched, unlike the new-document pass below)
    arcroleTypesByIds = set((self.documentIds[arcroleType.modelDocument],
                             arcroleType.arcroleURI) # key on docId, uriId
                            for arcroleTypes in self.modelXbrl.arcroleTypes.values()
                            for arcroleType in arcroleTypes
                            if arcroleType.modelDocument in self.existingDocumentIds)
    # lookup only -- no insertion for pre-existing documents
    table = self.getTable('arcrole_type', 'arcrole_type_id',
                          ('document_id', 'arcrole_uri'),
                          ('document_id', 'arcrole_uri'),
                          tuple((arcroleTypeIDs[0], # doc Id
                                 arcroleTypeIDs[1] # uri Id
                                 )
                                for arcroleTypeIDs in arcroleTypesByIds),
                          checkIfExisting=True,
                          insertIfNotMatched=False)
    self.arcroleTypeIds = {}
    for arcroleId, docId, uri in table:
        self.arcroleTypeIds[(docId, uri)] = arcroleId
    # added document arcrole type
    arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument],
                               arcroleType.arcroleURI), # key on docId, uriId
                              arcroleType) # value is roleType object
                             for arcroleTypes in self.modelXbrl.arcroleTypes.values()
                             for arcroleType in arcroleTypes
                             if arcroleType.modelDocument not in self.existingDocumentIds)
    table = self.getTable('arcrole_type', 'arcrole_type_id',
                          ('document_id', 'xml_id', 'xml_child_seq', 'arcrole_uri', 'cycles_allowed', 'definition'),
                          ('document_id', 'arcrole_uri'),
                          tuple((arcroleTypeIDs[0], # doc Id
                                 arcroleType.id,
                                 elementChildSequence(arcroleType),
                                 arcroleType.arcroleURI,
                                 arcroleType.cyclesAllowed,
                                 arcroleType.definition)
                                for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
    for arcroleId, docId, uri in table:
        self.arcroleTypeIds[(docId, uri)] = arcroleId
    # link each new arcrole type to the element qnames it may be used on
    table = self.getTable('used_on',
                          None, # no record id in this table
                          ('object_id', 'aspect_id'),
                          ('object_id', 'aspect_id'),
                          tuple((self.arcroleTypeIds[(arcroleTypeIDs[0], arcroleType.arcroleURI)],
                                 self.aspectQnameId[usedOnQn])
                                for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()
                                for usedOnQn in arcroleType.usedOns
                                if usedOnQn in self.aspectQnameId),
                          checkIfExisting=True)
def insertRoleTypes(self):
    """Insert role types and their used-on aspect references.

    Role types of pre-existing documents are looked up only (not
    re-inserted); role types of newly filed documents are inserted,
    then linked in the used_on table to the aspects they may be used on.
    """
    self.showStatus("insert role types")
    # add existing role types
    roleTypesByIds = set((self.documentIds[roleType.modelDocument],
                          roleType.roleURI) # key on docId, uriId
                         for roleTypes in self.modelXbrl.roleTypes.values()
                         for roleType in roleTypes
                         if roleType.modelDocument in self.existingDocumentIds)
    # lookup only -- no insertion for pre-existing documents
    table = self.getTable('role_type', 'role_type_id',
                          ('document_id', 'role_uri'),
                          ('document_id', 'role_uri'),
                          tuple((roleTypeIDs[0], # doc Id
                                 roleTypeIDs[1] # uri Id
                                 )
                                for roleTypeIDs in roleTypesByIds),
                          checkIfExisting=True,
                          insertIfNotMatched=False)
    self.roleTypeIds = {}
    for roleId, docId, uri in table:
        self.roleTypeIds[(docId, uri)] = roleId
    # new document role types
    roleTypesByIds = dict(((self.documentIds[roleType.modelDocument],
                            roleType.roleURI), # key on docId, uriId
                           roleType) # value is roleType object
                          for roleTypes in self.modelXbrl.roleTypes.values()
                          for roleType in roleTypes
                          if roleType.modelDocument not in self.existingDocumentIds)
    table = self.getTable('role_type', 'role_type_id',
                          ('document_id', 'xml_id', 'xml_child_seq', 'role_uri', 'definition'),
                          ('document_id', 'role_uri'),
                          tuple((roleTypeIDs[0], # doc Id
                                 roleType.id,
                                 elementChildSequence(roleType),
                                 roleTypeIDs[1], # uri Id
                                 roleType.definition)
                                for roleTypeIDs, roleType in roleTypesByIds.items()))
    for roleId, docId, uri in table:
        self.roleTypeIds[(docId, uri)] = roleId
    # link each new role type to the element qnames it may be used on
    table = self.getTable('used_on',
                          None, # no record id in this table
                          ('object_id', 'aspect_id'),
                          ('object_id', 'aspect_id'),
                          tuple((self.roleTypeIds[(roleTypeIDs[0], roleType.roleURI)],
                                 self.aspectQnameId[usedOnQn])
                                for roleTypeIDs, roleType in roleTypesByIds.items()
                                for usedOnQn in roleType.usedOns
                                if usedOnQn in self.aspectQnameId),
                          checkIfExisting=True)
def insertResources(self):
    """Insert label/reference resources attached to relationship arcs.

    Only arcroles that actually carry resources are considered, and only
    when the relationship set lives in the instance or the taxonomy
    relationship sets owner is not pre-existing. Builds self.resourceId
    keyed by (document id, xml child sequence).
    """
    self.showStatus("insert resources")
    # deduplicate resources (may be on multiple arcs)
    arcroles = [arcrole
                # check whether relationship_set is completely in instance or part/all in taxonomy
                for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-")
                and self.arcroleHasResource[arcrole]
                and (self.arcroleInInstance[arcrole] or not self.isExistingTaxonomyRelSetsOwner)]
    # note that lxml has no column numbers, use objectIndex as pseudo-column number
    uniqueResources = dict(((self.documentIds[resource.modelDocument],
                             resource.objectIndex), resource)
                           for arcrole in arcroles
                           for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
                           for resource in (rel.fromModelObject, rel.toModelObject)
                           if isinstance(resource, ModelResource))
    table = self.getTable('resource', 'resource_id',
                          ('document_id', 'xml_id', 'xml_child_seq', 'qname', 'role', 'value', 'xml_lang'),
                          ('document_id', 'xml_child_seq'),
                          tuple((self.documentIds[resource.modelDocument],
                                 resource.id,
                                 elementChildSequence(resource),
                                 resource.qname.clarkNotation,
                                 resource.role,
                                 resource.textValue,
                                 resource.xmlLang)
                                for resource in uniqueResources.values()),
                          checkIfExisting=True)
    # map (document id, xml child sequence) -> resource_id for later arc linking
    self.resourceId = dict(((docId, xml_child_seq), id)
                           for id, docId, xml_child_seq in table)
    uniqueResources.clear() # dereference
def modelObjectId(self, modelObject):
    """Map a model object to its database id, or None if it has no mapping.

    Concepts map via aspect qname ids; types via aspect type ids;
    resources and facts via their (document id, xml child sequence) key.
    """
    if isinstance(modelObject, ModelConcept):
        return self.aspectQnameId.get(modelObject.qname)
    if isinstance(modelObject, ModelType):
        # NOTE(review): self.aspectTypeIds is not assigned anywhere in this
        # file section (insertAspects populates self.typeQnameId) -- confirm
        # the attribute name against the rest of the module
        return self.aspectTypeIds.get(modelObject.qname)
    if isinstance(modelObject, (ModelResource, ModelFact)):
        documentKey = (self.documentIds[modelObject.modelDocument],
                       elementChildSequence(modelObject))
        idsByKey = (self.resourceId if isinstance(modelObject, ModelResource)
                    else self.factDataPointId)
        return idsByKey.get(documentKey)
    return None
def insertRelationships(self):
    """Insert relationship sets, relationships (in tree-walk order), and root arcs.

    Relationship sets owned by the taxonomy go under the taxonomy
    relationship-sets owner document; instance-level arcroles under the
    entry document. Each set's arcs are walked depth-first (without
    targetRole navigation) to assign tree sequence and depth.
    """
    self.showStatus("insert relationship sets")
    table = self.getTable('relationship_set', 'relationship_set_id',
                          ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                          ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                          tuple((self.documentIds[self.modelXbrl.modelDocument if self.arcroleInInstance[arcrole]
                                                  else self.taxonomyRelSetsOwner],
                                 ELR,
                                 arcrole,
                                 linkqname.clarkNotation,
                                 arcqname.clarkNotation)
                                for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
                                and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])))
    self.relSetId = dict(((linkRole, arcRole, lnkQn, arcQn), id)
                         for id, document_id, linkRole, arcRole, lnkQn, arcQn in table)
    # do tree walk to build relationships with depth annotated, no targetRole navigation
    dbRels = []

    def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, relSetId):
        # depth-first walk assigning sequence numbers; visited guards against cycles
        for rel in rels:
            if rel not in visited and isinstance(rel.toModelObject, ModelObject):
                visited.add(rel)
                dbRels.append((rel, seq, depth, relSetId))
                seq += 1
                seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, relSetId)
                visited.remove(rel)
        return seq

    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
        if (ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
            and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])):
            relSetId = self.relSetId[(ELR,
                                      arcrole,
                                      linkqname.clarkNotation,
                                      arcqname.clarkNotation)]
            relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
            seq = 1
            for rootConcept in relationshipSet.rootConcepts:
                seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, relSetId)
    # (fix: removed dead local helper resourceResourceId -- it was never called
    # and keyed self.resourceId with (docId, sourceline, objectIndex), which does
    # not match the (docId, xml_child_seq) keys built in insertResources;
    # resource ids are resolved through self.modelObjectId instead)
    table = self.getTable('relationship', 'relationship_id',
                          ('document_id', 'xml_id', 'xml_child_seq',
                           'relationship_set_id', 'reln_order',
                           'from_id', 'to_id', 'calculation_weight',
                           'tree_sequence', 'tree_depth', 'preferred_label_role'),
                          ('relationship_set_id', 'document_id', 'xml_child_seq'),
                          tuple((self.documentIds[rel.modelDocument],
                                 rel.id,
                                 elementChildSequence(rel.arcElement),
                                 relSetId,
                                 self.dbNum(rel.order),
                                 self.modelObjectId(rel.fromModelObject),
                                 self.modelObjectId(rel.toModelObject),
                                 self.dbNum(rel.weight), # none if no weight
                                 sequence,
                                 depth,
                                 rel.preferredLabel)
                                for rel, sequence, depth, relSetId in dbRels
                                if isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
    self.relationshipId = dict(((docId, xml_child_seq), relationshipId)
                               for relationshipId, relSetId, docId, xml_child_seq in table)
    # root table: relationships at tree depth 1 of each relationship set
    table = self.getTable('root', None,
                          ('relationship_set_id', 'relationship_id'),
                          ('relationship_set_id', 'relationship_id'),
                          tuple((relSetId,
                                 self.relationshipId[self.documentIds[rel.modelDocument],
                                                     elementChildSequence(rel.arcElement)])
                                for rel, sequence, depth, relSetId in dbRels
                                if depth == 1 and
                                isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
    del dbRels[:] # dereference
def insertDataPoints(self):
    """Insert facts (data points) with units, entity identifiers, periods,
    and dimensional aspect-value selections for the current report.

    If the filing was previously loaded, its prior rows in the affected
    tables are deleted first. Tuples are inserted recursively with
    parent_datapoint_id linking child facts to the tuple's data point.
    """
    reportId = self.reportId
    if self.filingPreviouslyInDB:
        self.showStatus("deleting prior data points of this report")
        # remove prior facts
        self.lockTables(("data_point", "entity_identifier", "period", "aspect_value_selection",
                         "aspect_value_selection_set", "unit_measure", "unit",
                         "table_data_points"))
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("data_point"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("entity_identifier"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("period"), reportId),
                     close=False, fetch=False)
        # aspect_value_selection rows are owned via the selection set, so join through it
        self.execute("DELETE from {0} "
                     "USING {1} "
                     "WHERE {1}.report_id = {2} AND {0}.aspect_value_selection_id = {1}.aspect_value_selection_id"
                     .format( self.dbTableName("aspect_value_selection"),
                              self.dbTableName("aspect_value_selection_set"),
                              reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1};"
                     .format( self.dbTableName("aspect_value_selection_set"), reportId),
                     close=False, fetch=False)
        # unit_measure rows are owned via the unit, so join through it
        self.execute("DELETE from {0} "
                     "USING {1} "
                     "WHERE {1}.report_id = {2} AND {0}.unit_id = {1}.unit_id"
                     .format( self.dbTableName("unit_measure"),
                              self.dbTableName("unit"),
                              reportId),
                     close=False, fetch=False)
        self.execute("DELETE from {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("unit"), reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                     .format( self.dbTableName("table_data_points"), reportId),
                     close=False, fetch=False)
    self.showStatus("insert data points")
    # units
    table = self.getTable('unit', 'unit_id',
                          ('report_id', 'xml_id', 'xml_child_seq', 'measures_hash'),
                          ('report_id', 'measures_hash'),
                          tuple((reportId,
                                 unit.id,
                                 elementChildSequence(unit),
                                 unit.md5hash)
                                for unit in dict((unit.md5hash,unit) # deduplicate by md5hash
                                                 for unit in self.modelXbrl.units.values()).values()))
    self.unitId = dict(((_reportId, measuresHash), id)
                       for id, _reportId, measuresHash in table)
    # measures (measures[0] = multiplicand measures, measures[1] = divisor measures)
    table = self.getTable('unit_measure',
                          None,
                          ('unit_id', 'qname', 'is_multiplicand'),
                          ('unit_id', 'qname', 'is_multiplicand'),
                          tuple((self.unitId[(reportId,unit.md5hash)],
                                 measure.clarkNotation,
                                 i == 0)
                                for unit in self.modelXbrl.units.values()
                                for i in range(2)
                                for measure in unit.measures[i]))
    table = self.getTable('entity_identifier', 'entity_identifier_id',
                          ('report_id', 'scheme', 'identifier'),
                          ('report_id', 'scheme', 'identifier'),
                          set((reportId,
                               cntx.entityIdentifier[0],
                               cntx.entityIdentifier[1])
                              for cntx in self.modelXbrl.contexts.values()),
                          checkIfExisting=True) # entities shared across multiple instance/inline docs
    self.entityIdentifierId = dict(((_reportId, entScheme, entIdent), id)
                                   for id, _reportId, entScheme, entIdent in table)
    table = self.getTable('period', 'period_id',
                          ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                          ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                          set((reportId,
                               cntx.startDatetime if cntx.isStartEndPeriod else None,
                               cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                               cntx.isInstantPeriod,
                               cntx.isForeverPeriod)
                              for cntx in self.modelXbrl.contexts.values()),
                          checkIfExisting=True) # periods shared across multiple instance/inline docs
    self.periodId = dict(((_reportId, start, end, isInstant, isForever), id)
                         for id, _reportId, start, end, isInstant, isForever in table)

    def cntxDimsSet(cntx):
        # frozenset of (dimension id, member id, isTyped, typed value) for one context;
        # dimensions without a known aspect id are dropped
        return frozenset((self.aspectQnameId[modelDimValue.dimensionQname],
                          self.aspectQnameId.get(modelDimValue.memberQname),
                          modelDimValue.isTyped,
                          modelDimValue.stringValue if modelDimValue.isTyped else None)
                         for modelDimValue in cntx.qnameDims.values()
                         if modelDimValue.dimensionQname in self.aspectQnameId)

    cntxAspectValueSelectionSet = dict((cntx, cntxDimsSet(cntx))
                                       for cntx in self.modelXbrl.contexts.values())
    aspectValueSelections = set(aspectValueSelectionSet
                                for cntx, aspectValueSelectionSet in cntxAspectValueSelectionSet.items()
                                if aspectValueSelectionSet)
    self.lockTables(("aspect_value_selection_set",))
    self.execute("DELETE FROM {0} WHERE report_id = {1}"
                 .format(self.dbTableName("aspect_value_selection_set"), reportId),
                 close=False, fetch=False)
    # insert one empty set row per distinct dimensional selection
    table = self.getTable('aspect_value_selection_set', 'aspect_value_selection_id',
                          ('report_id', ),
                          ('report_id', ),
                          tuple((reportId,)
                                for aspectValueSelection in aspectValueSelections)
                          )
    # assure we only get single entry per result (above gives cross product)
    table = self.execute("SELECT aspect_value_selection_id, report_id from {0} "
                         "WHERE report_id = {1}"
                         .format(self.dbTableName("aspect_value_selection_set"), reportId))
    # NOTE(review): pairs each popped selection set with a returned id in
    # arbitrary order; the ids only distinguish sets and carry no intrinsic
    # meaning -- confirm this matches downstream expectations
    aspectValueSelectionSets = dict((aspectValueSelections.pop(), id)
                                    for id, _reportId in table)
    cntxAspectValueSelectionSetId = dict((cntx, aspectValueSelectionSets[_cntxDimsSet])
                                         for cntx, _cntxDimsSet in cntxAspectValueSelectionSet.items()
                                         if _cntxDimsSet)
    table = self.getTable('aspect_value_selection',
                          None,
                          ('aspect_value_selection_id', 'aspect_id', 'aspect_value_id', 'is_typed_value', 'typed_value'),
                          ('aspect_value_selection_id', ),
                          tuple((aspectValueSetId, dimId, dimMbrId, isTyped, typedValue)
                                for aspectValueSelection, aspectValueSetId in aspectValueSelectionSets.items()
                                for dimId, dimMbrId, isTyped, typedValue in aspectValueSelection)
                          )
    # facts
    def insertFactSet(modelFacts, parentDatapointId):
        # insert one level of facts; recurses into tuples with the tuple's datapoint id as parent
        facts = []
        for fact in modelFacts:
            if fact.concept is not None and getattr(fact, "xValid", UNVALIDATED) >= VALID and fact.qname is not None:
                cntx = fact.context
                documentId = self.documentIds[fact.modelDocument]
                facts.append((reportId,
                              documentId,
                              fact.id,
                              elementChildSequence(fact),
                              fact.sourceline,
                              parentDatapointId, # parent ID
                              self.aspectQnameId.get(fact.qname),
                              fact.contextID,
                              self.entityIdentifierId.get((reportId, cntx.entityIdentifier[0], cntx.entityIdentifier[1]))
                              if cntx is not None else None,
                              self.periodId.get((reportId,
                                                 cntx.startDatetime if cntx.isStartEndPeriod else None,
                                                 cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                                                 cntx.isInstantPeriod,
                                                 cntx.isForeverPeriod)) if cntx is not None else None,
                              cntxAspectValueSelectionSetId.get(cntx) if cntx is not None else None,
                              self.unitId.get((reportId,fact.unit.md5hash)) if fact.unit is not None else None,
                              fact.isNil,
                              fact.precision,
                              fact.decimals,
                              roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
                              fact.value
                              ))
        table = self.getTable('data_point', 'datapoint_id',
                              ('report_id', 'document_id', 'xml_id', 'xml_child_seq', 'source_line',
                               'parent_datapoint_id', # tuple
                               'aspect_id',
                               'context_xml_id', 'entity_identifier_id', 'period_id', 'aspect_value_selection_id', 'unit_id',
                               'is_nil', 'precision_value', 'decimals_value', 'effective_value', 'value'),
                              ('document_id', 'xml_child_seq'),
                              facts)
        xmlIdDataPointId = dict(((docId, xml_child_seq), datapointId)
                                for datapointId, docId, xml_child_seq in table)
        self.factDataPointId.update(xmlIdDataPointId)
        for fact in modelFacts:
            if fact.isTuple:
                try:
                    insertFactSet(fact.modelTupleFacts,
                                  xmlIdDataPointId[(self.documentIds[fact.modelDocument],
                                                    elementChildSequence(fact))])
                except KeyError:
                    self.modelXbrl.info("xpDB:warning",
                                        _("Loading XBRL DB: tuple's datapoint not found: %(tuple)s"),
                                        modelObject=fact, tuple=fact.qname)

    self.factDataPointId = {}
    insertFactSet(self.modelXbrl.facts, None)
    # hashes
    if self.tableFacts: # if any entries
        tableDataPoints = []
        for roleType, tableCode, fact in self.tableFacts:
            try:
                tableDataPoints.append((reportId,
                                        self.roleTypeIds[(self.documentIds[roleType.modelDocument],
                                                          roleType.roleURI)],
                                        tableCode,
                                        self.factDataPointId[(self.documentIds[fact.modelDocument],
                                                              elementChildSequence(fact))]))
            except KeyError:
                # print ("missing table data points role or data point")
                pass
        table = self.getTable('table_data_points', None,
                              ('report_id', 'object_id', 'table_code', 'datapoint_id'),
                              ('report_id', 'object_id', 'datapoint_id'),
                              tableDataPoints)
def insertValidationResults(self):
    """Insert logged validation messages and their object references.

    Prior messages of a re-filed report are deleted first. Each log
    entry becomes a message row; each of its refs is resolved to a
    database object id (fact, relationship set, concept, report, or
    document) and linked in message_reference.
    """
    reportId = self.reportId
    if self.filingPreviouslyInDB:
        self.showStatus("deleting prior messages of this report")
        # remove prior messages for this report
        self.lockTables(("message", "message_reference"))
        self.execute("DELETE from {0} "
                     "USING {1} "
                     "WHERE {1}.report_id = {2} AND {1}.message_id = {0}.message_id"
                     .format(self.dbTableName("message_reference"),
                             self.dbTableName("message"),
                             reportId),
                     close=False, fetch=False)
        self.execute("DELETE FROM {} WHERE message.report_id = {}"
                     .format(self.dbTableName("message"),reportId),
                     close=False, fetch=False)
    messages = []
    messageRefs = defaultdict(set) # direct link to objects
    for i, logEntry in enumerate(self.loggingEntries):
        sequenceInReport = i+1
        for ref in logEntry['refs']:
            modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
            # for now just find a concept
            objectId = None
            if isinstance(modelObject, ModelFact):
                objectId = self.factDataPointId.get((self.documentIds.get(modelObject.modelDocument),
                                                     elementChildSequence(modelObject)))
            elif isinstance(modelObject, ModelRelationship):
                objectId = self.relSetId.get((modelObject.linkrole,
                                              modelObject.arcrole,
                                              modelObject.linkQname.clarkNotation,
                                              modelObject.arcElement.qname.clarkNotation))
            elif isinstance(modelObject, ModelConcept):
                objectId = self.aspectQnameId.get(modelObject.qname)
            elif isinstance(modelObject, ModelXbrl):
                objectId = reportId
            elif hasattr(modelObject, "modelDocument"):
                objectId = self.documentIds.get(modelObject.modelDocument)
            if objectId is not None:
                messageRefs[sequenceInReport].add(objectId)
        messages.append((reportId,
                         sequenceInReport,
                         logEntry['code'],
                         logEntry['level'],
                         logEntry['message']['text']))
    if messages:
        self.showStatus("insert validation messages")
        table = self.getTable('message', 'message_id',
                              ('report_id', 'sequence_in_report', 'message_code', 'message_level', 'value'),
                              ('report_id', 'sequence_in_report'),
                              messages)
        messageIds = dict((sequenceInReport, messageId)
                          for messageId, _reportId, sequenceInReport in table)
        # link each message to the object ids its refs resolved to
        table = self.getTable('message_reference', None,
                              ('message_id', 'object_id'),
                              ('message_id', 'object_id'),
                              tuple((messageId,
                                     objectId)
                                    for sequenceInReport, objectIds in messageRefs.items()
                                    for objectId in objectIds
                                    for messageId in (messageIds[sequenceInReport],)))
# ISO 3166-1 alpha-2 country code for each US state/territory and Canadian
# province/territory postal abbreviation; used to derive the country for
# entity business/mail addresses when only a state code is available
countryOfState = {
    "AL": "US","AK": "US","AZ": "US","AR": "US","CA": "US","CO": "US", "CT": "US","DE": "US",
    "FL": "US","GA": "US","HI": "US","ID": "US","IL": "US","IN": "US","IA": "US","KS": "US",
    "KY": "US","LA": "US","ME": "US","MD": "US","MA": "US","MI": "US","MN": "US","MS": "US",
    "MO": "US","MT": "US","NE": "US","NV": "US","NH": "US","NJ": "US","NM": "US","NY": "US",
    "NC": "US","ND": "US","OH": "US","OK": "US","OR": "US","PA": "US","RI": "US","SC": "US",
    "SD": "US","TN": "US","TX": "US","UT": "US","VT": "US","VA": "US","WA": "US","WV": "US",
    "WI": "US","WY": "US","DC": "US","PR": "US","VI": "US","AS": "US","GU": "US","MP": "US",
    "AB": "CA","BC": "CA","MB": "CA","NB": "CA","NL": "CA","NS": "CA","ON": "CA","PE": "CA",
    "QC": "CA","SK": "CA","NT": "CA","NU": "CA","YT": "CA"}
| 58.5382 | 144 | 0.517068 |
import os, time, datetime, logging
from arelle.ModelDocument import Type
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelResource, ModelRelationship
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import ModelXbrl
from arelle.ModelDocument import ModelDocument
from arelle.ModelObject import ModelObject
from arelle.ModelValue import qname
from arelle.ValidateXbrlCalcs import roundValue
from arelle.XmlValidate import UNVALIDATED, VALID
from arelle.XmlUtil import elementChildSequence
from arelle import XbrlConst
from arelle.UrlUtil import authority, ensureUrl
from .SqlDb import XPDBException, isSqlConnection, SqlDbConnection
from .tableFacts import tableFacts
from .entityInformation import loadEntityInformation
from .primaryDocumentFacts import loadPrimaryDocumentFacts
from collections import defaultdict
def insertIntoDB(modelXbrl,
                 user=None, password=None, host=None, port=None, database=None, timeout=None,
                 product=None, entrypoint=None, rssItem=None, **kwargs):
    """Insert a loaded modelXbrl into the semantic SQL database.

    Opens a connection, then either initializes a batch run (when kwargs
    carries an "rssObject") or verifies the schema and inserts the filing.
    On any failure the connection is closed with rollback and the original
    exception re-raised.
    """
    xbrlDbConn = None
    try:
        xbrlDbConn = XbrlSqlDatabaseConnection(modelXbrl, user, password, host, port, database, timeout, product)
        if "rssObject" in kwargs: # batch pre-scan of an RSS feed
            xbrlDbConn.initializeBatch(kwargs["rssObject"])
        else:
            xbrlDbConn.verifyTables()
            xbrlDbConn.insertXbrl(entrypoint, rssItem)
        xbrlDbConn.close()
    except Exception:  # fix: dropped unused "as ex"/"as ex2" bindings
        if xbrlDbConn is not None:
            try:
                xbrlDbConn.close(rollback=True)
            except Exception:
                pass # deliberate best-effort close; the original failure is re-raised
        raise
def isDBPort(host, port, timeout=10, product="postgres"):
    """Return True if a SQL server connection can be opened to host:port within timeout.

    The product argument is accepted for plug-in interface compatibility
    but is not used by the connection probe.
    """
    return isSqlConnection(host, port, timeout)
# table names expected in the semantic-model database schema; verifyTables
# compares this set against the live database to decide whether the DDL must
# be loaded and whether any required tables are missing
XBRLDBTABLES = {
    "filing", "report",
    "document", "referenced_documents",
    "aspect", "data_type", "role_type", "arcrole_type",
    "resource", "relationship_set", "root", "relationship",
    "data_point", "entity", "period", "unit", "unit_measure", "aspect_value_selection",
    "message", "message_reference",
    "industry", "industry_level", "industry_structure",
}
class XbrlSqlDatabaseConnection(SqlDbConnection):
def verifyTables(self):
    """Ensure the semantic-model tables exist, creating the schema if the database is empty.

    Raises XPDBException when required tables remain missing after an
    attempted schema creation.
    """
    ddlFileByProduct = {"mssql": "xbrlSemanticMSSqlDB.sql",
                        "mysql": "xbrlSemanticMySqlDB.ddl",
                        "sqlite": "xbrlSemanticSQLiteDB.ddl",
                        "orcl": "xbrlSemanticOracleDB.sql",
                        "postgres": "xbrlSemanticPostgresDB.ddl"}
    missingTables = XBRLDBTABLES - self.tablesInDB()
    # every table absent means a fresh database: load the product DDL and re-check
    if missingTables == XBRLDBTABLES:
        self.create(os.path.join("sql", "semantic", ddlFileByProduct[self.product]))
        missingTables = XBRLDBTABLES - self.tablesInDB()
    # a lone "sequences" table is tolerated (not every product uses one)
    if missingTables and missingTables != {"sequences"}:
        raise XPDBException("sqlDB:MissingTables",
                            _("The following tables are missing: %(missingTableNames)s"),
                            missingTableNames=', '.join(sorted(missingTables)))
def insertXbrl(self, entrypoint, rssItem):
    """Insert the loaded XBRL model into the database as one transaction.

    Loads dimension defaults and any captured log entries, identifies
    pre-existing documents and used aspects, then inserts filing,
    documents, aspects, role/arcrole types, resources, data points,
    relationships, and validation results, committing at the end.
    Profiling statistics are recorded per phase.
    """
    try:
        # dimension defaults must be loaded before aspects/facts are examined
        from arelle import ValidateXbrlDimensions
        ValidateXbrlDimensions.loadDimensionDefaults(self.modelXbrl)
        # capture logging entries (inserted later by insertValidationResults)
        self.loggingEntries = []
        for handler in logging.getLogger("arelle").handlers:
            if hasattr(handler, "dbHandlerLogEntries"):
                self.loggingEntries = handler.dbHandlerLogEntries()
                break
        if self.modelXbrl.modelDocument is None:
            raise XPDBException("xpgDB:MissingXbrlDocument",
                                _("No XBRL instance or schema loaded for this filing."))
        self.entityInformation = loadEntityInformation(self.modelXbrl, entrypoint, rssItem)
        self.tableFacts = tableFacts(self.modelXbrl)
        loadPrimaryDocumentFacts(self.modelXbrl, rssItem, self.entityInformation)
        self.identifyTaxonomyRelSetsOwner()
        # requires locking most of the table structure
        self.lockTables(('entity', 'filing', 'report', 'document', 'referenced_documents'),
                        isSessionTransaction=True) # lock for whole transaction
        # find pre-existing documents in server database
        self.identifyPreexistingDocuments()
        self.identifyAspectsUsed()
        self.dropTemporaryTable()
        startedAt = time.time()
        self.syncSequences = True # for data base types that don't explicity handle sequences
        self.insertFiling(rssItem)
        self.modelXbrl.profileStat(_("XbrlSqlDB: Filing insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertDocuments()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Documents insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertAspects()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Aspects insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertArcroleTypes()
        self.insertRoleTypes()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Role Types insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertResources()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Resources insertion"), time.time() - startedAt)
        # fix: removed a duplicated consecutive "startedAt = time.time()" statement here
        startedAt = time.time()
        self.insertDataPoints()
        self.modelXbrl.profileStat(_("XbrlSqlDB: instance insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertRelationships()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Relationships insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.insertValidationResults()
        self.modelXbrl.profileStat(_("XbrlSqlDB: Validation results insertion"), time.time() - startedAt)
        startedAt = time.time()
        self.showStatus("Committing entries")
        self.commit()
        self.modelXbrl.profileStat(_("XbrlSqlDB: insertion committed"), time.time() - startedAt)
        self.showStatus("DB insertion completed", clearAfter=5000)
    except Exception:  # fix: dropped unused "as ex" binding; exception is re-raised
        self.showStatus("DB insertion failed due to exception", clearAfter=5000)
        raise
def identifyTaxonomyRelSetsOwner(self):
    """Determine the document owning taxonomy relationship sets and classify arcroles.

    Sets self.taxonomyRelSetsOwner (the document under whose id
    taxonomy-level relationship sets are stored), and the per-arcrole
    flags self.arcroleInInstance and self.arcroleHasResource used by
    insertResources and insertRelationships.
    """
    instanceReferencedDocuments = set()
    instanceDocuments = set()
    inlineXbrlDocSet = None
    for mdlDoc in self.modelXbrl.urlDocs.values():
        if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
            instanceDocuments.add(mdlDoc)
            for refDoc, ref in mdlDoc.referencesDocument.items():
                if refDoc.inDTS and ref.referenceType in ("href", "import", "include"):
                    instanceReferencedDocuments.add(refDoc)
        elif mdlDoc.type == Type.INLINEXBRLDOCUMENTSET:
            inlineXbrlDocSet = mdlDoc
    if len(instanceReferencedDocuments) > 1:
        # multiple referenced schemas: rel sets belong to the instance (or doc set) itself
        if len(instanceDocuments) == 1:
            self.taxonomyRelSetsOwner = instanceDocuments.pop()
        elif inlineXbrlDocSet is not None:
            self.taxonomyRelSetsOwner = inlineXbrlDocSet
        else:
            self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
    elif len(instanceReferencedDocuments) == 1:
        # single referenced schema owns the taxonomy relationship sets
        self.taxonomyRelSetsOwner = instanceReferencedDocuments.pop()
    elif self.modelXbrl.modelDocument.type == Type.SCHEMA:
        self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
    else:
        self.taxonomyRelSetsOwner = self.modelXbrl.modelDocument
    instanceReferencedDocuments.clear() # dereference
    instanceDocuments.clear() # dereference
    # classify each base-set arcrole: does it appear in the instance, and does it carry resources?
    self.arcroleInInstance = {}
    self.arcroleHasResource = {}
    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
        if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-"):
            inInstance = False
            hasResource = False
            for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
                if (not inInstance and
                    rel.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL) and
                    any(isinstance(tgtObj, ModelObject) and tgtObj.modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL)
                        for tgtObj in (rel.fromModelObject, rel.toModelObject))):
                    inInstance = True
                if not hasResource and any(isinstance(resource, ModelResource)
                                           for resource in (rel.fromModelObject, rel.toModelObject)):
                    hasResource = True
                if inInstance and hasResource:
                    # both flags set; no need to scan remaining relationships
                    break;
            self.arcroleInInstance[arcrole] = inInstance
            self.arcroleHasResource[arcrole] = hasResource
def initializeBatch(self, rssObject):
results = self.execute("SELECT filing_number, accepted_timestamp FROM filing")
existingFilings = dict((filingNumber, timestamp)
for filingNumber, timestamp in results)
for rssItem in rssObject.rssItems:
if (rssItem.accessionNumber in existingFilings and
rssItem.acceptanceDatetime == existingFilings[rssItem.accessionNumber]):
rssItem.skipRssItem = True
    def insertFiling(self, rssItem):
        """Insert (or locate) the entity, filing and report rows for this filing.

        rssItem: an RSS feed item when batch loading, or None for a single
        filing (values then come from self.entityInformation).
        Sets self.entityId, self.entityPreviouslyInDB, self.filingId,
        self.filingPreviouslyInDB and self.reportId.
        """
        now = datetime.datetime.now()
        entityInfo = self.entityInformation
        def rssItemGet(propertyName):
            # prefer the rss item's value when batch loading; else None
            if rssItem is not None:
                return getattr(rssItem, propertyName, None)
            return None
        self.showStatus("insert entity")
        LEI = None
        # NOTE(review): LEI is hard-coded None, so the legal_entity_number
        # comparator and the 4-column unpack branch below are currently dead.
        entity_comparator = ('legal_entity_number', 'file_number') if LEI else ('file_number',)
        table = self.getTable('entity', 'entity_id',
                              ('legal_entity_number',
                               'file_number',
                               'reference_number',
                               'tax_number',
                               'standard_industry_code',
                               'name',
                               'legal_state',
                               'phone',
                               'phys_addr1', 'phys_addr2', 'phys_city', 'phys_state', 'phys_zip', 'phys_country',
                               'mail_addr1', 'mail_addr2', 'mail_city', 'mail_state', 'mail_zip', 'mail_country',
                               'fiscal_year_end',
                               'filer_category',
                               'public_float',
                               'trading_symbol'),
                              entity_comparator,
                              ((LEI,
                                # synthetic file number from clock time if none supplied
                                rssItemGet("fileNumber") or entityInfo.get("file-number") or str(int(time.time())),
                                rssItemGet("cikNumber") or entityInfo.get("cik"),
                                entityInfo.get("irs-number"),
                                rssItemGet("assignedSic") or entityInfo.get("assigned-sic") or -1,
                                rssItemGet("companyName") or entityInfo.get("conformed-name"),
                                entityInfo.get("state-of-incorporation"),
                                entityInfo.get("business-address.phone"),
                                entityInfo.get("business-address.street1"),
                                entityInfo.get("business-address.street2"),
                                entityInfo.get("business-address.city"),
                                entityInfo.get("business-address.state"),
                                entityInfo.get("business-address.zip"),
                                countryOfState.get(entityInfo.get("business-address.state")),
                                entityInfo.get("mail-address.street1"),
                                entityInfo.get("mail-address.street2"),
                                entityInfo.get("mail-address.city"),
                                entityInfo.get("mail-address.state"),
                                entityInfo.get("mail-address.zip"),
                                countryOfState.get(entityInfo.get("mail-address.state")),
                                rssItemGet("fiscalYearEnd") or entityInfo.get("fiscal-year-end"),
                                entityInfo.get("filer-category"),
                                entityInfo.get("public-float"),
                                entityInfo.get("trading-symbol")
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        if LEI:
            for id, _LEI, filing_number, existenceStatus in table:
                self.entityId = id
                self.entityPreviouslyInDB = existenceStatus
                break
        else:
            for id, filing_number, existenceStatus in table:
                self.entityId = id
                self.entityPreviouslyInDB = existenceStatus
                break
        # record prior entity names; keyPrefix recovers the dotted key's prefix
        # so the matching date-changed entry for each former-conformed-name is read
        if any ('former-conformed-name' in key for key in entityInfo.keys()):
            self.getTable('former_entity', None,
                          ('entity_id', 'former_name', 'date_changed'),
                          ('entity_id', 'former_name', 'date_changed'),
                          ((self.entityId,
                            entityInfo.get(keyPrefix + '.former-conformed-name'),
                            entityInfo.get(keyPrefix + '.date-changed'))
                           for key in entityInfo.keys() if 'former-conformed-name' in key
                           for keyPrefix in (key.partition('.')[0],)),
                          checkIfExisting=True)
        self.showStatus("insert filing")
        table = self.getTable('filing', 'filing_id',
                              ('filing_number', 'form_type', 'entity_id', 'reference_number',
                               'accepted_timestamp', 'is_most_current', 'filing_date',
                               'creation_software',
                               'authority_html_url', 'entry_url', ),
                              ('filing_number',),
                              ((rssItemGet("accessionNumber") or entityInfo.get("accession-number") or str(int(time.time())),
                                rssItemGet("formType") or entityInfo.get("form-type"),
                                self.entityId,
                                rssItemGet("cikNumber") or entityInfo.get("cik"),
                                rssItemGet("acceptanceDatetime") or entityInfo.get("acceptance-datetime") or now,
                                True,
                                rssItemGet("filingDate") or entityInfo.get("filing-date") or now,
                                self.modelXbrl.modelDocument.creationSoftware,
                                rssItemGet("htmlUrl") or entityInfo.get("primary-document-url"),
                                rssItemGet("url") or entityInfo.get("instance-url")
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        for id, filing_number, existenceStatus in table:
            self.filingId = id
            self.filingPreviouslyInDB = existenceStatus
            break
        self.showStatus("insert report")
        table = self.getTable('report', 'report_id',
                              ('filing_id', ),
                              ('filing_id',),
                              ((self.filingId,
                                ),),
                              checkIfExisting=True,
                              returnExistenceStatus=True)
        for id, foundFilingId, existenceStatus in table:
            self.reportId = id
            self.filingPreviouslyInDB = existenceStatus
            break
def isSemanticDocument(self, modelDocument):
if modelDocument.type == Type.SCHEMA:
return modelDocument.inDTS or modelDocument.targetNamespace == "http://arelle.org/doc/2014-01-31"
return modelDocument.type in (Type.INSTANCE, Type.INLINEXBRL, Type.LINKBASE)
    def identifyPreexistingDocuments(self):
        """Query the document table for semantic documents already stored.

        Builds self.urlDocs (normalized url -> modelDocument) and
        self.existingDocumentIds (modelDocument -> document_id), and decides
        whether the taxonomy rel-sets owner is already in the database.
        """
        self.existingDocumentIds = {}
        self.urlDocs = {}
        docUris = set()
        for modelDocument in self.modelXbrl.urlDocs.values():
            url = ensureUrl(modelDocument.uri)
            self.urlDocs[url] = modelDocument
            if self.isSemanticDocument(modelDocument):
                # dbStr wraps/escapes the url as a SQL string literal
                docUris.add(self.dbStr(url))
        if docUris:
            # NOTE(review): IN-list is string-built; relies on dbStr fully
            # escaping the urls — verify against the driver's dbStr
            results = self.execute("SELECT document_id, document_url FROM {} WHERE document_url IN ({})"
                                   .format(self.dbTableName("document"),
                                           ', '.join(docUris)))
            self.existingDocumentIds = dict((self.urlDocs[self.pyStrFromDbStr(docUrl)],docId)
                                            for docId, docUrl in results)
        # owner is "existing" only when it is a taxonomy document already stored
        self.isExistingTaxonomyRelSetsOwner = (
                self.taxonomyRelSetsOwner.type not in (Type.INSTANCE, Type.INLINEXBRL, Type.INLINEXBRLDOCUMENTSET) and
                self.taxonomyRelSetsOwner in self.existingDocumentIds)
    def identifyAspectsUsed(self):
        """Collect the concepts (aspects) and data types actually used.

        Aspects used include fact concepts, dimension/member concepts
        (explicit and typed), dimension defaults, relationship endpoints,
        built-in xbrli concepts, role/arcrole usedOn concepts, concepts
        referenced by logging entries, and the substitution groups of all of
        the above.  Results go to self.aspectsUsed and self.typesUsed;
        self.relationshipSets caches the keyed base sets examined later.
        """
        self.relationshipSets = [(arcrole, ELR, linkqname, arcqname)
                                 for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                 if ELR and (arcrole.startswith("XBRL-") or (linkqname and arcqname))]
        aspectsUsed = set(f.concept
                          for f in self.modelXbrl.factsInInstance)
        for cntx in self.modelXbrl.contexts.values():
            for dim in cntx.qnameDims.values():
                aspectsUsed.add(dim.dimension)
                if dim.isExplicit:
                    aspectsUsed.add(dim.member)
                else:
                    aspectsUsed.add(self.modelXbrl.qnameConcepts[dim.typedMember.qname])
        for defaultDimQn, defaultDimMemberQn in self.modelXbrl.qnameDimensionDefaults.items():
            aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimQn])
            aspectsUsed.add(self.modelXbrl.qnameConcepts[defaultDimMemberQn])
        for relationshipSetKey in self.relationshipSets:
            relationshipSet = self.modelXbrl.relationshipSet(*relationshipSetKey)
            for rel in relationshipSet.modelRelationships:
                if isinstance(rel.fromModelObject, ModelConcept):
                    aspectsUsed.add(rel.fromModelObject)
                if isinstance(rel.toModelObject, ModelConcept):
                    aspectsUsed.add(rel.toModelObject)
        try:
            # built-in xbrli concepts; KeyError possible if schema not loaded
            for qn in (XbrlConst.qnXbrliIdentifier, XbrlConst.qnXbrliPeriod, XbrlConst.qnXbrliUnit):
                aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
        except KeyError:
            pass # no DTS
        for roleTypes in (self.modelXbrl.roleTypes.values(), self.modelXbrl.arcroleTypes.values()):
            for roleUriTypes in roleTypes:
                for roleType in roleUriTypes:
                    for qn in roleType.usedOns:
                        if qn in self.modelXbrl.qnameConcepts:
                            aspectsUsed.add(self.modelXbrl.qnameConcepts[qn])
        # add concepts referenced by validation/logging messages
        for logEntry in self.loggingEntries:
            for ref in logEntry['refs']:
                modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
                if isinstance(modelObject, ModelConcept) and modelObject.modelDocument.inDTS:
                    aspectsUsed.add(modelObject)
        # include substitution-group concepts of everything collected so far
        aspectsUsed |= set(aspect.substitutionGroup
                           for aspect in aspectsUsed
                           if aspect is not None)
        aspectsUsed -= {None}  # remove None if in aspectsUsed
        self.aspectsUsed = aspectsUsed
        typesUsed = set()
        def typeUsed(modelType):
            # recursively add the type and its base types (in-DTS only)
            if modelType is not None and modelType.modelDocument.inDTS:
                typesUsed.add(modelType)
                typesDerivedFrom = modelType.typeDerivedFrom
                if isinstance(typesDerivedFrom, list): # union derivation
                    for typeDerivedFrom in typesDerivedFrom:
                        if typeDerivedFrom not in typesUsed:
                            typeUsed(typeDerivedFrom)
                else:
                    if typesDerivedFrom is not None and typesDerivedFrom not in typesUsed:
                        typeUsed(typesDerivedFrom)
        for aspect in aspectsUsed:
            modelType = aspect.type
            if modelType is not None:
                if modelType not in typesUsed:
                    typeUsed(modelType)
        self.typesUsed = typesUsed
def insertDocuments(self):
self.showStatus("insert documents")
table = self.getTable('document', 'document_id',
('document_url', 'document_type', 'namespace'),
('document_url',),
set((ensureUrl(docUrl),
Type.typeName[mdlDoc.type],
mdlDoc.targetNamespace)
for docUrl, mdlDoc in self.modelXbrl.urlDocs.items()
if mdlDoc not in self.existingDocumentIds and
self.isSemanticDocument(mdlDoc)),
checkIfExisting=True)
self.documentIds = dict((self.urlDocs[self.pyStrFromDbStr(url)], id)
for id, url in table)
self.documentIds.update(self.existingDocumentIds)
referencedDocuments = set()
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
referencedDocuments.add( (self.documentIds[mdlDoc], self.documentIds[refDoc] ))
table = self.getTable('referenced_documents',
None,
('object_id','document_id'),
('object_id','document_id'),
referencedDocuments,
checkIfExisting=True)
instDocId = instSchemaDocId = agencySchemaDocId = stdSchemaDocId = None
mdlDoc = self.modelXbrl.modelDocument
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
instDocId = self.documentIds[mdlDoc]
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType == "href" and refDoc in self.documentIds:
instSchemaDocId = self.documentIds[refDoc]
break
elif mdlDoc.type == Type.SCHEMA:
instDocSchemaDocId = self.documentIds[mdlDoc]
for mdlDoc in self.modelXbrl.urlDocs.values():
if mdlDoc.type in (Type.INSTANCE, Type.INLINEXBRL):
referencedDocuments.add( (self.reportId, self.documentIds[mdlDoc] ))
if mdlDoc in self.documentIds:
for refDoc, ref in mdlDoc.referencesDocument.items():
if refDoc.inDTS and ref.referenceType in ("href", "import", "include") \
and refDoc in self.documentIds:
if refDoc.type == Type.SCHEMA:
nsAuthority = authority(refDoc.targetNamespace, includeScheme=False)
nsPath = refDoc.targetNamespace.split('/')
if len(nsPath) > 2:
if ((nsAuthority in ("fasb.org", "xbrl.us") and nsPath[-2] == "us-gaap") or
(nsAuthority == "xbrl.ifrs.org" and nsPath[-1] in ("ifrs", "ifrs-full", "ifrs-smes"))):
stdSchemaDocId = self.documentIds[refDoc]
elif (nsAuthority == "xbrl.sec.gov" and nsPath[-2] == "rr"):
agencySchemaDocId = self.documentIds[refDoc]
self.updateTable("report",
("report_id", "report_data_doc_id", "report_schema_doc_id", "agency_schema_doc_id", "standard_schema_doc_id"),
((self.reportId, instDocId, instSchemaDocId, agencySchemaDocId, stdSchemaDocId),)
)
    def insertAspects(self):
        """Insert data types and aspects (concepts) for the filing.

        Two-phase per kind: first look up ids of types/aspects declared in
        pre-existing documents (no insert), then insert those declared in the
        newly filed documents.  Afterwards back-patches derived-from type ids
        and substitution-group aspect ids that could not be resolved during
        the first pass.  Populates self.typeQnameId and self.aspectQnameId.
        """
        self.showStatus("insert aspects")
        # partition concepts into newly-filed vs existing-and-used
        filingDocumentAspects = set()
        existingDocumentUsedAspects = set()
        for concept in self.modelXbrl.qnameConcepts.values():
            if concept.modelDocument not in self.existingDocumentIds:
                filingDocumentAspects.add(concept)
                filingDocumentAspectType = concept.type
                if filingDocumentAspectType is not None and filingDocumentAspectType not in self.typesUsed:
                    self.typesUsed.add(filingDocumentAspectType)
            elif concept in self.aspectsUsed:
                existingDocumentUsedAspects.add(concept)
        filingDocumentTypes = set()
        existingDocumentUsedTypes = set()
        for modelType in self.modelXbrl.qnameTypes.values():
            if modelType.modelDocument not in self.existingDocumentIds:
                filingDocumentTypes.add(modelType)
            elif modelType in self.typesUsed:
                existingDocumentUsedTypes.add(modelType)
        self.typeQnameId = {}
        if existingDocumentUsedTypes:
            # lookup only (insertIfNotMatched=False): these already exist in db
            typeQnameIds = []
            table = self.getTable('data_type', 'data_type_id',
                                  ('document_id', 'qname',),
                                  ('document_id', 'qname',),
                                  tuple((self.documentIds[modelType.modelDocument],
                                         modelType.qname.clarkNotation)
                                        for modelType in existingDocumentUsedTypes
                                        if modelType.modelDocument in self.documentIds),
                                  checkIfExisting=True,
                                  insertIfNotMatched=False)
            for typeId, docId, qn in table:
                self.typeQnameId[qname(qn)] = typeId
        table = self.getTable('data_type', 'data_type_id',
                              ('document_id', 'xml_id', 'xml_child_seq',
                               'qname', 'name', 'base_type', 'derived_from_type_id'),
                              ('document_id', 'qname',),
                              tuple((self.documentIds[modelType.modelDocument],
                                     modelType.id,
                                     elementChildSequence(modelType),
                                     modelType.qname.clarkNotation,
                                     modelType.name,
                                     modelType.baseXsdType,
                                     self.typeQnameId.get(modelType.typeDerivedFrom)
                                     if isinstance(modelType.typeDerivedFrom, ModelType) else None)
                                    for modelType in filingDocumentTypes
                                    if modelType.modelDocument in self.documentIds)
                              )
        for typeId, docId, qn in table:
            self.typeQnameId[qname(qn)] = typeId
        # back-patch derived_from ids between types inserted in the same batch
        updatesToDerivedFrom = set()
        for modelType in filingDocumentTypes:
            if isinstance(modelType.typeDerivedFrom, ModelType):
                typeDerivedFrom = modelType.typeDerivedFrom
                if (typeDerivedFrom in filingDocumentTypes and
                    modelType.qname in self.typeQnameId and
                    typeDerivedFrom.qname in self.typeQnameId):
                    updatesToDerivedFrom.add( (self.typeQnameId[modelType.qname],
                                               self.typeQnameId[typeDerivedFrom.qname]) )
        if updatesToDerivedFrom:
            self.updateTable('data_type',
                             ('data_type_id', 'derived_from_type_id'),
                             updatesToDerivedFrom)
        existingDocumentUsedTypes.clear() # dereference
        filingDocumentTypes.clear() # dereference
        self.aspectQnameId = {}
        # get existing element IDs
        if existingDocumentUsedAspects:
            table = self.getTable('aspect', 'aspect_id',
                                  ('document_id', 'qname',),
                                  ('document_id', 'qname',),
                                  tuple((self.documentIds[concept.modelDocument],
                                         concept.qname.clarkNotation)
                                        for concept in existingDocumentUsedAspects
                                        if concept.modelDocument in self.documentIds),
                                  checkIfExisting=True,
                                  insertIfNotMatched=False)
            for aspectId, docId, qn in table:
                self.aspectQnameId[qname(qn)] = aspectId
        aspects = []
        for concept in filingDocumentAspects:
            niceType  = concept.niceType
            # truncate to the 128-char base_type column limit
            if niceType is not None and len(niceType) > 128:
                niceType = niceType[:128]
            if concept.modelDocument in self.documentIds:
                aspects.append((self.documentIds[concept.modelDocument],
                                concept.id,
                                elementChildSequence(concept),
                                concept.qname.clarkNotation,
                                concept.name,
                                self.typeQnameId.get(concept.typeQname),
                                niceType[:128] if niceType is not None else None,
                                self.aspectQnameId.get(concept.substitutionGroupQname),
                                concept.balance,
                                concept.periodType,
                                concept.isAbstract,
                                concept.isNillable,
                                concept.isNumeric,
                                concept.isMonetary,
                                concept.isTextBlock))
        table = self.getTable('aspect', 'aspect_id',
                              ('document_id', 'xml_id', 'xml_child_seq',
                               'qname', 'name', 'datatype_id', 'base_type', 'substitution_group_aspect_id',
                               'balance', 'period_type', 'abstract', 'nillable',
                               'is_numeric', 'is_monetary', 'is_text_block'),
                              ('document_id', 'qname'),
                              aspects
                              )
        for aspectId, docId, qn in table:
            self.aspectQnameId[qname(qn)] = aspectId
        # back-patch substitution-group ids between aspects inserted together
        updatesToSubstitutionGroup = set()
        for concept in filingDocumentAspects:
            if concept.substitutionGroup in filingDocumentAspects and concept.modelDocument in self.documentIds:
                updatesToSubstitutionGroup.add( (self.aspectQnameId[concept.qname],
                                                 self.aspectQnameId.get(concept.substitutionGroupQname)) )
        # update derivedFrom's of newly added types
        if updatesToSubstitutionGroup:
            self.updateTable('aspect',
                             ('aspect_id', 'substitution_group_aspect_id'),
                             updatesToSubstitutionGroup)
        filingDocumentAspects.clear() # dereference
        existingDocumentUsedAspects.clear() # dereference
def insertArcroleTypes(self):
self.showStatus("insert arcrole types")
arcroleTypesByIds = set((self.documentIds[arcroleType.modelDocument],
arcroleType.roleURI)
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'arcrole_uri'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0],
arcroleTypeIDs[1]
)
for arcroleTypeIDs in arcroleTypesByIds),
checkIfExisting=True,
insertIfNotMatched=False)
self.arcroleTypeIds = {}
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
arcroleTypesByIds = dict(((self.documentIds[arcroleType.modelDocument],
arcroleType.arcroleURI),
arcroleType)
for arcroleTypes in self.modelXbrl.arcroleTypes.values()
for arcroleType in arcroleTypes
if arcroleType.modelDocument not in self.existingDocumentIds)
table = self.getTable('arcrole_type', 'arcrole_type_id',
('document_id', 'xml_id', 'xml_child_seq', 'arcrole_uri', 'cycles_allowed', 'definition'),
('document_id', 'arcrole_uri'),
tuple((arcroleTypeIDs[0],
arcroleType.id,
elementChildSequence(arcroleType),
arcroleType.arcroleURI,
arcroleType.cyclesAllowed,
arcroleType.definition)
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()))
for arcroleId, docId, uri in table:
self.arcroleTypeIds[(docId, uri)] = arcroleId
table = self.getTable('used_on',
None,
('object_id', 'aspect_id'),
('object_id', 'aspect_id'),
tuple((self.arcroleTypeIds[(arcroleTypeIDs[0], arcroleType.arcroleURI)],
self.aspectQnameId[usedOnQn])
for arcroleTypeIDs, arcroleType in arcroleTypesByIds.items()
for usedOnQn in arcroleType.usedOns
if usedOnQn in self.aspectQnameId),
checkIfExisting=True)
    def insertRoleTypes(self):
        """Insert role type declarations and their used-on aspect links.

        Mirrors insertArcroleTypes: lookup ids for role types in pre-existing
        documents, insert role types of newly filed documents, then record
        usedOn aspects.  Populates self.roleTypeIds keyed by
        (document_id, role_uri).
        """
        self.showStatus("insert role types")
        # role types in existing documents - lookup only
        roleTypesByIds = set((self.documentIds[roleType.modelDocument],
                              roleType.roleURI)
                             for roleTypes in self.modelXbrl.roleTypes.values()
                             for roleType in roleTypes
                             if roleType.modelDocument in self.existingDocumentIds)
        table = self.getTable('role_type', 'role_type_id',
                              ('document_id', 'role_uri'),
                              ('document_id', 'role_uri'),
                              tuple((roleTypeIDs[0], # doc Id
                                     roleTypeIDs[1]  # role uri
                                     )
                                    for roleTypeIDs in roleTypesByIds),
                              checkIfExisting=True,
                              insertIfNotMatched=False)
        self.roleTypeIds = {}
        for roleId, docId, uri in table:
            self.roleTypeIds[(docId, uri)] = roleId
        # role types declared in newly-filed documents - insert
        roleTypesByIds = dict(((self.documentIds[roleType.modelDocument],
                                roleType.roleURI),
                               roleType)
                              for roleTypes in self.modelXbrl.roleTypes.values()
                              for roleType in roleTypes
                              if roleType.modelDocument not in self.existingDocumentIds)
        table = self.getTable('role_type', 'role_type_id',
                              ('document_id', 'xml_id', 'xml_child_seq', 'role_uri', 'definition'),
                              ('document_id', 'role_uri'),
                              tuple((roleTypeIDs[0], # doc Id
                                     roleType.id,
                                     elementChildSequence(roleType),
                                     roleTypeIDs[1], # role uri
                                     roleType.definition)
                                    for roleTypeIDs, roleType in roleTypesByIds.items()))
        for roleId, docId, uri in table:
            self.roleTypeIds[(docId, uri)] = roleId
        # record which aspects each new role type may be used on
        table = self.getTable('used_on',
                              None, # no record id in this table
                              ('object_id', 'aspect_id'),
                              ('object_id', 'aspect_id'),
                              tuple((self.roleTypeIds[(roleTypeIDs[0], roleType.roleURI)],
                                     self.aspectQnameId[usedOnQn])
                                    for roleTypeIDs, roleType in roleTypesByIds.items()
                                    for usedOnQn in roleType.usedOns
                                    if usedOnQn in self.aspectQnameId),
                              checkIfExisting=True)
    def insertResources(self):
        """Insert label/reference/footnote resources for relevant arcroles.

        Considers only arcroles whose relationships have resource endpoints,
        and skips taxonomy (non-instance) arcroles when the taxonomy rel-sets
        owner is already stored.  Populates self.resourceId keyed by
        (document_id, xml_child_seq).
        """
        self.showStatus("insert resources")
        # deduplicate resources by (document id, object index)
        arcroles = [arcrole
                    # TBD: check how linkbases in instance are to be handled
                    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                    if ELR is None and linkqname is None and arcqname is None and not arcrole.startswith("XBRL-")
                       and self.arcroleHasResource[arcrole]
                       and (self.arcroleInInstance[arcrole] or not self.isExistingTaxonomyRelSetsOwner)]
        uniqueResources = dict(((self.documentIds[resource.modelDocument],
                                 resource.objectIndex), resource)
                               for arcrole in arcroles
                               for rel in self.modelXbrl.relationshipSet(arcrole).modelRelationships
                               for resource in (rel.fromModelObject, rel.toModelObject)
                               if isinstance(resource, ModelResource))
        table = self.getTable('resource', 'resource_id',
                              ('document_id', 'xml_id', 'xml_child_seq', 'qname', 'role', 'value', 'xml_lang'),
                              ('document_id', 'xml_child_seq'),
                              tuple((self.documentIds[resource.modelDocument],
                                     resource.id,
                                     elementChildSequence(resource),
                                     resource.qname.clarkNotation,
                                     resource.role,
                                     resource.textValue,
                                     resource.xmlLang)
                                    for resource in uniqueResources.values()),
                              checkIfExisting=True)
        self.resourceId = dict(((docId, xml_child_seq), id)
                               for id, docId, xml_child_seq in table)
        uniqueResources.clear() # dereference
def modelObjectId(self, modelObject):
if isinstance(modelObject, ModelConcept):
return self.aspectQnameId.get(modelObject.qname)
elif isinstance(modelObject, ModelType):
return self.aspectTypeIds.get(modelObject.qname)
elif isinstance(modelObject, ModelResource):
return self.resourceId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
elif isinstance(modelObject, ModelFact):
return self.factDataPointId.get((self.documentIds[modelObject.modelDocument],
elementChildSequence(modelObject)))
else:
return None
    def insertRelationships(self):
        """Insert relationship sets, relationships, and root relationships.

        Creates one relationship_set row per keyed base set, walks each
        set's trees depth-first assigning a global tree sequence number per
        set, inserts the relationships, and records the depth-1 (root)
        relationships in the root table.
        """
        self.showStatus("insert relationship sets")
        table = self.getTable('relationship_set', 'relationship_set_id',
                              ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                              ('document_id', 'link_role', 'arc_role', 'link_qname', 'arc_qname'),
                              tuple((self.documentIds[self.modelXbrl.modelDocument if self.arcroleInInstance[arcrole]
                                                      else self.taxonomyRelSetsOwner],
                                     ELR,
                                     arcrole,
                                     linkqname.clarkNotation,
                                     arcqname.clarkNotation)
                                    for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys()
                                    if ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
                                       and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])))
        self.relSetId = dict(((linkRole, arcRole, lnkQn, arcQn), id)
                             for id, document_id, linkRole, arcRole, lnkQn, arcQn in table)
        # do tree walk to build relationships with depth annotated, no targetRole navigation
        dbRels = []
        def walkTree(rels, seq, depth, relationshipSet, visited, dbRels, relSetId):
            # depth-first walk; visited guards against directed cycles
            for rel in rels:
                if rel not in visited and isinstance(rel.toModelObject, ModelObject):
                    visited.add(rel)
                    dbRels.append((rel, seq, depth, relSetId))
                    seq += 1
                    seq = walkTree(relationshipSet.fromModelObject(rel.toModelObject), seq, depth+1, relationshipSet, visited, dbRels, relSetId)
                    visited.remove(rel)
            return seq
        for arcrole, ELR, linkqname, arcqname in self.modelXbrl.baseSets.keys():
            if (ELR and linkqname and arcqname and not arcrole.startswith("XBRL-")
                and (not self.isExistingTaxonomyRelSetsOwner or self.arcroleInInstance[arcrole])):
                relSetId = self.relSetId[(ELR,
                                          arcrole,
                                          linkqname.clarkNotation,
                                          arcqname.clarkNotation)]
                relationshipSet = self.modelXbrl.relationshipSet(arcrole, ELR, linkqname, arcqname)
                seq = 1
                for rootConcept in relationshipSet.rootConcepts:
                    seq = walkTree(relationshipSet.fromModelObject(rootConcept), seq, 1, relationshipSet, set(), dbRels, relSetId)
        def resourceResourceId(resource):
            # NOTE(review): appears unused here, and its 3-element key does not
            # match self.resourceId's (document_id, xml_child_seq) keys, so it
            # would always return None — looks stale; confirm before removing.
            if isinstance(resource, ModelResource):
                return self.resourceId.get((self.documentIds[resource.modelDocument],
                                            resource.sourceline,
                                            resource.objectIndex))
            else:
                return None
        table = self.getTable('relationship', 'relationship_id',
                              ('document_id', 'xml_id', 'xml_child_seq',
                               'relationship_set_id', 'reln_order',
                               'from_id', 'to_id', 'calculation_weight',
                               'tree_sequence', 'tree_depth', 'preferred_label_role'),
                              ('relationship_set_id', 'document_id', 'xml_child_seq'),
                              tuple((self.documentIds[rel.modelDocument],
                                     rel.id,
                                     elementChildSequence(rel.arcElement),
                                     relSetId,
                                     self.dbNum(rel.order),
                                     self.modelObjectId(rel.fromModelObject),
                                     self.modelObjectId(rel.toModelObject),
                                     self.dbNum(rel.weight), # none if no weight
                                     sequence,
                                     depth,
                                     rel.preferredLabel)
                                    for rel, sequence, depth, relSetId in dbRels
                                    if isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
        self.relationshipId = dict(((docId,xml_child_seq), relationshipId)
                                   for relationshipId, relSetId, docId, xml_child_seq in table)
        # depth-1 relationships are the tree roots
        table = self.getTable('root', None,
                              ('relationship_set_id', 'relationship_id'),
                              ('relationship_set_id', 'relationship_id'),
                              tuple((relSetId,
                                     self.relationshipId[self.documentIds[rel.modelDocument],
                                                         elementChildSequence(rel.arcElement)])
                                    for rel, sequence, depth, relSetId in dbRels
                                    if depth == 1 and
                                       isinstance(rel.fromModelObject, ModelObject) and isinstance(rel.toModelObject, ModelObject)))
        del dbRels[:]   # dererefence
    def insertDataPoints(self):
        """Insert the filing's facts and their supporting dimension tables.

        If the filing was previously loaded, first deletes its prior data
        points, entity identifiers, periods, aspect value selections, units
        and table-data-point rows.  Then inserts units, entity identifiers,
        periods, aspect-value selection sets, and finally the facts
        themselves (tuples recursively nest via parent_datapoint_id).
        Populates self.unitId, self.entityIdentifierId, self.periodId and
        self.factDataPointId.
        """
        reportId = self.reportId
        if self.filingPreviouslyInDB:
            self.showStatus("deleting prior data points of this report")
            # remove prior facts
            self.lockTables(("data_point", "entity_identifier", "period", "aspect_value_selection",
                             "aspect_value_selection_set", "unit_measure", "unit",
                             "table_data_points"))
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("data_point"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("entity_identifier"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("period"), reportId),
                         close=False, fetch=False)
            # child aspect_value_selection rows deleted via join to their set
            self.execute("DELETE from {0} "
                         "USING {1} "
                         "WHERE {1}.report_id = {2} AND {0}.aspect_value_selection_id = {1}.aspect_value_selection_id"
                         .format( self.dbTableName("aspect_value_selection"),
                                  self.dbTableName("aspect_value_selection_set"),
                                  reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1};"
                         .format( self.dbTableName("aspect_value_selection_set"), reportId),
                         close=False, fetch=False)
            # unit_measure rows deleted via join to their unit
            self.execute("DELETE from {0} "
                         "USING {1} "
                         "WHERE {1}.report_id = {2} AND {0}.unit_id = {1}.unit_id"
                         .format( self.dbTableName("unit_measure"),
                                  self.dbTableName("unit"),
                                  reportId),
                         close=False, fetch=False)
            self.execute("DELETE from {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("unit"), reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {0} WHERE {0}.report_id = {1}"
                         .format( self.dbTableName("table_data_points"), reportId),
                         close=False, fetch=False)
        self.showStatus("insert data points")
        # units: deduplicated by md5 hash of their measures
        table = self.getTable('unit', 'unit_id',
                              ('report_id', 'xml_id', 'xml_child_seq', 'measures_hash'),
                              ('report_id', 'measures_hash'),
                              tuple((reportId,
                                     unit.id,
                                     elementChildSequence(unit),
                                     unit.md5hash)
                                    for unit in dict((unit.md5hash,unit) # deduplicate by md5hash
                                                     for unit in self.modelXbrl.units.values()).values()))
        self.unitId = dict(((_reportId, measuresHash), id)
                           for id, _reportId, measuresHash in table)
        # measures: index 0 = multiplicand measures, index 1 = divisor measures
        table = self.getTable('unit_measure',
                              None,
                              ('unit_id', 'qname', 'is_multiplicand'),
                              ('unit_id', 'qname', 'is_multiplicand'),
                              tuple((self.unitId[(reportId,unit.md5hash)],
                                     measure.clarkNotation,
                                     i == 0)
                                    for unit in self.modelXbrl.units.values()
                                    for i in range(2)
                                    for measure in unit.measures[i]))
        table = self.getTable('entity_identifier', 'entity_identifier_id',
                              ('report_id', 'scheme', 'identifier'),
                              ('report_id', 'scheme', 'identifier'),
                              set((reportId,
                                   cntx.entityIdentifier[0],
                                   cntx.entityIdentifier[1])
                                  for cntx in self.modelXbrl.contexts.values()),
                              checkIfExisting=True) # entities shared across multiple reports
        self.entityIdentifierId = dict(((_reportId, entScheme, entIdent), id)
                                       for id, _reportId, entScheme, entIdent in table)
        table = self.getTable('period', 'period_id',
                              ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                              ('report_id', 'start_date', 'end_date', 'is_instant', 'is_forever'),
                              set((reportId,
                                   cntx.startDatetime if cntx.isStartEndPeriod else None,
                                   cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                                   cntx.isInstantPeriod,
                                   cntx.isForeverPeriod)
                                  for cntx in self.modelXbrl.contexts.values()),
                              checkIfExisting=True) # periods shared across multiple reports
        self.periodId = dict(((_reportId, start, end, isInstant, isForever), id)
                             for id, _reportId, start, end, isInstant, isForever in table)
        def cntxDimsSet(cntx):
            # hashable signature of a context's dimension-value assignments
            return frozenset((self.aspectQnameId[modelDimValue.dimensionQname],
                              self.aspectQnameId.get(modelDimValue.memberQname),
                              modelDimValue.isTyped,
                              modelDimValue.stringValue if modelDimValue.isTyped else None)
                             for modelDimValue in cntx.qnameDims.values()
                             if modelDimValue.dimensionQname in self.aspectQnameId)
        cntxAspectValueSelectionSet = dict((cntx, cntxDimsSet(cntx))
                                           for cntx in self.modelXbrl.contexts.values())
        aspectValueSelections = set(aspectValueSelectionSet
                                    for cntx, aspectValueSelectionSet in cntxAspectValueSelectionSet.items()
                                    if aspectValueSelectionSet)
        self.lockTables(("aspect_value_selection_set",))
        self.execute("DELETE FROM {0} WHERE report_id = {1}"
                     .format(self.dbTableName("aspect_value_selection_set"), reportId),
                     close=False, fetch=False)
        # one placeholder set row per distinct dimension signature
        table = self.getTable('aspect_value_selection_set', 'aspect_value_selection_id',
                              ('report_id', ),
                              ('report_id', ),
                              tuple((reportId,)
                                    for aspectValueSelection in aspectValueSelections)
                              )
        table = self.execute("SELECT aspect_value_selection_id, report_id from {0} "
                             "WHERE report_id = {1}"
                             .format(self.dbTableName("aspect_value_selection_set"), reportId))
        # pair each signature with a row id; pairing order is arbitrary since
        # all rows are identical apart from their generated ids
        aspectValueSelectionSets = dict((aspectValueSelections.pop(), id)
                                        for id, _reportId in table)
        cntxAspectValueSelectionSetId = dict((cntx, aspectValueSelectionSets[_cntxDimsSet])
                                             for cntx, _cntxDimsSet in cntxAspectValueSelectionSet.items()
                                             if _cntxDimsSet)
        table = self.getTable('aspect_value_selection',
                              None,
                              ('aspect_value_selection_id', 'aspect_id', 'aspect_value_id', 'is_typed_value', 'typed_value'),
                              ('aspect_value_selection_id', ),
                              tuple((aspectValueSetId, dimId, dimMbrId, isTyped, typedValue)
                                    for aspectValueSelection, aspectValueSetId in aspectValueSelectionSets.items()
                                    for dimId, dimMbrId, isTyped, typedValue in aspectValueSelection)
                              )
        def insertFactSet(modelFacts, parentDatapointId):
            # insert one level of facts; recurse into tuple children with the
            # tuple's datapoint id as parent
            facts = []
            for fact in modelFacts:
                # only schema-valid facts with a resolved concept are stored
                if fact.concept is not None and getattr(fact, "xValid", UNVALIDATED) >= VALID and fact.qname is not None:
                    cntx = fact.context
                    documentId = self.documentIds[fact.modelDocument]
                    facts.append((reportId,
                                  documentId,
                                  fact.id,
                                  elementChildSequence(fact),
                                  fact.sourceline,
                                  parentDatapointId, # parent ID
                                  self.aspectQnameId.get(fact.qname),
                                  fact.contextID,
                                  self.entityIdentifierId.get((reportId, cntx.entityIdentifier[0], cntx.entityIdentifier[1]))
                                      if cntx is not None else None,
                                  self.periodId.get((reportId,
                                                     cntx.startDatetime if cntx.isStartEndPeriod else None,
                                                     cntx.endDatetime if (cntx.isStartEndPeriod or cntx.isInstantPeriod) else None,
                                                     cntx.isInstantPeriod,
                                                     cntx.isForeverPeriod)) if cntx is not None else None,
                                  cntxAspectValueSelectionSetId.get(cntx) if cntx is not None else None,
                                  self.unitId.get((reportId,fact.unit.md5hash)) if fact.unit is not None else None,
                                  fact.isNil,
                                  fact.precision,
                                  fact.decimals,
                                  roundValue(fact.value, fact.precision, fact.decimals) if fact.isNumeric and not fact.isNil else None,
                                  fact.value
                                  ))
            table = self.getTable('data_point', 'datapoint_id',
                                  ('report_id', 'document_id', 'xml_id', 'xml_child_seq', 'source_line',
                                   'parent_datapoint_id',  # tuple
                                   'aspect_id',
                                   'context_xml_id', 'entity_identifier_id', 'period_id', 'aspect_value_selection_id', 'unit_id',
                                   'is_nil', 'precision_value', 'decimals_value', 'effective_value', 'value'),
                                  ('document_id', 'xml_child_seq'),
                                  facts)
            xmlIdDataPointId = dict(((docId, xml_child_seq), datapointId)
                                    for datapointId, docId, xml_child_seq in table)
            self.factDataPointId.update(xmlIdDataPointId)
            for fact in modelFacts:
                if fact.isTuple:
                    try:
                        insertFactSet(fact.modelTupleFacts,
                                      xmlIdDataPointId[(self.documentIds[fact.modelDocument],
                                                        elementChildSequence(fact))])
                    except KeyError:
                        self.modelXbrl.info("xpDB:warning",
                                            _("Loading XBRL DB: tuple's datapoint not found: %(tuple)s"),
                                            modelObject=fact, tuple=fact.qname)
        self.factDataPointId = {}
        insertFactSet(self.modelXbrl.facts, None)
        # hashes
        if self.tableFacts: # if any entries
            tableDataPoints = []
            for roleType, tableCode, fact in self.tableFacts:
                try:
                    tableDataPoints.append((reportId,
                                            self.roleTypeIds[(self.documentIds[roleType.modelDocument],
                                                              roleType.roleURI)],
                                            tableCode,
                                            self.factDataPointId[(self.documentIds[fact.modelDocument],
                                                                  elementChildSequence(fact))]))
                except KeyError:
                    # print ("missing table data points role or data point")
                    pass
            table = self.getTable('table_data_points', None,
                                  ('report_id', 'object_id', 'table_code', 'datapoint_id'),
                                  ('report_id', 'object_id', 'datapoint_id'),
                                  tableDataPoints)
    def insertValidationResults(self):
        """Persist this report's logged validation messages to the database.

        If the filing was previously loaded, prior ``message`` and
        ``message_reference`` rows for the report are deleted first.  Each
        logging entry then becomes one ``message`` row, and each model object
        a message refers to becomes one ``message_reference`` row linking the
        message to that object's database id.
        """
        reportId = self.reportId
        if self.filingPreviouslyInDB:
            self.showStatus("deleting prior messages of this report")
            # remove prior messages for this report
            self.lockTables(("message", "message_reference"))
            # delete message_reference rows joined through this report's messages
            self.execute("DELETE from {0} "
                         "USING {1} "
                         "WHERE {1}.report_id = {2} AND {1}.message_id = {0}.message_id"
                         .format(self.dbTableName("message_reference"),
                                 self.dbTableName("message"),
                                 reportId),
                         close=False, fetch=False)
            self.execute("DELETE FROM {} WHERE message.report_id = {}"
                         .format(self.dbTableName("message"),reportId),
                         close=False, fetch=False)
        messages = []
        messageRefs = defaultdict(set) # direct link to objects
        for i, logEntry in enumerate(self.loggingEntries):
            sequenceInReport = i+1
            for ref in logEntry['refs']:
                modelObject = self.modelXbrl.modelObject(ref.get('objectId',''))
                # for now just find a concept
                objectId = None
                if isinstance(modelObject, ModelFact):
                    # facts resolve to their data_point row id
                    objectId = self.factDataPointId.get((self.documentIds.get(modelObject.modelDocument),
                                                         elementChildSequence(modelObject)))
                elif isinstance(modelObject, ModelRelationship):
                    # relationships resolve through the relationship-set id cache
                    objectId = self.relSetId.get((modelObject.linkrole,
                                                  modelObject.arcrole,
                                                  modelObject.linkQname.clarkNotation,
                                                  modelObject.arcElement.qname.clarkNotation))
                elif isinstance(modelObject, ModelConcept):
                    objectId = self.aspectQnameId.get(modelObject.qname)
                elif isinstance(modelObject, ModelXbrl):
                    # the whole instance refers to the report itself
                    objectId = reportId
                elif hasattr(modelObject, "modelDocument"):
                    objectId = self.documentIds.get(modelObject.modelDocument)
                if objectId is not None:
                    messageRefs[sequenceInReport].add(objectId)
            messages.append((reportId,
                             sequenceInReport,
                             logEntry['code'],
                             logEntry['level'],
                             logEntry['message']['text']))
        if messages:
            self.showStatus("insert validation messages")
            table = self.getTable('message', 'message_id',
                                  ('report_id', 'sequence_in_report', 'message_code', 'message_level', 'value'),
                                  ('report_id', 'sequence_in_report'),
                                  messages)
            # map sequence numbers back to the generated message_id keys
            messageIds = dict((sequenceInReport, messageId)
                              for messageId, _reportId, sequenceInReport in table)
            table = self.getTable('message_reference', None,
                                  ('message_id', 'object_id'),
                                  ('message_id', 'object_id'),
                                  tuple((messageId,
                                         objectId)
                                        for sequenceInReport, objectIds in messageRefs.items()
                                        for objectId in objectIds
                                        for messageId in (messageIds[sequenceInReport],)))
# Postal abbreviation -> ISO country code for U.S. states/territories ("US")
# and Canadian provinces/territories ("CA").
countryOfState = {
    "AL": "US","AK": "US","AZ": "US","AR": "US","CA": "US","CO": "US", "CT": "US","DE": "US",
    "FL": "US","GA": "US","HI": "US","ID": "US","IL": "US","IN": "US","IA": "US","KS": "US",
    "KY": "US","LA": "US","ME": "US","MD": "US","MA": "US","MI": "US","MN": "US","MS": "US",
    "MO": "US","MT": "US","NE": "US","NV": "US","NH": "US","NJ": "US","NM": "US","NY": "US",
    "NC": "US","ND": "US","OH": "US","OK": "US","OR": "US","PA": "US","RI": "US","SC": "US",
    "SD": "US","TN": "US","TX": "US","UT": "US","VT": "US","VA": "US","WA": "US","WV": "US",
    "WI": "US","WY": "US","DC": "US","PR": "US","VI": "US","AS": "US","GU": "US","MP": "US",
    "AB": "CA","BC": "CA","MB": "CA","NB": "CA","NL": "CA","NS": "CA","ON": "CA","PE": "CA",
    "QC": "CA","SK": "CA","NT": "CA","NU": "CA","YT": "CA"}
| true | true |
f72b45704b6b738dc155c0160abe96949099d9a7 | 13,758 | py | Python | spacegraphcats/catlas/catlas.py | mogproject/spacegraphcats | e21015daa8e2968c3076bd250c553aa20e6d912b | [
"BSD-3-Clause"
] | null | null | null | spacegraphcats/catlas/catlas.py | mogproject/spacegraphcats | e21015daa8e2968c3076bd250c553aa20e6d912b | [
"BSD-3-Clause"
] | null | null | null | spacegraphcats/catlas/catlas.py | mogproject/spacegraphcats | e21015daa8e2968c3076bd250c553aa20e6d912b | [
"BSD-3-Clause"
] | null | null | null | """Data structure for CAtlas."""
import argparse
import cProfile
import os
import sys
import tempfile
import gzip
import copy
from .rdomset import rdomset, domination_graph
from .graph_io import read_from_gxt, write_to_gxt
from .graph import Graph
from spacegraphcats.utils.logging import log_command
from io import TextIOWrapper
from collections import defaultdict
from typing import List, Dict, Set
UPPER_RADIUS = 1
class Project(object):
    """Methods for coordinating whole projects."""

    def __init__(self, directory, r, checkpoint=True):
        """
        Make a project in directory at radius r.

        This object stores the intermediate variables for the CAtlas building
        so that they can be checkpointed as necessary.
        """
        self.dir = directory          # project directory for all input/output files
        self.r = r                    # domination radius for the bottom level
        self.checkpoint = checkpoint  # whether checkpoints may be read/written
        self.graph = None             # graph for the level currently being built
        self.idx = 0                  # next unused CAtlas node id
        self.level = 1                # level currently being built (leaves are 1)
        self.level_nodes = None       # vertex -> CAtlas node of the previous level
        self.root = CAtlas(self.idx, -1, self.level, list())
        # project file names
        self.domfilename = os.path.join(self.dir, "first_doms.txt")
        self.graphfilename = os.path.join(self.dir, "cdbg.gxt")
        self.catlasfilename = os.path.join(self.dir, "catlas.csv")

    def existing_checkpoints(self):
        """Get the existing checkpoint files.

        Returns the sorted list of levels for which a checkpoint with this
        project's radius exists in the project directory.
        """
        files = []
        for f in os.listdir(self.dir):
            name, ext = os.path.splitext(f)
            if ext == ".checkpoint":
                # checkpoint files are named "<radius>_<level>.checkpoint"
                r, level = map(int, name.split("_"))
                if r == self.r:
                    files.append(level)
        return list(sorted(files))

    def cp_name(self, level):
        """Return the name of the checkpoint file after level level."""
        return os.path.join(self.dir,
                            "{}_{}.checkpoint".format(self.r, level))

    def load_furthest_checkpoint(self):
        """Load the checkpoint that is furthest along."""
        existing = self.existing_checkpoints()
        # if there are no checkpoints or we don't want to load from one,
        # just read G from the graph file
        if len(existing) == 0 or not self.checkpoint:
            print("Loading graph from {}".format(self.graphfilename))
            # we only need to set the graph variable since index, level, and
            # previous nodes have the proper values by default
            with open(self.graphfilename, 'r') as graph_file:
                self.graph = read_from_gxt(graph_file, self.r, False)
        else:
            self.load_checkpoint(existing[-1])

    def load_checkpoint(self, level):
        """Read cached information from a partial catlas computation."""
        if not self.checkpoint:
            raise IOError("I told you I didn't want to load from checkpoint!")
        print("Loading results of building level {}".format(level))
        # the temp file contains catlas and graph information. To use the
        # readers for catlas and graph, we need to temporarily split them into
        # separate files
        tmpf = tempfile.TemporaryFile(mode='r+')
        infile = self.cp_name(level)
        with gzip.open(infile, 'rt') as f:
            # read until the end of the catlas
            for line in f:
                if line == "###\n":
                    break
                tmpf.write(line)
            # once we are at the graph section, start reading from there
            self.graph = read_from_gxt(f, radius=UPPER_RADIUS, directed=False,
                                       sequential=False)
        # move back to the beginning of the temporary file and read the
        # catlas
        tmpf.seek(0)
        root = CAtlas.read(tmpf)
        tmpf.close()
        # the checkpointed CAtlas has a dummy root. The nodes in the
        # current level need to be removed from the root because we haven't
        # finished constructing their parents.
        unfinished_idx = -1*len(self.graph)
        unfinished = root.children[unfinished_idx:]
        root.children = root.children[:unfinished_idx]
        self.level_nodes = {node.vertex: node for node in unfinished}
        self.idx = root.idx
        self.level = root.level - 1
        self.root = root

    def _save(self):
        """Method used by the thread to write out."""
        outfile = self.cp_name(self.level - 1)
        print("Writing to file {}".format(outfile))
        with gzip.open(outfile, 'wt') as f:
            # make a dummy root to write the catlas using catlas.write method
            # we add all current level nodes as children of the root
            root = CAtlas(self.idx, -1, self.level,
                          copy.copy(self.root.children))
            root.children.extend(self.level_nodes.values())
            root.write(f)
            # "###" separates the catlas section from the graph section
            f.write("###\n")
            write_to_gxt(f, self.graph)

    def save_checkpoint(self):
        """Write out a partial computation."""
        if not self.checkpoint:
            return
        else:
            self._save()
class CAtlas(object):
    """Hierarchical atlas for querying graphs."""

    # stop adding levels once a level has at most this many nodes
    LEVEL_THRESHOLD = 10

    def __init__(self, idx, vertex, level, children):
        """
        Construct a CAtlas node.

        Arguments:
            idx: Integer identifier of the node. A CAtlas with n nodes will
                 have ids 0,1,...,n-1. The root will always have id n-1.
            vertex: Name of vertex in the cDBG
            level: The height of the node in the hierarchy. The leaves are at
                   level 1, their parents at level 2, etc.
            children: the CAtlas nodes for which this is a parent
        """
        self.idx = idx
        self.vertex = vertex
        self.children = children
        self.level = level

    @staticmethod
    def build(proj, benchmark_only=False):
        """Build a CAtlas at a given radius.

        Returns the root CAtlas node, or None when benchmarking or when the
        final level produced no nodes.
        """
        # keep creating progressively smaller graphs until we hit the level
        # threshold or steady state
        while True:
            print()
            # the base level should have a large radius, others are just 1
            if proj.level == 1:
                r = proj.r
            else:
                r = UPPER_RADIUS
            # build the current level
            nodes, domgraph, dominated = CAtlas._build_level(proj.graph,
                                                             r,
                                                             proj.level,
                                                             proj.idx,
                                                             proj.level_nodes)
            print("Catlas level {} complete".format(proj.level))
            # at the bottom level we need to write out the domination
            # assignment
            if proj.level == 1 and not benchmark_only:
                with open(proj.domfilename, 'w') as domfile:
                    # one line per dominator: "<dominator> <dominated>..."
                    for v, shadow in dominated.items():
                        domstr = str(v)
                        for u in shadow:
                            domstr += " {}".format(u)
                        domstr += "\n"
                        domfile.write(domstr)
            # increment the index and level now so they are correctly adjusted
            # if we happen to return
            proj.idx += len(nodes)
            proj.level += 1
            # Keeping isolated vertices as parents of themselves blows up the
            # CAtlas size unnecessarily. We need to immediately make them
            # children of the root.
            for v in dominated:
                if v not in domgraph:
                    proj.root.children.append(nodes.pop(v))
            # quit if our level is sufficiently small
            if len(domgraph) <= CAtlas.LEVEL_THRESHOLD or \
                    len(domgraph) == len(proj.graph):
                break
            # prep for the next iteration
            proj.graph = domgraph
            proj.level_nodes = nodes
            # write level results to the checkpoint file if applicable
            proj.save_checkpoint()
        if benchmark_only:
            return None
        if not nodes:
            return None
        # place all remaining nodes as children of the root
        proj.root.children.extend(nodes.values())
        proj.root.level = proj.level
        proj.root.vertex = list(nodes.keys())[0]
        proj.root.idx = proj.idx
        return proj.root

    @staticmethod
    def _build_level(graph: Graph, radius: int, level: int, min_id: int=0,
                     prev_nodes: Dict[int, "CAtlas"]=None):
        """Build one CAtlas level.

        Returns (nodes, domgraph, dominated) where nodes maps each dominating
        vertex to its new CAtlas node.
        """
        # find the domgraph of the current domgraph
        domset = rdomset(graph, radius)
        # dominated maps dominating vertices to a list of the vertices they
        # optimally dominate
        domgraph, dominated = domination_graph(graph, domset, radius)
        # create the CAtlas nodes
        nodes = {}  # type: Dict[int, CAtlas]
        for idx, v in enumerate(domset):
            # if no previous nodes were supplied, we assume we are on the
            # bottom level and thus the children field is empty
            if prev_nodes is None:
                children = []  # type: List[CAtlas]
            else:
                children = [prev_nodes[u] for u in dominated[v]]
            nodes[v] = CAtlas(min_id+idx, v, level, children)
        return nodes, domgraph, dominated

    def leaves(self, visited: Set["CAtlas"]=None) -> Set["CAtlas"]:
        """Find the descendants of this node with no children."""
        # this function is recursive so we need to keep track of nodes we
        # already visited
        if visited is None:
            visited = set([self])
        # base case is level 0
        if self.level == 1:
            return set([self])
        # otherwise gather the leaves of the children
        res = set()  # type: Set[CAtlas]
        for c in self.children:
            if c not in visited:
                visited.add(c)
                res |= c.leaves(visited)
        return res

    def write(self, outfile: TextIOWrapper):
        """Write the connectivity of the CAtlas to file."""
        # doesn't matter how we traverse the graph, so we use DFS for ease of
        # implementation
        stack = [self]
        seen = set()
        while len(stack) > 0:
            # remove from the stack
            curr = stack.pop()
            # write node information as "idx,vertex,level,child idx list"
            child_str = " ".join(str(child.idx) for child in curr.children)
            outfile.write("{},{},{},{}\n".format(curr.idx,
                                                 curr.vertex,
                                                 curr.level,
                                                 child_str))
            # all nodes already seen don't get re-added
            seen.add(curr)
            stack.extend(filter(lambda x: x not in seen, curr.children))

    @classmethod
    def read(cls, catlas_file):
        """Load the catlas Directed Acyclic Graph.

        Returns the root node (the one with the largest id).
        """
        children = []
        nodes = []
        # load everything from the catlas file
        for line in catlas_file:
            catlas_node, cdbg_node, level, beneath = line.strip().split(',')
            level = int(level)
            catlas_node = int(catlas_node)
            cdbg_node = int(cdbg_node)
            # extend arrays as necessary
            if len(children) <= catlas_node:
                for i in range(catlas_node - len(children) + 1):
                    children.append([])
                    nodes.append(None)
            # parse out the children
            beneath = beneath.strip()
            if beneath:
                beneath = beneath.split(' ')
                children[catlas_node].extend(map(int, beneath))
            # make the new node with empty children
            node = cls(catlas_node, cdbg_node, level, [])
            nodes[catlas_node] = node
        # update the nodes with pointers to their children
        for i, n in enumerate(nodes):
            for child in children[n.idx]:
                n.children.append(nodes[child])
        return nodes[-1]
def main(args):
    """Build a CAtlas for the provided input graph.

    Returns 0 on success, -1 when the built catlas is empty.
    """
    # set up the project, honoring the checkpoint flag
    proj = Project(args.project, args.radius, not args.no_checkpoint)

    print("reading graph")
    if args.level:
        # an explicit level was requested on the command line
        print("Loading checkpoint at level {}".format(args.level))
        proj.load_checkpoint(args.level)
    else:
        # otherwise resume from the most advanced checkpoint available
        print("Loading checkpoint")
        proj.load_furthest_checkpoint()
    print("reading complete")

    print("building catlas")
    catlas_root = CAtlas.build(proj)
    if catlas_root is None:
        print("ERROR: catlas is empty!? exiting.")
        return -1
    print("catlas built")

    print("writing graph")
    with open(proj.catlasfilename, 'w') as out:
        catlas_root.write(out)
    return 0
if __name__ == "__main__":
    # command line: catlas.py <project_dir> <radius> [-n] [-l LEVEL]
    parser = argparse.ArgumentParser()
    parser.add_argument("project", help="Project directory",
                        type=str)
    parser.add_argument("radius", help="Catlas radius", type=int)
    parser.add_argument("-n", "--no_checkpoint", action='store_true',
                        help="Do not read or write checkpoints")
    parser.add_argument("-l", "--level", type=int,
                        help="Level at which to load the checkpoint."
                             "Defaults to highest level saved when not invoked.")
    args = parser.parse_args()
    exit_val = main(args)
    # prof = cProfile.Profile()
    # prof.run("main(args)")
    # prof.print_stats('tottime')
    # record this invocation in the project log before exiting
    log_command(args.project, sys.argv)
    sys.exit(exit_val)
| 36.590426 | 79 | 0.559674 |
import argparse
import cProfile
import os
import sys
import tempfile
import gzip
import copy
from .rdomset import rdomset, domination_graph
from .graph_io import read_from_gxt, write_to_gxt
from .graph import Graph
from spacegraphcats.utils.logging import log_command
from io import TextIOWrapper
from collections import defaultdict
from typing import List, Dict, Set
UPPER_RADIUS = 1
class Project(object):
def __init__(self, directory, r, checkpoint=True):
self.dir = directory
self.r = r
self.checkpoint = checkpoint
self.graph = None
self.idx = 0
self.level = 1
self.level_nodes = None
self.root = CAtlas(self.idx, -1, self.level, list())
self.domfilename = os.path.join(self.dir, "first_doms.txt")
self.graphfilename = os.path.join(self.dir, "cdbg.gxt")
self.catlasfilename = os.path.join(self.dir, "catlas.csv")
def existing_checkpoints(self):
files = []
for f in os.listdir(self.dir):
name, ext = os.path.splitext(f)
if ext == ".checkpoint":
r, level = map(int, name.split("_"))
if r == self.r:
files.append(level)
return list(sorted(files))
def cp_name(self, level):
return os.path.join(self.dir,
"{}_{}.checkpoint".format(self.r, level))
def load_furthest_checkpoint(self):
existing = self.existing_checkpoints()
# just read G from the graph file
if len(existing) == 0 or not self.checkpoint:
print("Loading graph from {}".format(self.graphfilename))
# we only need to set the graph variable since index, level, and
# previous nodes have the proper values by default
with open(self.graphfilename, 'r') as graph_file:
self.graph = read_from_gxt(graph_file, self.r, False)
else:
self.load_checkpoint(existing[-1])
def load_checkpoint(self, level):
if not self.checkpoint:
raise IOError("I told you I didn't want to load from checkpoint!")
print("Loading results of building level {}".format(level))
tmpf = tempfile.TemporaryFile(mode='r+')
infile = self.cp_name(level)
with gzip.open(infile, 'rt') as f:
for line in f:
if line == "###\n":
break
tmpf.write(line)
self.graph = read_from_gxt(f, radius=UPPER_RADIUS, directed=False,
sequential=False)
tmpf.seek(0)
root = CAtlas.read(tmpf)
tmpf.close()
# finished constructing their parents.
unfinished_idx = -1*len(self.graph)
unfinished = root.children[unfinished_idx:]
root.children = root.children[:unfinished_idx]
self.level_nodes = {node.vertex: node for node in unfinished}
self.idx = root.idx
self.level = root.level - 1
self.root = root
def _save(self):
outfile = self.cp_name(self.level - 1)
print("Writing to file {}".format(outfile))
with gzip.open(outfile, 'wt') as f:
# make a dummy root to write the catlas using catlas.write method
# we add all current level nodes as children of the root
root = CAtlas(self.idx, -1, self.level,
copy.copy(self.root.children))
root.children.extend(self.level_nodes.values())
root.write(f)
f.write("###\n")
write_to_gxt(f, self.graph)
def save_checkpoint(self):
if not self.checkpoint:
return
else:
self._save()
class CAtlas(object):
LEVEL_THRESHOLD = 10
def __init__(self, idx, vertex, level, children):
self.idx = idx
self.vertex = vertex
self.children = children
self.level = level
@staticmethod
def build(proj, benchmark_only=False):
# keep creating progressively smaller graphs until we hit the level
# threshold or steady state
while True:
print()
# the base level should have a large radius, others are just 1
if proj.level == 1:
r = proj.r
else:
r = UPPER_RADIUS
# build the current level
nodes, domgraph, dominated = CAtlas._build_level(proj.graph,
r,
proj.level,
proj.idx,
proj.level_nodes)
print("Catlas level {} complete".format(proj.level))
# at the bottom level we need to write out the domination
# assignment
if proj.level == 1 and not benchmark_only:
with open(proj.domfilename, 'w') as domfile:
for v, shadow in dominated.items():
domstr = str(v)
for u in shadow:
domstr += " {}".format(u)
domstr += "\n"
domfile.write(domstr)
# increment the index and level now so they are correctly adjusted
# if we happen to return
proj.idx += len(nodes)
proj.level += 1
# Keeping isolated vertices as parents of themselves blows up the
# CAtlas size unnecessarily. We need to immediately make them
# children of the root.
for v in dominated:
if v not in domgraph:
proj.root.children.append(nodes.pop(v))
# quit if our level is sufficiently small
if len(domgraph) <= CAtlas.LEVEL_THRESHOLD or \
len(domgraph) == len(proj.graph):
break
# prep for the next iteration
proj.graph = domgraph
proj.level_nodes = nodes
# write level results to the checkpoint file if applicable
proj.save_checkpoint()
if benchmark_only:
return None
if not nodes:
return None
# place all remaining nodes as children of the root
proj.root.children.extend(nodes.values())
proj.root.level = proj.level
proj.root.vertex = list(nodes.keys())[0]
proj.root.idx = proj.idx
return proj.root
@staticmethod
def _build_level(graph: Graph, radius: int, level: int, min_id: int=0,
prev_nodes: List[int]=None):
# find the domgraph of the current domgraph
domset = rdomset(graph, radius)
# dominated maps dominating vertices to a list of the vertices they
# optimally dominate
domgraph, dominated = domination_graph(graph, domset, radius)
# create the CAtlas nodes
nodes = {} # type: Dict[int, CAtlas]
for idx, v in enumerate(domset):
# if no previous nodes were supplied, we assume we are on the
# bottom level and thus the children field is empty
if prev_nodes is None:
children = [] # type: List[int]
else:
children = [prev_nodes[u] for u in dominated[v]]
nodes[v] = CAtlas(min_id+idx, v, level, children)
return nodes, domgraph, dominated
def leaves(self, visited: Set[object]=None) -> Set[object]:
# this function is recursive so we need to keep track of nodes we
# already visited
if visited is None:
visited = set([self])
# base case is level 0
if self.level == 1:
return set([self])
# otherwise gather the leaves of the children
res = set() # type: Set[object]
for c in self.children:
if c not in visited:
visited.add(c)
res |= c.leaves(visited)
return res
def write(self, outfile: TextIOWrapper):
# doesn't matter how we traverse the graph, so we use DFS for ease of
stack = [self]
seen = set()
while len(stack) > 0:
curr = stack.pop()
child_str = " ".join(str(child.idx) for child in curr.children)
outfile.write("{},{},{},{}\n".format(curr.idx,
curr.vertex,
curr.level,
child_str))
seen.add(curr)
stack.extend(filter(lambda x: x not in seen, curr.children))
@classmethod
def read(cls, catlas_file):
children = []
nodes = []
# load everything from the catlas file
for line in catlas_file:
catlas_node, cdbg_node, level, beneath = line.strip().split(',')
level = int(level)
catlas_node = int(catlas_node)
cdbg_node = int(cdbg_node)
# extend arrays as necessary
if len(children) <= catlas_node:
for i in range(catlas_node - len(children) + 1):
children.append([])
nodes.append(None)
# parse out the children
beneath = beneath.strip()
if beneath:
beneath = beneath.split(' ')
children[catlas_node].extend(map(int, beneath))
# make the new node with empty children
node = cls(catlas_node, cdbg_node, level, [])
nodes[catlas_node] = node
# update the nodes with pointers to their children
for i, n in enumerate(nodes):
for child in children[n.idx]:
n.children.append(nodes[child])
return nodes[-1]
def main(args):
# unpack command line arguments
r = args.radius
proj_dir = args.project
checkpoint = not args.no_checkpoint
level = args.level
# make checkpoint
proj = Project(proj_dir, r, checkpoint)
print("reading graph")
if level:
print("Loading checkpoint at level {}".format(level))
proj.load_checkpoint(level)
else:
print("Loading checkpoint")
proj.load_furthest_checkpoint()
print("reading complete")
print("building catlas")
cat = CAtlas.build(proj)
if cat is None:
print("ERROR: catlas is empty!? exiting.")
return -1
print("catlas built")
print("writing graph")
with open(proj.catlasfilename, 'w') as cfile:
cat.write(cfile)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("project", help="Project directory",
type=str)
parser.add_argument("radius", help="Catlas radius", type=int)
parser.add_argument("-n", "--no_checkpoint", action='store_true',
help="Do not read or write checkpoints")
parser.add_argument("-l", "--level", type=int,
help="Level at which to load the checkpoint."
"Defaults to highest level saved when not invoked.")
args = parser.parse_args()
exit_val = main(args)
# prof = cProfile.Profile()
# prof.run("main(args)")
# prof.print_stats('tottime')
log_command(args.project, sys.argv)
sys.exit(exit_val)
| true | true |
f72b457385a4ebe5715f4121fb45c8d4b9cc7073 | 4,175 | py | Python | frontera/core/components.py | vy-labs/frontera | ba2c07470111270de090215dc4ebc051bbd8b7e0 | [
"BSD-3-Clause"
] | 1 | 2018-01-17T02:17:55.000Z | 2018-01-17T02:17:55.000Z | frontera/core/components.py | vy-labs/frontera | ba2c07470111270de090215dc4ebc051bbd8b7e0 | [
"BSD-3-Clause"
] | 16 | 2017-02-03T06:31:39.000Z | 2020-03-03T15:15:09.000Z | frontera/core/components.py | vy-labs/frontera | ba2c07470111270de090215dc4ebc051bbd8b7e0 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABCMeta, abstractmethod
class Component(object):
    """
    Interface definition for a frontier component

    The :class:`Component <frontera.core.components.Component>` object is the base class for frontier
    :class:`Middleware <frontera.core.components.Middleware>` and
    :class:`Backend <frontera.core.components.Backend>` objects.

    :class:`FrontierManager <frontera.core.manager.FrontierManager>` communicates with the active components
    using the hook methods listed below.

    Implementations are different for :class:`Middleware <frontera.core.components.Middleware>` and
    :class:`Backend <frontera.core.components.Backend>` objects, therefore methods are not fully described here
    but in their corresponding section.
    """
    # NOTE(review): `__metaclass__` is the Python 2 spelling; Python 3 ignores
    # this attribute, so @abstractmethod is not enforced there — confirm
    # whether Python 2 compatibility is still required.
    __metaclass__ = ABCMeta
    component_name = 'Base Component'

    @abstractmethod
    def frontier_start(self):
        """
        Called when the frontier starts, see :ref:`starting/stopping the frontier <frontier-start-stop>`.
        """
        pass

    @abstractmethod
    def frontier_stop(self):
        """
        Called when the frontier stops, see :ref:`starting/stopping the frontier <frontier-start-stop>`.
        """
        pass

    @abstractmethod
    def add_seeds(self, seeds):
        """
        This method is called when new seeds are added to the frontier.

        :param list seeds: A list of :class:`Request <frontera.core.models.Request>` objects.
        """
        pass

    @abstractmethod
    def page_crawled(self, response):
        """
        This method is called each time a page has been crawled.

        :param object response: The :class:`Response <frontera.core.models.Response>` object for the crawled page.
        """
        pass

    @abstractmethod
    def links_extracted(self, request, links):
        """
        This method is called each time links are extracted from a crawled page.

        :param object request: The :class:`Request <frontera.core.models.Request>` object.
        :param list links: A list of :class:`Request <frontera.core.models.Request>` objects generated from \
        the links extracted for the crawled page.
        """
        pass

    @abstractmethod
    def request_error(self, page, error):
        """
        This method is called each time an error occurs when crawling a page

        :param object page: The crawled with error :class:`Request <frontera.core.models.Request>` object.
        :param string error: A string identifier for the error.
        """
        pass

    @property
    def name(self):
        """
        The component name
        """
        return self.component_name

    @classmethod
    def from_manager(cls, manager):
        """
        Class method called from :class:`FrontierManager <frontera.core.manager.FrontierManager>` passing the
        manager itself.

        Example of usage::

            def from_manager(cls, manager):
                return cls(settings=manager.settings)
        """
        return cls()
class Backend(Component):
    """Interface definition for a Frontier Backend"""
    # NOTE(review): `__metaclass__` is Python-2-only; ignored under Python 3.
    __metaclass__ = ABCMeta
    component_name = 'Base Backend'

    @abstractmethod
    def get_next_requests(self, max_n_requests, **kwargs):
        """
        Returns a list of next requests to be crawled.

        :param int max_n_requests: Maximum number of requests to be returned by this method.
        :param dict kwargs: Parameters from the downloader component.

        :return: list of :class:`Request <frontera.core.models.Request>` objects.
        """
        raise NotImplementedError
class Middleware(Component):
    """Interface definition for a Frontier Middleware"""
    # NOTE(review): `__metaclass__` is Python-2-only; ignored under Python 3.
    __metaclass__ = ABCMeta
    component_name = 'Base Middleware'
class CanonicalSolver(Component):
    """Interface definition for a Frontera Canonical Solver"""
    # NOTE(review): `__metaclass__` is Python-2-only; ignored under Python 3.
    __metaclass__ = ABCMeta
    component_name = 'Base CanonicalSolver'

    @abstractmethod
    def get_canonical_url(self, response):
        """
        Returns canonical URL string for response.

        :param object response: The :class:`Response <frontera.core.models.Response>` object for the crawled page.

        :return: str
        """
        raise NotImplementedError
| 31.390977 | 114 | 0.662275 | from abc import ABCMeta, abstractmethod
class Component(object):
__metaclass__ = ABCMeta
component_name = 'Base Component'
@abstractmethod
def frontier_start(self):
pass
@abstractmethod
def frontier_stop(self):
pass
@abstractmethod
def add_seeds(self, seeds):
pass
@abstractmethod
def page_crawled(self, response):
pass
@abstractmethod
def links_extracted(self, request, links):
pass
@abstractmethod
def request_error(self, page, error):
pass
@property
def name(self):
return self.component_name
@classmethod
def from_manager(cls, manager):
return cls()
class Backend(Component):
__metaclass__ = ABCMeta
component_name = 'Base Backend'
@abstractmethod
def get_next_requests(self, max_n_requests, **kwargs):
raise NotImplementedError
class Middleware(Component):
__metaclass__ = ABCMeta
component_name = 'Base Middleware'
class CanonicalSolver(Component):
__metaclass__ = ABCMeta
component_name = 'Base CanonicalSolver'
@abstractmethod
def get_canonical_url(self, response):
raise NotImplementedError
| true | true |
f72b4575023d53c977d16e195686c5c67ffc5f9f | 3,076 | py | Python | src/hypergraph.py | HKUST-KnowComp/HPHG | 48b704b28c217e4590edf4dd3c7825495dffb76e | [
"MIT"
] | 5 | 2019-08-31T11:00:40.000Z | 2021-04-15T10:05:35.000Z | src/hypergraph.py | HKUST-KnowComp/HPHG | 48b704b28c217e4590edf4dd3c7825495dffb76e | [
"MIT"
] | null | null | null | src/hypergraph.py | HKUST-KnowComp/HPHG | 48b704b28c217e4590edf4dd3c7825495dffb76e | [
"MIT"
] | 1 | 2020-05-27T08:00:53.000Z | 2020-05-27T08:00:53.000Z | import numpy as np
from tqdm import tqdm
class Hypergraph(object):
    """A hyper-graph whose hyperedges are stored as sorted node tuples.

    graph_type '0' marks a homogeneous graph; '1' marks a heterogeneous
    graph whose node ids are partitioned into consecutive ranges by type
    according to ``nums_type``.
    """

    def __init__(self, graph_type='0', nums_type=None):
        self._nodes = {}   # node -> {'neighbors': set of nodes, 'type': int (heterogeneous only)}
        self._edges = {}   # sorted node tuple -> multiplicity
        self.graph_type = graph_type
        self.nums_type = nums_type
        # cumulative upper bounds of the per-type node-id ranges (heterogeneous only)
        self.cumsum = np.cumsum(self.nums_type) if self.graph_type == '1' else None

    def add_edge(self, edge_name, e):
        """Record one occurrence of hyperedge ``e``.

        ``edge_name`` is accepted for interface compatibility but unused.
        Updates each member node's neighbor set and, for heterogeneous
        graphs, its type index.
        """
        edge = tuple(sorted(e))
        self._edges[edge] = self._edges.get(edge, 0) + 1
        for node in edge:
            info = self._nodes.setdefault(node, {})
            info.setdefault('neighbors', set()).update(
                other for other in edge if other != node)
            if self.graph_type == '1':
                # the node's type is the first cumsum boundary it falls under
                for type_idx, bound in enumerate(self.cumsum):
                    if int(node) < bound:
                        break
                info['type'] = type_idx

    def edge_weight(self, e):
        """Multiplicity of hyperedge ``e`` (0 when absent)."""
        return self._edges.get(e, 0)

    def nodes(self):
        """View of all nodes."""
        return self._nodes.keys()

    def edges(self):
        """View of all distinct hyperedges (sorted tuples)."""
        return self._edges.keys()

    def neighbors(self, n):
        """Set of nodes that share at least one hyperedge with ``n``."""
        return self._nodes[n]['neighbors']

    def node_type(self, n):
        """Type index of node ``n`` (heterogeneous graphs only)."""
        return self._nodes[n]['type']
def get_indecom_factor(G, r):
    """
    Get the indecomposable factor of heterogeneous hyper-network G.

    For each of the k node types, compares the fraction of observed edges
    whose (k-1)-sub-edge (the edge with the type-i node removed) occurs more
    than once against the same statistic estimated over m*r random edges.

    :param G: heterogeneous Hypergraph (graph_type '1', cumsum populated)
    :param r: number of random (negative) edges sampled per observed edge
    :return: list of k indecomposable factors (one per node type)
    """
    edges = list(G.edges())
    # set for O(1) membership tests inside the sampling loop below
    # (the original list lookup made the loop O(neg_num * m))
    edge_set = set(edges)
    k = len(G.nums_type)
    m = len(edges)
    # dcnt[i]: occurrence count of each sub-edge obtained by dropping the
    # type-i node from an observed edge
    dcnt = [{} for _ in range(k)]
    for edge in edges:
        edge = list(edge)
        for i in range(k):
            subedge = tuple(sorted(edge[:i]+edge[i+1:]))
            dcnt[i][subedge] = dcnt[i].get(subedge, 0)+1
    # factors[i]: fraction of observed edges whose type-i sub-edge repeats
    factors = [0]*k
    for edge in edges:
        edge = list(edge)
        for i in range(k):
            subedge = tuple(sorted(edge[:i]+edge[i+1:]))
            if dcnt[i].get(subedge, 0) > 1:
                factors[i] += 1
    factors = [factor/m for factor in factors]
    # node-id range boundaries per type for uniform random sampling
    cumsum = [0]+list(G.cumsum)
    ps = [0]*k
    neg_num = m*r  # sample enough random edges
    for _ in tqdm(range(neg_num), ascii=True):
        # draw one node uniformly from each type's id range
        random_edge = []
        for i in range(k):
            random_edge.append(np.random.randint(cumsum[i], cumsum[i+1]))
        # random_edge is already sorted because the per-type id ranges ascend
        for i in range(k):
            subedge = tuple(sorted(random_edge[:i]+random_edge[i+1:]))
            if dcnt[i].get(subedge, 0) > 1 or (dcnt[i].get(subedge, 0) > 0
                                               and tuple(random_edge) not in edge_set):
                ps[i] += 1
    ps = [p/neg_num for p in ps]
    # NOTE(review): raises ZeroDivisionError when factors[i] == 0 — confirm
    # that every type has at least one repeating sub-edge in practice.
    indecom_factors = [ps[i]/factors[i] for i in range(k)]
    return indecom_factors
| 28.747664 | 106 | 0.545189 | import numpy as np
from tqdm import tqdm
class Hypergraph(object):
def __init__(self,graph_type='0',nums_type=None):
self._nodes = {}
self._edges = {}
self.graph_type = graph_type
self.nums_type = nums_type
self.cumsum = np.cumsum(self.nums_type) if self.graph_type=='1' else None
def add_edge(self, edge_name, e):
edge = tuple(sorted(e))
self._edges[edge] = self._edges.get(edge,0)+1
for v in edge:
node_dict = self._nodes.get(v, {})
neighbors = node_dict.get('neighbors', set())
for v0 in edge:
if v0!=v:
neighbors.add(v0)
node_dict['neighbors'] = neighbors
if self.graph_type=='1':
for i,k in enumerate(self.cumsum):
if int(v) < k:
break
node_dict['type'] = i
self._nodes[v] = node_dict
def edge_weight(self, e):
return self._edges.get(e,0)
def nodes(self):
return self._nodes.keys()
def edges(self):
return self._edges.keys()
def neighbors(self, n):
return self._nodes[n]['neighbors']
def node_type(self, n):
return self._nodes[n]['type']
def get_indecom_factor(G, r):
edges = list(G.edges())
k = len(G.nums_type)
m = len(edges)
dcnt = []
for i in range(k):
dcnt.append({})
for edge in edges:
edge = list(edge)
for i in range(k):
subedge = tuple(sorted(edge[:i]+edge[i+1:]))
dcnt[i][subedge] = dcnt[i].get(subedge,0)+1
factors = [0]*k
for edge in edges:
edge = list(edge)
for i in range(k):
subedge = tuple(sorted(edge[:i]+edge[i+1:]))
if dcnt[i].get(subedge,0)>1:
factors[i]+=1
factors = [factor/m for factor in factors]
cumsum = [0]+list(G.cumsum)
ps = [0]*k
neg_num = m*r
for i in tqdm(range(neg_num),ascii=True):
random_edge = []
for i in range(k):
random_edge.append(np.random.randint(cumsum[i],cumsum[i+1]))
for i in range(k):
subedge = tuple(sorted(random_edge[:i]+random_edge[i+1:]))
if dcnt[i].get(subedge,0)>1 or (dcnt[i].get(subedge,0)>0 and tuple(random_edge) not in edges):
ps[i]+=1
ps = [p/neg_num for p in ps]
indecom_factors = [ps[i]/factors[i] for i in range(k)]
return indecom_factors
| true | true |
f72b45b62702b41808f02cf163bf8b5b63de5b30 | 5,644 | py | Python | Tests/Cube.py | joel-intito/tm1py | 42e59dcdeb70357577c19e974995936b5dbb1131 | [
"MIT"
] | null | null | null | Tests/Cube.py | joel-intito/tm1py | 42e59dcdeb70357577c19e974995936b5dbb1131 | [
"MIT"
] | null | null | null | Tests/Cube.py | joel-intito/tm1py | 42e59dcdeb70357577c19e974995936b5dbb1131 | [
"MIT"
] | null | null | null | import configparser
from pathlib import Path
import unittest
import uuid
from TM1py import Element, Hierarchy, Dimension
from TM1py.Objects import Cube
from TM1py.Objects import Rules
from TM1py.Services import TM1Service
# Read TM1 connection settings from config.ini located next to this module.
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
# Prefix for every object created by this test run, to avoid name clashes
# with real objects on the target TM1 server.
PREFIX = "TM1py_Tests_Cube_"
class TestCubeMethods(unittest.TestCase):
    """Integration tests for TM1py's cube service.

    NOTE(review): these tests require a live TM1 server reachable through
    the ``tm1srv01`` section of config.ini; they create and delete real
    dimensions and cubes on that server.
    """

    # Shared TM1Service connection, established once in setUpClass.
    tm1 = None
    cube_name = PREFIX + "some_name"
    dimension_names = [
        PREFIX + "dimension1",
        PREFIX + "dimension2",
        PREFIX + "dimension3"]

    @classmethod
    def setUpClass(cls):
        """Connect to TM1 and build the dimensions and cube under test."""
        cls.tm1 = TM1Service(**config['tm1srv01'])
        # Build Dimensions: one hierarchy with 1000 numeric elements each.
        for dimension_name in cls.dimension_names:
            elements = [Element('Element {}'.format(str(j)), 'Numeric') for j in range(1, 1001)]
            hierarchy = Hierarchy(dimension_name=dimension_name,
                                  name=dimension_name,
                                  elements=elements)
            dimension = Dimension(dimension_name, [hierarchy])
            if not cls.tm1.dimensions.exists(dimension.name):
                cls.tm1.dimensions.create(dimension)
        # Build Cube
        cube = Cube(cls.cube_name, cls.dimension_names)
        if not cls.tm1.cubes.exists(cls.cube_name):
            cls.tm1.cubes.create(cube)
        # Recreate the cube with an empty rule so every run starts clean.
        c = Cube(cls.cube_name, dimensions=cls.dimension_names, rules=Rules(''))
        if cls.tm1.cubes.exists(c.name):
            cls.tm1.cubes.delete(c.name)
        cls.tm1.cubes.create(c)

    def test_get_cube(self):
        """Fetched cube matches what was created; all = control + model."""
        c = self.tm1.cubes.get(self.cube_name)
        self.assertIsInstance(c, Cube)
        self.assertEqual(c.dimensions, self.dimension_names)
        cubes = self.tm1.cubes.get_all()
        control_cubes = self.tm1.cubes.get_control_cubes()
        model_cubes = self.tm1.cubes.get_model_cubes()
        self.assertEqual(len(cubes), len(control_cubes + model_cubes))

    def test_update_cube(self):
        """Updating a cube's rule persists it server-side."""
        c = self.tm1.cubes.get(self.cube_name)
        c.rules = Rules("SKIPCHECK;\nFEEDERS;")
        self.tm1.cubes.update(c)
        # test if rule was actually updated
        c = self.tm1.cubes.get(self.cube_name)
        self.assertEqual(c.rules.text, "SKIPCHECK;\nFEEDERS;")
        self.assertTrue(c.skipcheck)

    def test_get_control_cubes(self):
        # Control cubes are identified by the "}" name prefix in TM1.
        control_cubes = self.tm1.cubes.get_control_cubes()
        self.assertGreater(len(control_cubes), 0)
        for cube in control_cubes:
            self.assertTrue(cube.name.startswith("}"))

    def test_get_model_cubes(self):
        # Model cubes must NOT carry the control-cube "}" prefix.
        model_cubes = self.tm1.cubes.get_model_cubes()
        self.assertGreater(len(model_cubes), 0)
        for cube in model_cubes:
            self.assertFalse(cube.name.startswith("}"))

    def test_get_dimension_names(self):
        dimension_names = self.tm1.cubes.get_dimension_names(self.cube_name)
        self.assertEqual(dimension_names, self.dimension_names)

    def test_get_random_intersection(self):
        # With 1000 elements per dimension, two random intersections are
        # overwhelmingly unlikely to coincide.
        intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
        intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
        self.assertNotEqual(intersection1, intersection2)
        intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
        intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
        self.assertNotEqual(intersection1, intersection2)

    def test_exists(self):
        # A fresh uuid4 is effectively guaranteed not to be a cube name.
        self.assertTrue(self.tm1.cubes.exists(self.cube_name))
        self.assertFalse(self.tm1.cubes.exists(uuid.uuid4()))

    def test_create_delete_cube(self):
        """Creating then deleting a cube adjusts the server cube count."""
        cube_name = PREFIX + "Some_Other_Name"
        # element with index 0 is Sandboxes
        dimension_names = self.tm1.dimensions.get_all_names()[1:3]
        cube = Cube(cube_name, dimension_names)
        all_cubes_before = self.tm1.cubes.get_all_names()
        self.tm1.cubes.create(cube)
        all_cubes_after = self.tm1.cubes.get_all_names()
        self.assertEqual(
            len(all_cubes_before) + 1,
            len(all_cubes_after))
        self.assertEqual(
            self.tm1.cubes.get_dimension_names(cube_name),
            dimension_names)
        all_cubes_before = self.tm1.cubes.get_all_names()
        self.tm1.cubes.delete(cube_name)
        all_cubes_after = self.tm1.cubes.get_all_names()
        self.assertEqual(len(all_cubes_before) - 1, len(all_cubes_after))

    def test_get_storage_dimension_order(self):
        dimensions = self.tm1.cubes.get_storage_dimension_order(cube_name=self.cube_name)
        self.assertEqual(dimensions, self.dimension_names)

    def test_update_storage_dimension_order(self):
        """Reversing the storage order is reflected on the next read."""
        self.tm1.cubes.update_storage_dimension_order(
            cube_name=self.cube_name,
            dimension_names=reversed(self.dimension_names))
        dimensions = self.tm1.cubes.get_storage_dimension_order(self.cube_name)
        self.assertEqual(
            list(reversed(dimensions)),
            self.dimension_names)

    def test_load(self):
        response = self.tm1.cubes.load(cube_name=self.cube_name)
        self.assertTrue(response.ok)

    def test_unload(self):
        response = self.tm1.cubes.unload(cube_name=self.cube_name)
        self.assertTrue(response.ok)

    @classmethod
    def tearDownClass(cls):
        """Remove every TM1 object this class created, then log out."""
        cls.tm1.cubes.delete(cls.cube_name)
        for dimension in cls.dimension_names:
            cls.tm1.dimensions.delete(dimension)
        cls.tm1.logout()
# Allow running this test module directly (outside a pytest/unittest runner).
if __name__ == '__main__':
    unittest.main()
| 38.394558 | 108 | 0.677711 | import configparser
from pathlib import Path
import unittest
import uuid
from TM1py import Element, Hierarchy, Dimension
from TM1py.Objects import Cube
from TM1py.Objects import Rules
from TM1py.Services import TM1Service
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
PREFIX = "TM1py_Tests_Cube_"
class TestCubeMethods(unittest.TestCase):
tm1 = None
cube_name = PREFIX + "some_name"
dimension_names = [
PREFIX + "dimension1",
PREFIX + "dimension2",
PREFIX + "dimension3"]
@classmethod
def setUpClass(cls):
cls.tm1 = TM1Service(**config['tm1srv01'])
for dimension_name in cls.dimension_names:
elements = [Element('Element {}'.format(str(j)), 'Numeric') for j in range(1, 1001)]
hierarchy = Hierarchy(dimension_name=dimension_name,
name=dimension_name,
elements=elements)
dimension = Dimension(dimension_name, [hierarchy])
if not cls.tm1.dimensions.exists(dimension.name):
cls.tm1.dimensions.create(dimension)
cube = Cube(cls.cube_name, cls.dimension_names)
if not cls.tm1.cubes.exists(cls.cube_name):
cls.tm1.cubes.create(cube)
c = Cube(cls.cube_name, dimensions=cls.dimension_names, rules=Rules(''))
if cls.tm1.cubes.exists(c.name):
cls.tm1.cubes.delete(c.name)
cls.tm1.cubes.create(c)
def test_get_cube(self):
c = self.tm1.cubes.get(self.cube_name)
self.assertIsInstance(c, Cube)
self.assertEqual(c.dimensions, self.dimension_names)
cubes = self.tm1.cubes.get_all()
control_cubes = self.tm1.cubes.get_control_cubes()
model_cubes = self.tm1.cubes.get_model_cubes()
self.assertEqual(len(cubes), len(control_cubes + model_cubes))
def test_update_cube(self):
c = self.tm1.cubes.get(self.cube_name)
c.rules = Rules("SKIPCHECK;\nFEEDERS;")
self.tm1.cubes.update(c)
c = self.tm1.cubes.get(self.cube_name)
self.assertEqual(c.rules.text, "SKIPCHECK;\nFEEDERS;")
self.assertTrue(c.skipcheck)
def test_get_control_cubes(self):
control_cubes = self.tm1.cubes.get_control_cubes()
self.assertGreater(len(control_cubes), 0)
for cube in control_cubes:
self.assertTrue(cube.name.startswith("}"))
def test_get_model_cubes(self):
model_cubes = self.tm1.cubes.get_model_cubes()
self.assertGreater(len(model_cubes), 0)
for cube in model_cubes:
self.assertFalse(cube.name.startswith("}"))
def test_get_dimension_names(self):
dimension_names = self.tm1.cubes.get_dimension_names(self.cube_name)
self.assertEqual(dimension_names, self.dimension_names)
def test_get_random_intersection(self):
intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=False)
self.assertNotEqual(intersection1, intersection2)
intersection1 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
intersection2 = self.tm1.cubes.get_random_intersection(cube_name=self.cube_name, unique_names=True)
self.assertNotEqual(intersection1, intersection2)
def test_exists(self):
self.assertTrue(self.tm1.cubes.exists(self.cube_name))
self.assertFalse(self.tm1.cubes.exists(uuid.uuid4()))
def test_create_delete_cube(self):
cube_name = PREFIX + "Some_Other_Name"
dimension_names = self.tm1.dimensions.get_all_names()[1:3]
cube = Cube(cube_name, dimension_names)
all_cubes_before = self.tm1.cubes.get_all_names()
self.tm1.cubes.create(cube)
all_cubes_after = self.tm1.cubes.get_all_names()
self.assertEqual(
len(all_cubes_before) + 1,
len(all_cubes_after))
self.assertEqual(
self.tm1.cubes.get_dimension_names(cube_name),
dimension_names)
all_cubes_before = self.tm1.cubes.get_all_names()
self.tm1.cubes.delete(cube_name)
all_cubes_after = self.tm1.cubes.get_all_names()
self.assertEqual(len(all_cubes_before) - 1, len(all_cubes_after))
def test_get_storage_dimension_order(self):
dimensions = self.tm1.cubes.get_storage_dimension_order(cube_name=self.cube_name)
self.assertEqual(dimensions, self.dimension_names)
def test_update_storage_dimension_order(self):
self.tm1.cubes.update_storage_dimension_order(
cube_name=self.cube_name,
dimension_names=reversed(self.dimension_names))
dimensions = self.tm1.cubes.get_storage_dimension_order(self.cube_name)
self.assertEqual(
list(reversed(dimensions)),
self.dimension_names)
def test_load(self):
response = self.tm1.cubes.load(cube_name=self.cube_name)
self.assertTrue(response.ok)
def test_unload(self):
response = self.tm1.cubes.unload(cube_name=self.cube_name)
self.assertTrue(response.ok)
@classmethod
def tearDownClass(cls):
cls.tm1.cubes.delete(cls.cube_name)
for dimension in cls.dimension_names:
cls.tm1.dimensions.delete(dimension)
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
| true | true |
f72b4610811bde97505c92b313e84557e3fe0425 | 809 | py | Python | time_this_using_with.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | time_this_using_with.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | time_this_using_with.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from timeit import default_timer
class TimeThis:
def __init__(self, title="TimeThis"):
self.title = title
self.start_time = None
def __enter__(self):
self.start_time = default_timer()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
print('[{}] total time: {} sec'.format(self.title, default_timer() - self.start_time))
if __name__ == '__main__':
import time
with TimeThis():
time.sleep(1)
with TimeThis("Test"):
text = ''
for i in range(10 ** 6):
text += str(i)
with TimeThis("Test"):
items = []
for i in range(10 ** 6):
items.append(str(i))
text = ''.join(items)
| 19.731707 | 94 | 0.566131 |
__author__ = 'ipetrash'
from timeit import default_timer
class TimeThis:
def __init__(self, title="TimeThis"):
self.title = title
self.start_time = None
def __enter__(self):
self.start_time = default_timer()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
print('[{}] total time: {} sec'.format(self.title, default_timer() - self.start_time))
if __name__ == '__main__':
import time
with TimeThis():
time.sleep(1)
with TimeThis("Test"):
text = ''
for i in range(10 ** 6):
text += str(i)
with TimeThis("Test"):
items = []
for i in range(10 ** 6):
items.append(str(i))
text = ''.join(items)
| true | true |
f72b46c25c47804afbeb964bc15146379d6938f8 | 49,448 | py | Python | lib/sqlalchemy/ext/associationproxy.py | mattkohl/sqlalchemy | edf8e782cf5011cd43a0ee281b9e0b1d1becef1f | [
"MIT"
] | 2 | 2020-02-19T17:50:50.000Z | 2021-02-10T02:52:41.000Z | lib/sqlalchemy/ext/associationproxy.py | KonstantinKlepikov/sqlalchemy-1 | 2c34d2503a17316cae3282192405b9b9d60df6fe | [
"MIT"
] | null | null | null | lib/sqlalchemy/ext/associationproxy.py | KonstantinKlepikov/sqlalchemy-1 | 2c34d2503a17316cae3282192405b9b9d60df6fe | [
"MIT"
] | 1 | 2019-08-27T06:57:57.000Z | 2019-08-27T06:57:57.000Z | # ext/associationproxy.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import operator
from .. import exc
from .. import inspect
from .. import orm
from .. import util
from ..orm import collections
from ..orm import interfaces
from ..sql import or_
from ..sql.operators import ColumnOperators
def association_proxy(target_collection, attr, **kw):
    r"""Return a Python property implementing a view of a target
    attribute which references an attribute on members of the
    target.

    The returned value is an instance of :class:`.AssociationProxy`.

    Implements a Python property representing a relationship as a collection
    of simpler values, or a scalar value.  The proxied property will mimic
    the collection type of the target (list, dict or set), or, in the case of
    a one to one relationship, a simple scalar value.

    :param target_collection: Name of the attribute we'll proxy to.
      This attribute is typically mapped by
      :func:`~sqlalchemy.orm.relationship` to link to a target collection,
      but can also be a many-to-one or non-scalar relationship.

    :param attr: Attribute on the associated instance or instances we'll
      proxy for.

      For example, given a target collection of [obj1, obj2], a list created
      by this proxy property would look like [getattr(obj1, *attr*),
      getattr(obj2, *attr*)]

      If the relationship is one-to-one or otherwise uselist=False, then
      simply: getattr(obj, *attr*)

    :param creator: optional.

      When new items are added to this proxied collection, new instances of
      the class collected by the target collection will be created.  For list
      and set collections, the target class constructor will be called with
      the 'value' for the new instance.  For dict types, two arguments are
      passed: key and value.

      If you want to construct instances differently, supply a *creator*
      function that takes arguments as above and returns instances.

      For scalar relationships, creator() will be called if the target is
      None.  If the target is present, set operations are proxied to
      setattr() on the associated object.

      If you have an associated object with multiple attributes, you may set
      up multiple association proxies mapping to different attributes.  See
      the unit tests for examples, and for examples of how creator()
      functions can be used to construct the scalar relationship on-demand
      in this situation.

    :param \*\*kw: Passes along any other keyword arguments to
      :class:`.AssociationProxy`.

    """
    # Thin convenience constructor: all behavior lives in AssociationProxy.
    return AssociationProxy(target_collection, attr, **kw)
ASSOCIATION_PROXY = util.symbol("ASSOCIATION_PROXY")
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
"""
class AssociationProxy(interfaces.InspectionAttrInfo):
"""A descriptor that presents a read/write view of an object attribute."""
is_attribute = True
extension_type = ASSOCIATION_PROXY
def __init__(
self,
target_collection,
attr,
creator=None,
getset_factory=None,
proxy_factory=None,
proxy_bulk_set=None,
info=None,
cascade_scalar_deletes=False,
):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`.relationship`.
:param attr: Attribute on the collected instances we'll proxy
for. For example, given a target collection of [obj1, obj2], a
list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
:param creator: Optional. When new items are added to this proxied
collection, new instances of the class collected by the target
collection will be created. For list and set collections, the
target class constructor will be called with the 'value' for the
new instance. For dict types, two arguments are passed:
key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
:param cascade_scalar_deletes: when True, indicates that setting
the proxied value to ``None``, or deleting it via ``del``, should
also remove the source object. Only applies to scalar attributes.
Normally, removing the proxied target will not remove the proxy
source, as this object may have other state that is still to be
kept.
.. versionadded:: 1.3
.. seealso::
:ref:`cascade_scalar_deletes` - complete usage example
:param getset_factory: Optional. Proxied attribute access is
automatically handled by routines that get and set values based on
the `attr` argument for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
:param proxy_factory: Optional. The type of collection to emulate is
determined by sniffing the target collection. If your collection
type can't be determined by duck typing or you'd like to use a
different collection implementation, you may supply a factory
function to produce those collections. Only applicable to
non-scalar relationships.
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
:param info: optional, will be assigned to
:attr:`.AssociationProxy.info` if present.
.. versionadded:: 1.0.9
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.cascade_scalar_deletes = cascade_scalar_deletes
self.key = "_%s_%s_%s" % (
type(self).__name__,
target_collection,
id(self),
)
if info:
self.info = info
def __get__(self, obj, class_):
if class_ is None:
return self
inst = self._as_instance(class_, obj)
if inst:
return inst.get(obj)
# obj has to be None here
# assert obj is None
return self
def __set__(self, obj, values):
class_ = type(obj)
return self._as_instance(class_, obj).set(obj, values)
def __delete__(self, obj):
class_ = type(obj)
return self._as_instance(class_, obj).delete(obj)
def for_class(self, class_, obj=None):
r"""Return the internal state local to a specific mapped class.
E.g., given a class ``User``::
class User(Base):
# ...
keywords = association_proxy('kws', 'keyword')
If we access this :class:`.AssociationProxy` from
:attr:`.Mapper.all_orm_descriptors`, and we want to view the
target class for this proxy as mapped by ``User``::
inspect(User).all_orm_descriptors["keywords"].for_class(User).target_class
This returns an instance of :class:`.AssociationProxyInstance` that
is specific to the ``User`` class. The :class:`.AssociationProxy`
object remains agnostic of its parent class.
:param class\_: the class that we are returning state for.
:param obj: optional, an instance of the class that is required
if the attribute refers to a polymorphic target, e.g. where we have
to look at the type of the actual destination object to get the
complete path.
.. versionadded:: 1.3 - :class:`.AssociationProxy` no longer stores
any state specific to a particular parent class; the state is now
stored in per-class :class:`.AssociationProxyInstance` objects.
"""
return self._as_instance(class_, obj)
def _as_instance(self, class_, obj):
try:
inst = class_.__dict__[self.key + "_inst"]
except KeyError:
owner = self._calc_owner(class_)
if owner is not None:
inst = AssociationProxyInstance.for_proxy(self, owner, obj)
setattr(class_, self.key + "_inst", inst)
else:
inst = None
if inst is not None and not inst._is_canonical:
# the AssociationProxyInstance can't be generalized
# since the proxied attribute is not on the targeted
# class, only on subclasses of it, which might be
# different. only return for the specific
# object's current value
return inst._non_canonical_get_for_object(obj)
else:
return inst
def _calc_owner(self, target_cls):
# we might be getting invoked for a subclass
# that is not mapped yet, in some declarative situations.
# save until we are mapped
try:
insp = inspect(target_cls)
except exc.NoInspectionAvailable:
# can't find a mapper, don't set owner. if we are a not-yet-mapped
# subclass, we can also scan through __mro__ to find a mapped
# class, but instead just wait for us to be called again against a
# mapped class normally.
return None
else:
return insp.mapper.class_manager.class_
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
def getter(target):
return _getter(target) if target is not None else None
if collection_class is dict:
def setter(o, k, v):
setattr(o, attr, v)
else:
def setter(o, v):
setattr(o, attr, v)
return getter, setter
def __repr__(self):
return "AssociationProxy(%r, %r)" % (
self.target_collection,
self.value_attr,
)
class AssociationProxyInstance(object):
"""A per-class object that serves class- and object-specific results.
This is used by :class:`.AssociationProxy` when it is invoked
in terms of a specific class or instance of a class, i.e. when it is
used as a regular Python descriptor.
When referring to the :class:`.AssociationProxy` as a normal Python
descriptor, the :class:`.AssociationProxyInstance` is the object that
actually serves the information. Under normal circumstances, its presence
is transparent::
>>> User.keywords.scalar
False
In the special case that the :class:`.AssociationProxy` object is being
accessed directly, in order to get an explicit handle to the
:class:`.AssociationProxyInstance`, use the
:meth:`.AssociationProxy.for_class` method::
proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User)
# view if proxy object is scalar or not
>>> proxy_state.scalar
False
.. versionadded:: 1.3
""" # noqa
def __init__(self, parent, owning_class, target_class, value_attr):
self.parent = parent
self.key = parent.key
self.owning_class = owning_class
self.target_collection = parent.target_collection
self.collection_class = None
self.target_class = target_class
self.value_attr = value_attr
target_class = None
"""The intermediary class handled by this
:class:`.AssociationProxyInstance`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
@classmethod
def for_proxy(cls, parent, owning_class, parent_instance):
target_collection = parent.target_collection
value_attr = parent.value_attr
prop = orm.class_mapper(owning_class).get_property(target_collection)
# this was never asserted before but this should be made clear.
if not isinstance(prop, orm.RelationshipProperty):
raise NotImplementedError(
"association proxy to a non-relationship "
"intermediary is not supported"
)
target_class = prop.mapper.class_
try:
target_assoc = cls._cls_unwrap_target_assoc_proxy(
target_class, value_attr
)
except AttributeError:
# the proxied attribute doesn't exist on the target class;
# return an "ambiguous" instance that will work on a per-object
# basis
return AmbiguousAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
else:
return cls._construct_for_assoc(
target_assoc, parent, owning_class, target_class, value_attr
)
@classmethod
def _construct_for_assoc(
cls, target_assoc, parent, owning_class, target_class, value_attr
):
if target_assoc is not None:
return ObjectAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
attr = getattr(target_class, value_attr)
if not hasattr(attr, "_is_internal_proxy"):
return AmbiguousAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
is_object = attr._impl_uses_objects
if is_object:
return ObjectAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
else:
return ColumnAssociationProxyInstance(
parent, owning_class, target_class, value_attr
)
def _get_property(self):
return orm.class_mapper(self.owning_class).get_property(
self.target_collection
)
@property
def _comparator(self):
return self._get_property().comparator
@classmethod
def _cls_unwrap_target_assoc_proxy(cls, target_class, value_attr):
attr = getattr(target_class, value_attr)
if isinstance(attr, (AssociationProxy, AssociationProxyInstance)):
return attr
return None
@util.memoized_property
def _unwrap_target_assoc_proxy(self):
return self._cls_unwrap_target_assoc_proxy(
self.target_class, self.value_attr
)
@property
def remote_attr(self):
"""The 'remote' class attribute referenced by this
:class:`.AssociationProxyInstance`.
.. seealso::
:attr:`.AssociationProxyInstance.attr`
:attr:`.AssociationProxyInstance.local_attr`
"""
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
"""The 'local' class attribute referenced by this
:class:`.AssociationProxyInstance`.
.. seealso::
:attr:`.AssociationProxyInstance.attr`
:attr:`.AssociationProxyInstance.remote_attr`
"""
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
using :meth:`.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
.. seealso::
:attr:`.AssociationProxyInstance.local_attr`
:attr:`.AssociationProxyInstance.remote_attr`
"""
return (self.local_attr, self.remote_attr)
@util.memoized_property
def scalar(self):
"""Return ``True`` if this :class:`.AssociationProxyInstance`
proxies a scalar relationship on the local side."""
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return (
not self._get_property()
.mapper.get_property(self.value_attr)
.uselist
)
@property
def _target_is_object(self):
raise NotImplementedError()
def _initialize_scalar_accessors(self):
if self.parent.getset_factory:
get, set_ = self.parent.getset_factory(None, self)
else:
get, set_ = self.parent._default_getset(None)
self._scalar_get, self._scalar_set = get, set_
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
def getter(target):
return _getter(target) if target is not None else None
if collection_class is dict:
def setter(o, k, v):
return setattr(o, attr, v)
else:
def setter(o, v):
return setattr(o, attr, v)
return getter, setter
@property
def info(self):
return self.parent.info
def get(self, obj):
if obj is None:
return self
if self.scalar:
target = getattr(obj, self.target_collection)
return self._scalar_get(target)
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, self_id, proxy = getattr(obj, self.key)
except AttributeError:
pass
else:
if id(obj) == creator_id and id(self) == self_id:
assert self.collection_class is not None
return proxy
self.collection_class, proxy = self._new(
_lazy_collection(obj, self.target_collection)
)
setattr(obj, self.key, (id(obj), id(self), proxy))
return proxy
def set(self, obj, values):
if self.scalar:
creator = (
self.parent.creator
if self.parent.creator
else self.target_class
)
target = getattr(obj, self.target_collection)
if target is None:
if values is None:
return
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
if values is None and self.parent.cascade_scalar_deletes:
setattr(obj, self.target_collection, None)
else:
proxy = self.get(obj)
assert self.collection_class is not None
if proxy is not values:
proxy._bulk_replace(self, values)
def delete(self, obj):
if self.owning_class is None:
self._calc_owner(obj, None)
if self.scalar:
target = getattr(obj, self.target_collection)
if target is not None:
delattr(target, self.value_attr)
delattr(obj, self.target_collection)
def _new(self, lazy_collection):
creator = (
self.parent.creator if self.parent.creator else self.target_class
)
collection_class = util.duck_type_collection(lazy_collection())
if self.parent.proxy_factory:
return (
collection_class,
self.parent.proxy_factory(
lazy_collection, creator, self.value_attr, self
),
)
if self.parent.getset_factory:
getter, setter = self.parent.getset_factory(collection_class, self)
else:
getter, setter = self.parent._default_getset(collection_class)
if collection_class is list:
return (
collection_class,
_AssociationList(
lazy_collection, creator, getter, setter, self
),
)
elif collection_class is dict:
return (
collection_class,
_AssociationDict(
lazy_collection, creator, getter, setter, self
),
)
elif collection_class is set:
return (
collection_class,
_AssociationSet(
lazy_collection, creator, getter, setter, self
),
)
else:
raise exc.ArgumentError(
"could not guess which interface to use for "
'collection_class "%s" backing "%s"; specify a '
"proxy_factory and proxy_bulk_set manually"
% (self.collection_class.__name__, self.target_collection)
)
def _set(self, proxy, values):
if self.parent.proxy_bulk_set:
self.parent.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exc.ArgumentError(
"no proxy_bulk_set supplied for custom "
"collection_class implementation"
)
def _inflate(self, proxy):
creator = (
self.parent.creator and self.parent.creator or self.target_class
)
if self.parent.getset_factory:
getter, setter = self.parent.getset_factory(
self.collection_class, self
)
else:
getter, setter = self.parent._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _criterion_exists(self, criterion=None, **kwargs):
is_has = kwargs.pop("is_has", None)
target_assoc = self._unwrap_target_assoc_proxy
if target_assoc is not None:
inner = target_assoc._criterion_exists(
criterion=criterion, **kwargs
)
return self._comparator._criterion_exists(inner)
if self._target_is_object:
prop = getattr(self.target_class, self.value_attr)
value_expr = prop._criterion_exists(criterion, **kwargs)
else:
if kwargs:
raise exc.ArgumentError(
"Can't apply keyword arguments to column-targeted "
"association proxy; use =="
)
elif is_has and criterion is not None:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use =="
)
value_expr = criterion
return self._comparator._criterion_exists(value_expr)
def any(self, criterion=None, **kwargs):
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._unwrap_target_assoc_proxy is None and (
self.scalar
and (not self._target_is_object or self._value_is_scalar)
):
raise exc.InvalidRequestError(
"'any()' not implemented for scalar " "attributes. Use has()."
)
return self._criterion_exists(
criterion=criterion, is_has=False, **kwargs
)
def has(self, criterion=None, **kwargs):
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._unwrap_target_assoc_proxy is None and (
not self.scalar
or (self._target_is_object and not self._value_is_scalar)
):
raise exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(
criterion=criterion, is_has=True, **kwargs
)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.parent)
class AmbiguousAssociationProxyInstance(AssociationProxyInstance):
    """an :class:`.AssociationProxyInstance` where we cannot determine
    the type of target object.

    Used when the proxied attribute exists only on subclasses of the
    relationship's target class; comparison operations raise until a
    concrete instance disambiguates the target type.
    """
    # not canonical: AssociationProxy._as_instance will re-resolve
    # per-object via _non_canonical_get_for_object()
    _is_canonical = False
    def _ambiguous(self):
        # shared error raiser for all comparison entry points below
        raise AttributeError(
            "Association proxy %s.%s refers to an attribute '%s' that is not "
            "directly mapped on class %s; therefore this operation cannot "
            "proceed since we don't know what type of object is referred "
            "towards"
            % (
                self.owning_class.__name__,
                self.target_collection,
                self.value_attr,
                self.target_class,
            )
        )
    def get(self, obj):
        # instance-level access still works; only class-level (obj is None)
        # comparison operations are ambiguous
        if obj is None:
            return self
        else:
            return super(AmbiguousAssociationProxyInstance, self).get(obj)
    def __eq__(self, obj):
        self._ambiguous()
    def __ne__(self, obj):
        self._ambiguous()
    def any(self, criterion=None, **kwargs):
        self._ambiguous()
    def has(self, criterion=None, **kwargs):
        self._ambiguous()
    @util.memoized_property
    def _lookup_cache(self):
        # mapping of <subclass>->AssociationProxyInstance.
        # e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist;
        # only B1(B) and B2(B) have "b_attr", keys in here would be B1, B2
        return {}
    def _non_canonical_get_for_object(self, parent_instance):
        """Resolve a concrete proxy instance based on the actual related
        object currently attached to ``parent_instance``, caching per
        subclass in ``_lookup_cache``."""
        if parent_instance is not None:
            actual_obj = getattr(parent_instance, self.target_collection)
            if actual_obj is not None:
                try:
                    insp = inspect(actual_obj)
                except exc.NoInspectionAvailable:
                    # non-mapped object; fall through to returning self
                    pass
                else:
                    mapper = insp.mapper
                    instance_class = mapper.class_
                    if instance_class not in self._lookup_cache:
                        self._populate_cache(instance_class, mapper)
                    try:
                        return self._lookup_cache[instance_class]
                    except KeyError:
                        pass
        # no object or ambiguous object given, so return "self", which
        # is a proxy with generally only instance-level functionality
        return self
    def _populate_cache(self, instance_class, mapper):
        # attempt to build a concrete proxy instance for instance_class;
        # silently leaves the cache unpopulated if the attribute still
        # cannot be resolved (AttributeError path)
        prop = orm.class_mapper(self.owning_class).get_property(
            self.target_collection
        )
        if mapper.isa(prop.mapper):
            target_class = instance_class
            try:
                target_assoc = self._cls_unwrap_target_assoc_proxy(
                    target_class, self.value_attr
                )
            except AttributeError:
                pass
            else:
                self._lookup_cache[instance_class] = self._construct_for_assoc(
                    target_assoc,
                    self.parent,
                    self.owning_class,
                    target_class,
                    self.value_attr,
                )
class ObjectAssociationProxyInstance(AssociationProxyInstance):
    """an :class:`.AssociationProxyInstance` that has an object as a target.
    """
    _target_is_object = True
    _is_canonical = True
    def contains(self, obj):
        """Produce a proxied 'contains' expression using EXISTS.
        This expression will be a composed product
        using the :meth:`.RelationshipProperty.Comparator.any`
        , :meth:`.RelationshipProperty.Comparator.has`,
        and/or :meth:`.RelationshipProperty.Comparator.contains`
        operators of the underlying proxied attributes.
        """
        target_assoc = self._unwrap_target_assoc_proxy
        if target_assoc is not None:
            # nested proxy: delegate to the inner proxy's contains()/==
            # depending on whether the inner endpoint is a collection
            return self._comparator._criterion_exists(
                target_assoc.contains(obj)
                if not target_assoc.scalar
                else target_assoc == obj
            )
        elif (
            self._target_is_object
            and self.scalar
            and not self._value_is_scalar
        ):
            # scalar relationship whose proxied attribute is a collection
            return self._comparator.has(
                getattr(self.target_class, self.value_attr).contains(obj)
            )
        elif self._target_is_object and self.scalar and self._value_is_scalar:
            raise exc.InvalidRequestError(
                "contains() doesn't apply to a scalar object endpoint; use =="
            )
        else:
            # collection relationship: EXISTS over the proxied attribute
            return self._comparator._criterion_exists(**{self.value_attr: obj})
    def __eq__(self, obj):
        # note the has() here will fail for collections; eq_()
        # is only allowed with a scalar.
        if obj is None:
            # match either "related row with NULL value" or "no related row"
            return or_(
                self._comparator.has(**{self.value_attr: obj}),
                self._comparator == None,
            )
        else:
            return self._comparator.has(**{self.value_attr: obj})
    def __ne__(self, obj):
        # note the has() here will fail for collections; eq_()
        # is only allowed with a scalar.
        return self._comparator.has(
            getattr(self.target_class, self.value_attr) != obj
        )
class ColumnAssociationProxyInstance(
    ColumnOperators, AssociationProxyInstance
):
    """an :class:`.AssociationProxyInstance` that has a database column as a
    target.

    Mixes in :class:`.ColumnOperators` so that arbitrary column operators
    applied to the proxy are forwarded to the remote column inside an
    EXISTS subquery (see :meth:`.operate`).
    """
    _target_is_object = False
    _is_canonical = True
    def __eq__(self, other):
        # special case "is None" to check for no related row as well
        expr = self._criterion_exists(
            self.remote_attr.operate(operator.eq, other)
        )
        if other is None:
            return or_(expr, self._comparator == None)
        else:
            return expr
    def operate(self, op, *other, **kwargs):
        # ColumnOperators hook: apply the operator to the remote column
        # and wrap the result in an EXISTS against the relationship
        return self._criterion_exists(
            self.remote_attr.operate(op, *other, **kwargs)
        )
class _lazy_collection(object):
def __init__(self, obj, target):
self.parent = obj
self.target = target
def __call__(self):
return getattr(self.parent, self.target)
def __getstate__(self):
return {"obj": self.parent, "target": self.target}
def __setstate__(self, state):
self.parent = state["obj"]
self.target = state["target"]
class _AssociationCollection(object):
    def __init__(self, lazy_collection, creator, getter, setter, parent):
        """Constructs an _AssociationCollection.
        This will always be a subclass of either _AssociationList,
        _AssociationSet, or _AssociationDict.
        lazy_collection
          A callable returning a list-based collection of entities (usually an
          object attribute managed by a SQLAlchemy relationship())
        creator
          A function that creates new target entities.  Given one parameter:
          value.  This assertion is assumed::
            obj = creator(somevalue)
            assert getter(obj) == somevalue
        getter
          A function.  Given an associated object, return the 'value'.
        setter
          A function.  Given an associated object and a value, store that
          value on the object.
        """
        self.lazy_collection = lazy_collection
        self.creator = creator
        self.getter = getter
        self.setter = setter
        # parent is the owning AssociationProxyInstance; used by
        # __setstate__ to restore creator/getter/setter after unpickling
        self.parent = parent
    # the backing relationship collection, re-resolved on every access
    col = property(lambda self: self.lazy_collection())
    def __len__(self):
        return len(self.col)
    def __bool__(self):
        return bool(self.col)
    # Python 2 truthiness protocol name
    __nonzero__ = __bool__
    def __getstate__(self):
        # creator/getter/setter are not pickled; parent._inflate()
        # regenerates them on load (see __setstate__)
        return {"parent": self.parent, "lazy_collection": self.lazy_collection}
    def __setstate__(self, state):
        self.parent = state["parent"]
        self.lazy_collection = state["lazy_collection"]
        self.parent._inflate(self)
    def _bulk_replace(self, assoc_proxy, values):
        # default bulk assignment: drop everything, then re-add
        self.clear()
        assoc_proxy._set(self, values)
class _AssociationList(_AssociationCollection):
    """Generic, converting, list-to-list proxy."""
    def _create(self, value):
        # build a new association object for a proxied value
        return self.creator(value)
    def _get(self, object_):
        # extract the proxied value from an association object
        return self.getter(object_)
    def _set(self, object_, value):
        # store a proxied value onto an existing association object
        return self.setter(object_, value)
    def __getitem__(self, index):
        if not isinstance(index, slice):
            return self._get(self.col[index])
        else:
            return [self._get(member) for member in self.col[index]]
    def __setitem__(self, index, value):
        if not isinstance(index, slice):
            self._set(self.col[index], value)
        else:
            # normalize the slice stop; NOTE(review): index.start is not
            # normalized the same way, so a negative slice start may not
            # behave like list.__setitem__ here -- confirm before relying
            if index.stop is None:
                stop = len(self)
            elif index.stop < 0:
                stop = len(self) + index.stop
            else:
                stop = index.stop
            step = index.step or 1
            start = index.start or 0
            rng = list(range(index.start or 0, stop, step))
            if step == 1:
                # contiguous slice: delete the old span, then insert the
                # new values at the same position
                for i in rng:
                    del self[start]
                i = start
                for item in value:
                    self.insert(i, item)
                    i += 1
            else:
                # extended slice: sizes must match, values set in place
                if len(value) != len(rng):
                    raise ValueError(
                        "attempt to assign sequence of size %s to "
                        "extended slice of size %s" % (len(value), len(rng))
                    )
                for i, item in zip(rng, value):
                    self._set(self.col[i], item)
    def __delitem__(self, index):
        del self.col[index]
    def __contains__(self, value):
        for member in self.col:
            # testlib.pragma exempt:__eq__
            if self._get(member) == value:
                return True
        return False
    # __getslice__/__setslice__/__delslice__ are the Python 2 slicing
    # protocol; on Python 3 only __getitem__/__setitem__/__delitem__ run
    def __getslice__(self, start, end):
        return [self._get(member) for member in self.col[start:end]]
    def __setslice__(self, start, end, values):
        members = [self._create(v) for v in values]
        self.col[start:end] = members
    def __delslice__(self, start, end):
        del self.col[start:end]
    def __iter__(self):
        """Iterate over proxied values.
        For the actual domain objects, iterate over .col instead or
        just use the underlying collection directly from its property
        on the parent.
        """
        for member in self.col:
            yield self._get(member)
        return
    def append(self, value):
        col = self.col
        item = self._create(value)
        col.append(item)
    def count(self, value):
        return sum(
            [
                1
                for _ in util.itertools_filter(
                    lambda v: v == value, iter(self)
                )
            ]
        )
    def extend(self, values):
        for v in values:
            self.append(v)
    def insert(self, index, value):
        # slice-assignment insert keeps relationship events firing
        self.col[index:index] = [self._create(value)]
    def pop(self, index=-1):
        return self.getter(self.col.pop(index))
    def remove(self, value):
        for i, val in enumerate(self):
            if val == value:
                del self.col[i]
                return
        raise ValueError("value not in list")
    def reverse(self):
        """Not supported, use reversed(mylist)"""
        raise NotImplementedError
    def sort(self):
        """Not supported, use sorted(mylist)"""
        raise NotImplementedError
    def clear(self):
        del self.col[0 : len(self.col)]
    # comparisons operate on a materialized plain list of proxied values
    def __eq__(self, other):
        return list(self) == other
    def __ne__(self, other):
        return list(self) != other
    def __lt__(self, other):
        return list(self) < other
    def __le__(self, other):
        return list(self) <= other
    def __gt__(self, other):
        return list(self) > other
    def __ge__(self, other):
        return list(self) >= other
    def __cmp__(self, other):
        # Python 2 three-way comparison
        return util.cmp(list(self), other)
    def __add__(self, iterable):
        try:
            other = list(iterable)
        except TypeError:
            return NotImplemented
        return list(self) + other
    def __radd__(self, iterable):
        try:
            other = list(iterable)
        except TypeError:
            return NotImplemented
        return other + list(self)
    def __mul__(self, n):
        if not isinstance(n, int):
            return NotImplemented
        return list(self) * n
    __rmul__ = __mul__
    def __iadd__(self, iterable):
        self.extend(iterable)
        return self
    def __imul__(self, n):
        # unlike a regular list *=, proxied __imul__ will generate unique
        # backing objects for each copy.  *= on proxied lists is a bit of
        # a stretch anyhow, and this interpretation of the __imul__ contract
        # is more plausibly useful than copying the backing objects.
        if not isinstance(n, int):
            return NotImplemented
        if n == 0:
            self.clear()
        elif n > 1:
            self.extend(list(self) * (n - 1))
        return self
    def index(self, item, *args):
        return list(self).index(item, *args)
    def copy(self):
        return list(self)
    def __repr__(self):
        return repr(list(self))
    def __hash__(self):
        raise TypeError("%s objects are unhashable" % type(self).__name__)
    # class-body loop: copy docstrings from the built-in list onto any
    # undocumented method of the same name defined above
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(list, func_name)
        ):
            func.__doc__ = getattr(list, func_name).__doc__
    del func_name, func
# sentinel distinguishing "no default supplied" from an explicit None
# default (see _AssociationDict.pop)
_NotProvided = util.symbol("_NotProvided")
class _AssociationDict(_AssociationCollection):
    """Generic, converting, dict-to-dict proxy.

    Translates between the dictionary of association objects backing the
    relationship (``self.col``) and a dictionary of proxied values, via
    the ``creator``/``getter``/``setter`` callables established by
    :class:`._AssociationCollection`.
    """
    def _create(self, key, value):
        # build a new association object for a key/value pair
        return self.creator(key, value)
    def _get(self, object_):
        # extract the proxied value from an association object
        return self.getter(object_)
    def _set(self, object_, key, value):
        # store a value onto an existing association object
        return self.setter(object_, key, value)
    def __getitem__(self, key):
        return self._get(self.col[key])
    def __setitem__(self, key, value):
        if key in self.col:
            # mutate the existing association object in place
            self._set(self.col[key], key, value)
        else:
            self.col[key] = self._create(key, value)
    def __delitem__(self, key):
        del self.col[key]
    def __contains__(self, key):
        # testlib.pragma exempt:__hash__
        return key in self.col
    def has_key(self, key):
        # testlib.pragma exempt:__hash__
        return key in self.col
    def __iter__(self):
        return iter(self.col.keys())
    def clear(self):
        self.col.clear()
    # comparisons operate on a materialized plain dict of proxied values
    def __eq__(self, other):
        return dict(self) == other
    def __ne__(self, other):
        return dict(self) != other
    def __lt__(self, other):
        return dict(self) < other
    def __le__(self, other):
        return dict(self) <= other
    def __gt__(self, other):
        return dict(self) > other
    def __ge__(self, other):
        return dict(self) >= other
    def __cmp__(self, other):
        # Python 2 three-way comparison
        return util.cmp(dict(self), other)
    def __repr__(self):
        return repr(dict(self.items()))
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def setdefault(self, key, default=None):
        if key not in self.col:
            self.col[key] = self._create(key, default)
            return default
        else:
            return self[key]
    def keys(self):
        return self.col.keys()
    if util.py2k:
        def iteritems(self):
            return ((key, self._get(self.col[key])) for key in self.col)
        def itervalues(self):
            return (self._get(self.col[key]) for key in self.col)
        def iterkeys(self):
            return self.col.iterkeys()
        def values(self):
            return [self._get(member) for member in self.col.values()]
        def items(self):
            return [(k, self._get(self.col[k])) for k in self]
    else:
        def items(self):
            return ((key, self._get(self.col[key])) for key in self.col)
        def values(self):
            return (self._get(self.col[key]) for key in self.col)
    def pop(self, key, default=_NotProvided):
        # _NotProvided sentinel distinguishes "no default" (propagate
        # KeyError) from an explicit default of None
        if default is _NotProvided:
            member = self.col.pop(key)
        else:
            member = self.col.pop(key, default)
        return self._get(member)
    def popitem(self):
        item = self.col.popitem()
        return (item[0], self._get(item[1]))
    def update(self, *a, **kw):
        if len(a) > 1:
            raise TypeError(
                "update expected at most 1 arguments, got %i" % len(a)
            )
        elif len(a) == 1:
            seq_or_map = a[0]
            # discern dict from sequence - took the advice from
            # http://www.voidspace.org.uk/python/articles/duck_typing.shtml
            # still not perfect :(
            if hasattr(seq_or_map, "keys"):
                for item in seq_or_map:
                    self[item] = seq_or_map[item]
            else:
                try:
                    for k, v in seq_or_map:
                        self[k] = v
                except ValueError:
                    raise ValueError(
                        "dictionary update sequence "
                        "requires 2-element tuples"
                    )
        # BUGFIX: iterate kw.items() -- iterating the dict directly yields
        # only keys, so ``for key, value in kw`` tried to unpack each key
        # string and raised ValueError for any keyword argument
        for key, value in kw.items():
            self[key] = value
    def _bulk_replace(self, assoc_proxy, values):
        existing = set(self)
        constants = existing.intersection(values or ())
        additions = set(values or ()).difference(constants)
        removals = existing.difference(constants)
        # BUGFIX: guard against values=None consistently with the
        # ``values or ()`` guards above (``values.items() or ()`` raised
        # AttributeError when values was None)
        for key, member in (values or {}).items():
            if key in additions:
                self[key] = member
            elif key in constants:
                self[key] = member
        for key in removals:
            del self[key]
    def copy(self):
        return dict(self.items())
    def __hash__(self):
        raise TypeError("%s objects are unhashable" % type(self).__name__)
    # class-body loop: copy docstrings from the built-in dict onto any
    # undocumented method of the same name defined above
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(dict, func_name)
        ):
            func.__doc__ = getattr(dict, func_name).__doc__
    del func_name, func
class _AssociationSet(_AssociationCollection):
    """Generic, converting, set-to-set proxy."""
    def _create(self, value):
        # build a new association object for a proxied value
        return self.creator(value)
    def _get(self, object_):
        # extract the proxied value from an association object
        return self.getter(object_)
    def __len__(self):
        return len(self.col)
    def __bool__(self):
        if self.col:
            return True
        else:
            return False
    # Python 2 truthiness protocol name
    __nonzero__ = __bool__
    def __contains__(self, value):
        for member in self.col:
            # testlib.pragma exempt:__eq__
            if self._get(member) == value:
                return True
        return False
    def __iter__(self):
        """Iterate over proxied values.
        For the actual domain objects, iterate over .col instead or just use
        the underlying collection directly from its property on the parent.
        """
        for member in self.col:
            yield self._get(member)
        return
    def add(self, value):
        if value not in self:
            self.col.add(self._create(value))
    # for discard and remove, choosing a more expensive check strategy rather
    # than call self.creator()
    def discard(self, value):
        for member in self.col:
            if self._get(member) == value:
                self.col.discard(member)
                break
    def remove(self, value):
        for member in self.col:
            if self._get(member) == value:
                self.col.discard(member)
                return
        raise KeyError(value)
    def pop(self):
        if not self.col:
            raise KeyError("pop from an empty set")
        member = self.col.pop()
        return self._get(member)
    def update(self, other):
        for value in other:
            self.add(value)
    def _bulk_replace(self, assoc_proxy, values):
        # set-specific bulk assignment: keep members present in both,
        # add new ones, remove the rest
        existing = set(self)
        constants = existing.intersection(values or ())
        additions = set(values or ()).difference(constants)
        removals = existing.difference(constants)
        appender = self.add
        remover = self.remove
        for member in values or ():
            if member in additions:
                appender(member)
            elif member in constants:
                appender(member)
        for member in removals:
            remover(member)
    def __ior__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        for value in other:
            self.add(value)
        return self
    def _set(self):
        # materialize the proxied values as a plain set
        return set(iter(self))
    def union(self, other):
        return set(self).union(other)
    __or__ = union
    def difference(self, other):
        return set(self).difference(other)
    __sub__ = difference
    def difference_update(self, other):
        for value in other:
            self.discard(value)
    def __isub__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        for value in other:
            self.discard(value)
        return self
    def intersection(self, other):
        return set(self).intersection(other)
    __and__ = intersection
    def intersection_update(self, other):
        want, have = self.intersection(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
    def __iand__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        want, have = self.intersection(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
        return self
    def symmetric_difference(self, other):
        return set(self).symmetric_difference(other)
    __xor__ = symmetric_difference
    def symmetric_difference_update(self, other):
        want, have = self.symmetric_difference(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
    def __ixor__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        want, have = self.symmetric_difference(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
        return self
    def issubset(self, other):
        return set(self).issubset(other)
    def issuperset(self, other):
        return set(self).issuperset(other)
    def clear(self):
        self.col.clear()
    def copy(self):
        return set(self)
    def __eq__(self, other):
        return set(self) == other
    def __ne__(self, other):
        return set(self) != other
    def __lt__(self, other):
        return set(self) < other
    def __le__(self, other):
        return set(self) <= other
    def __gt__(self, other):
        return set(self) > other
    def __ge__(self, other):
        return set(self) >= other
    def __repr__(self):
        return repr(set(self))
    def __hash__(self):
        raise TypeError("%s objects are unhashable" % type(self).__name__)
    # class-body loop: copy docstrings from the built-in set onto any
    # undocumented method of the same name defined above
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(set, func_name)
        ):
            func.__doc__ = getattr(set, func_name).__doc__
    del func_name, func
| 31.217172 | 86 | 0.596809 |
import operator
from .. import exc
from .. import inspect
from .. import orm
from .. import util
from ..orm import collections
from ..orm import interfaces
from ..sql import or_
from ..sql.operators import ColumnOperators
def association_proxy(target_collection, attr, **kw):
    """Return a Python property implementing a view of attribute ``attr``
    on members of the ``target_collection`` relationship.

    All keyword arguments are passed through to the
    :class:`.AssociationProxy` constructor.
    """
    return AssociationProxy(target_collection, attr, **kw)
# extension-type marker exposed via AssociationProxy.extension_type so the
# ORM inspection system can recognize association proxy attributes
ASSOCIATION_PROXY = util.symbol("ASSOCIATION_PROXY")
class AssociationProxy(interfaces.InspectionAttrInfo):
    """A descriptor that presents a read/write view of an object attribute
    reached through a relationship, delegating per-class behavior to
    :class:`.AssociationProxyInstance`."""
    is_attribute = True
    extension_type = ASSOCIATION_PROXY
    def __init__(
        self,
        target_collection,
        attr,
        creator=None,
        getset_factory=None,
        proxy_factory=None,
        proxy_bulk_set=None,
        info=None,
        cascade_scalar_deletes=False,
    ):
        """Construct a new :class:`.AssociationProxy`.

        :param target_collection: name of the relationship attribute to
         proxy through.
        :param attr: attribute on the associated instance(s) to expose.
        :param creator: optional callable producing new association
         objects from proxied values.
        :param getset_factory: optional factory returning (getter, setter)
         callables for a given collection class.
        :param proxy_factory: optional factory for the proxying collection
         itself.
        :param proxy_bulk_set: optional bulk-assignment callable used with
         proxy_factory.
        :param info: optional dict assigned to ``self.info``.
        :param cascade_scalar_deletes: when True, setting a scalar proxied
         value to None also sets the intermediary relationship to None.
        """
        self.target_collection = target_collection
        self.value_attr = attr
        self.creator = creator
        self.getset_factory = getset_factory
        self.proxy_factory = proxy_factory
        self.proxy_bulk_set = proxy_bulk_set
        self.cascade_scalar_deletes = cascade_scalar_deletes
        # unique per-proxy key; id(self) keeps multiple proxies over the
        # same relationship from colliding on the owning class
        self.key = "_%s_%s_%s" % (
            type(self).__name__,
            target_collection,
            id(self),
        )
        if info:
            self.info = info
    def __get__(self, obj, class_):
        # descriptor protocol: class-level access may return self when no
        # owner can be determined yet
        if class_ is None:
            return self
        inst = self._as_instance(class_, obj)
        if inst:
            return inst.get(obj)
        return self
    def __set__(self, obj, values):
        class_ = type(obj)
        return self._as_instance(class_, obj).set(obj, values)
    def __delete__(self, obj):
        class_ = type(obj)
        return self._as_instance(class_, obj).delete(obj)
    def for_class(self, class_, obj=None):
        """Return the :class:`.AssociationProxyInstance` specific to
        ``class_`` (and optionally resolved against ``obj``)."""
        return self._as_instance(class_, obj)
    def _as_instance(self, class_, obj):
        # per-class AssociationProxyInstance is memoized in the class
        # __dict__ under <key>_inst
        try:
            inst = class_.__dict__[self.key + "_inst"]
        except KeyError:
            owner = self._calc_owner(class_)
            if owner is not None:
                inst = AssociationProxyInstance.for_proxy(self, owner, obj)
                setattr(class_, self.key + "_inst", inst)
            else:
                inst = None
        if inst is not None and not inst._is_canonical:
            # the proxied attribute is only present on subclasses of the
            # target; resolve against the current object's actual type
            return inst._non_canonical_get_for_object(obj)
        else:
            return inst
    def _calc_owner(self, target_cls):
        # resolve the mapped class that owns this descriptor, or None if
        # target_cls is not (yet) mapped
        try:
            insp = inspect(target_cls)
        except exc.NoInspectionAvailable:
            return None
        else:
            return insp.mapper.class_manager.class_
    def _default_getset(self, collection_class):
        # build plain attribute-based getter/setter callables; dict
        # collections receive a (obj, key, value) setter signature
        attr = self.value_attr
        _getter = operator.attrgetter(attr)
        def getter(target):
            return _getter(target) if target is not None else None
        if collection_class is dict:
            def setter(o, k, v):
                setattr(o, attr, v)
        else:
            def setter(o, v):
                setattr(o, attr, v)
        return getter, setter
    def __repr__(self):
        return "AssociationProxy(%r, %r)" % (
            self.target_collection,
            self.value_attr,
        )
class AssociationProxyInstance(object):
    """Per-class companion to :class:`.AssociationProxy`, carrying the
    resolved owning class, target class and proxied attribute name."""
    def __init__(self, parent, owning_class, target_class, value_attr):
        self.parent = parent
        self.key = parent.key
        self.owning_class = owning_class
        self.target_collection = parent.target_collection
        # established lazily by _new() on first collection access
        self.collection_class = None
        self.target_class = target_class
        self.value_attr = value_attr
    # class-level default for the intermediary class
    target_class = None
    @classmethod
    def for_proxy(cls, parent, owning_class, parent_instance):
        """Build the appropriate AssociationProxyInstance subclass for the
        given proxy/owner, falling back to the "ambiguous" variant when the
        proxied attribute cannot be found on the target class."""
        target_collection = parent.target_collection
        value_attr = parent.value_attr
        prop = orm.class_mapper(owning_class).get_property(target_collection)
        if not isinstance(prop, orm.RelationshipProperty):
            raise NotImplementedError(
                "association proxy to a non-relationship "
                "intermediary is not supported"
            )
        target_class = prop.mapper.class_
        try:
            target_assoc = cls._cls_unwrap_target_assoc_proxy(
                target_class, value_attr
            )
        except AttributeError:
            # return an "ambiguous" instance that will work on a per-object
            # basis
            return AmbiguousAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        else:
            return cls._construct_for_assoc(
                target_assoc, parent, owning_class, target_class, value_attr
            )
    @classmethod
    def _construct_for_assoc(
        cls, target_assoc, parent, owning_class, target_class, value_attr
    ):
        # choose the concrete subclass: nested proxy -> object-targeted;
        # non-ORM attribute -> ambiguous; otherwise object vs. column
        # based on the attribute implementation
        if target_assoc is not None:
            return ObjectAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        attr = getattr(target_class, value_attr)
        if not hasattr(attr, "_is_internal_proxy"):
            return AmbiguousAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        is_object = attr._impl_uses_objects
        if is_object:
            return ObjectAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
        else:
            return ColumnAssociationProxyInstance(
                parent, owning_class, target_class, value_attr
            )
    def _get_property(self):
        # the RelationshipProperty underlying the proxied relationship
        return orm.class_mapper(self.owning_class).get_property(
            self.target_collection
        )
    @property
    def _comparator(self):
        return self._get_property().comparator
    @classmethod
    def _cls_unwrap_target_assoc_proxy(cls, target_class, value_attr):
        # return the nested proxy when the proxied attribute is itself an
        # association proxy, else None; may raise AttributeError
        attr = getattr(target_class, value_attr)
        if isinstance(attr, (AssociationProxy, AssociationProxyInstance)):
            return attr
        return None
    @util.memoized_property
    def _unwrap_target_assoc_proxy(self):
        return self._cls_unwrap_target_assoc_proxy(
            self.target_class, self.value_attr
        )
    @property
    def remote_attr(self):
        """The proxied attribute on the target class."""
        return getattr(self.target_class, self.value_attr)
    @property
    def local_attr(self):
        """The relationship attribute on the owning class."""
        return getattr(self.owning_class, self.target_collection)
    @property
    def attr(self):
        """Tuple of (local_attr, remote_attr), e.g. for query.join()."""
        return (self.local_attr, self.remote_attr)
    @util.memoized_property
    def scalar(self):
        """True if the proxied relationship is many-to-one / scalar.

        Side effect: sets up the scalar get/set accessors on first call.
        """
        scalar = not self._get_property().uselist
        if scalar:
            self._initialize_scalar_accessors()
        return scalar
    @util.memoized_property
    def _value_is_scalar(self):
        # True when the proxied attribute on the target is itself scalar
        return (
            not self._get_property()
            .mapper.get_property(self.value_attr)
            .uselist
        )
    @property
    def _target_is_object(self):
        # implemented by the Object/Column subclasses
        raise NotImplementedError()
    def _initialize_scalar_accessors(self):
        if self.parent.getset_factory:
            get, set_ = self.parent.getset_factory(None, self)
        else:
            get, set_ = self.parent._default_getset(None)
        self._scalar_get, self._scalar_set = get, set_
    def _default_getset(self, collection_class):
        # mirror of AssociationProxy._default_getset (dict collections get
        # a 3-argument setter)
        attr = self.value_attr
        _getter = operator.attrgetter(attr)
        def getter(target):
            return _getter(target) if target is not None else None
        if collection_class is dict:
            def setter(o, k, v):
                return setattr(o, attr, v)
        else:
            def setter(o, v):
                return setattr(o, attr, v)
        return getter, setter
    @property
    def info(self):
        return self.parent.info
    def get(self, obj):
        """Return the proxied value/collection for ``obj``."""
        if obj is None:
            return self
        if self.scalar:
            target = getattr(obj, self.target_collection)
            return self._scalar_get(target)
        else:
            try:
                # If the owning instance is reborn (orm session resurrect,
                # etc.), refresh the proxy cache.
                creator_id, self_id, proxy = getattr(obj, self.key)
            except AttributeError:
                pass
            else:
                if id(obj) == creator_id and id(self) == self_id:
                    assert self.collection_class is not None
                    return proxy
            # (re)build the proxying collection and cache it on the
            # instance keyed by object and proxy identity
            self.collection_class, proxy = self._new(
                _lazy_collection(obj, self.target_collection)
            )
            setattr(obj, self.key, (id(obj), id(self), proxy))
            return proxy
    def set(self, obj, values):
        """Assign ``values`` through the proxy onto ``obj``."""
        if self.scalar:
            creator = (
                self.parent.creator
                if self.parent.creator
                else self.target_class
            )
            target = getattr(obj, self.target_collection)
            if target is None:
                if values is None:
                    return
                setattr(obj, self.target_collection, creator(values))
            else:
                self._scalar_set(target, values)
                if values is None and self.parent.cascade_scalar_deletes:
                    setattr(obj, self.target_collection, None)
        else:
            proxy = self.get(obj)
            assert self.collection_class is not None
            if proxy is not values:
                proxy._bulk_replace(self, values)
    def delete(self, obj):
        """Delete the proxied attribute and the intermediary attribute."""
        # NOTE(review): owning_class is assigned in __init__ and no
        # two-argument _calc_owner is visible here; this branch looks
        # unreachable/vestigial -- confirm before relying on it
        if self.owning_class is None:
            self._calc_owner(obj, None)
        if self.scalar:
            target = getattr(obj, self.target_collection)
            if target is not None:
                delattr(target, self.value_attr)
        delattr(obj, self.target_collection)
    def _new(self, lazy_collection):
        # instantiate the proxying collection matching the duck type of
        # the backing relationship collection
        creator = (
            self.parent.creator if self.parent.creator else self.target_class
        )
        collection_class = util.duck_type_collection(lazy_collection())
        if self.parent.proxy_factory:
            return (
                collection_class,
                self.parent.proxy_factory(
                    lazy_collection, creator, self.value_attr, self
                ),
            )
        if self.parent.getset_factory:
            getter, setter = self.parent.getset_factory(collection_class, self)
        else:
            getter, setter = self.parent._default_getset(collection_class)
        if collection_class is list:
            return (
                collection_class,
                _AssociationList(
                    lazy_collection, creator, getter, setter, self
                ),
            )
        elif collection_class is dict:
            return (
                collection_class,
                _AssociationDict(
                    lazy_collection, creator, getter, setter, self
                ),
            )
        elif collection_class is set:
            return (
                collection_class,
                _AssociationSet(
                    lazy_collection, creator, getter, setter, self
                ),
            )
        else:
            # NOTE(review): self.collection_class is still None when this
            # branch is reached from get(), so .__name__ here would raise
            # AttributeError -- confirm intended error path
            raise exc.ArgumentError(
                "could not guess which interface to use for "
                'collection_class "%s" backing "%s"; specify a '
                "proxy_factory and proxy_bulk_set manually"
                % (self.collection_class.__name__, self.target_collection)
            )
    def _set(self, proxy, values):
        # bulk-populate an already-cleared proxying collection
        if self.parent.proxy_bulk_set:
            self.parent.proxy_bulk_set(proxy, values)
        elif self.collection_class is list:
            proxy.extend(values)
        elif self.collection_class is dict:
            proxy.update(values)
        elif self.collection_class is set:
            proxy.update(values)
        else:
            raise exc.ArgumentError(
                "no proxy_bulk_set supplied for custom "
                "collection_class implementation"
            )
    def _inflate(self, proxy):
        # restore creator/getter/setter on an unpickled collection
        creator = (
            self.parent.creator and self.parent.creator or self.target_class
        )
        if self.parent.getset_factory:
            getter, setter = self.parent.getset_factory(
                self.collection_class, self
            )
        else:
            getter, setter = self.parent._default_getset(self.collection_class)
        proxy.creator = creator
        proxy.getter = getter
        proxy.setter = setter
    def _criterion_exists(self, criterion=None, **kwargs):
        # shared EXISTS builder behind any()/has(); is_has flags the
        # has() path so column-targeted misuse can be rejected
        is_has = kwargs.pop("is_has", None)
        target_assoc = self._unwrap_target_assoc_proxy
        if target_assoc is not None:
            # nested proxy: compose the inner proxy's EXISTS inside ours
            inner = target_assoc._criterion_exists(
                criterion=criterion, **kwargs
            )
            return self._comparator._criterion_exists(inner)
        if self._target_is_object:
            prop = getattr(self.target_class, self.value_attr)
            value_expr = prop._criterion_exists(criterion, **kwargs)
        else:
            if kwargs:
                raise exc.ArgumentError(
                    "Can't apply keyword arguments to column-targeted "
                    "association proxy; use =="
                )
            elif is_has and criterion is not None:
                raise exc.ArgumentError(
                    "Non-empty has() not allowed for "
                    "column-targeted association proxy; use =="
                )
            value_expr = criterion
        return self._comparator._criterion_exists(value_expr)
    def any(self, criterion=None, **kwargs):
        """Produce a proxied 'any' expression using EXISTS; valid for
        collection endpoints only."""
        if self._unwrap_target_assoc_proxy is None and (
            self.scalar
            and (not self._target_is_object or self._value_is_scalar)
        ):
            raise exc.InvalidRequestError(
                "'any()' not implemented for scalar " "attributes. Use has()."
            )
        return self._criterion_exists(
            criterion=criterion, is_has=False, **kwargs
        )
    def has(self, criterion=None, **kwargs):
        """Produce a proxied 'has' expression using EXISTS; valid for
        scalar endpoints only."""
        if self._unwrap_target_assoc_proxy is None and (
            not self.scalar
            or (self._target_is_object and not self._value_is_scalar)
        ):
            raise exc.InvalidRequestError(
                "'has()' not implemented for collections. " "Use any()."
            )
        return self._criterion_exists(
            criterion=criterion, is_has=True, **kwargs
        )
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.parent)
class AmbiguousAssociationProxyInstance(AssociationProxyInstance):
    """an :class:`.AssociationProxyInstance` where the type of target
    object cannot be determined; comparison operations raise until a
    concrete instance disambiguates the target type."""
    # not canonical: AssociationProxy._as_instance re-resolves per object
    _is_canonical = False
    def _ambiguous(self):
        # shared error raiser for the comparison entry points below
        raise AttributeError(
            "Association proxy %s.%s refers to an attribute '%s' that is not "
            "directly mapped on class %s; therefore this operation cannot "
            "proceed since we don't know what type of object is referred "
            "towards"
            % (
                self.owning_class.__name__,
                self.target_collection,
                self.value_attr,
                self.target_class,
            )
        )
    def get(self, obj):
        # instance-level access still works; class-level comparisons don't
        if obj is None:
            return self
        else:
            return super(AmbiguousAssociationProxyInstance, self).get(obj)
    def __eq__(self, obj):
        self._ambiguous()
    def __ne__(self, obj):
        self._ambiguous()
    def any(self, criterion=None, **kwargs):
        self._ambiguous()
    def has(self, criterion=None, **kwargs):
        self._ambiguous()
    @util.memoized_property
    def _lookup_cache(self):
        # mapping of <subclass>->AssociationProxyInstance.
        # e.g. proxy is A-> A.b -> B -> B.b_attr, but B.b_attr doesn't exist;
        # only subclasses of B that define "b_attr" appear as keys here
        return {}
    def _non_canonical_get_for_object(self, parent_instance):
        # resolve a concrete proxy instance from the actual related
        # object's class, caching per subclass in _lookup_cache
        if parent_instance is not None:
            actual_obj = getattr(parent_instance, self.target_collection)
            if actual_obj is not None:
                try:
                    insp = inspect(actual_obj)
                except exc.NoInspectionAvailable:
                    # non-mapped object; fall through to returning self
                    pass
                else:
                    mapper = insp.mapper
                    instance_class = mapper.class_
                    if instance_class not in self._lookup_cache:
                        self._populate_cache(instance_class, mapper)
                    try:
                        return self._lookup_cache[instance_class]
                    except KeyError:
                        pass
        # no object or still-ambiguous object: return self, which offers
        # instance-level functionality only
        return self
    def _populate_cache(self, instance_class, mapper):
        # try to construct a concrete proxy instance for instance_class;
        # leaves the cache unpopulated if resolution still fails
        prop = orm.class_mapper(self.owning_class).get_property(
            self.target_collection
        )
        if mapper.isa(prop.mapper):
            target_class = instance_class
            try:
                target_assoc = self._cls_unwrap_target_assoc_proxy(
                    target_class, self.value_attr
                )
            except AttributeError:
                pass
            else:
                self._lookup_cache[instance_class] = self._construct_for_assoc(
                    target_assoc,
                    self.parent,
                    self.owning_class,
                    target_class,
                    self.value_attr,
                )
class ObjectAssociationProxyInstance(AssociationProxyInstance):
    """an :class:`.AssociationProxyInstance` that has an object as a
    target."""
    _target_is_object = True
    _is_canonical = True
    def contains(self, obj):
        """Produce a proxied 'contains' expression using EXISTS, composed
        from any()/has()/contains() of the underlying attributes."""
        target_assoc = self._unwrap_target_assoc_proxy
        if target_assoc is not None:
            # nested proxy: delegate to the inner proxy's contains()/==
            # depending on whether the inner endpoint is a collection
            return self._comparator._criterion_exists(
                target_assoc.contains(obj)
                if not target_assoc.scalar
                else target_assoc == obj
            )
        elif (
            self._target_is_object
            and self.scalar
            and not self._value_is_scalar
        ):
            # scalar relationship whose proxied attribute is a collection
            return self._comparator.has(
                getattr(self.target_class, self.value_attr).contains(obj)
            )
        elif self._target_is_object and self.scalar and self._value_is_scalar:
            raise exc.InvalidRequestError(
                "contains() doesn't apply to a scalar object endpoint; use =="
            )
        else:
            # collection relationship: EXISTS over the proxied attribute
            return self._comparator._criterion_exists(**{self.value_attr: obj})
    def __eq__(self, obj):
        # note the has() here will fail for collections; eq_()
        # is only allowed with a scalar.
        if obj is None:
            # match either "related row with NULL value" or "no related row"
            return or_(
                self._comparator.has(**{self.value_attr: obj}),
                self._comparator == None,
            )
        else:
            return self._comparator.has(**{self.value_attr: obj})
    def __ne__(self, obj):
        # note the has() here will fail for collections; eq_()
        # is only allowed with a scalar.
        return self._comparator.has(
            getattr(self.target_class, self.value_attr) != obj
        )
class ColumnAssociationProxyInstance(
    ColumnOperators, AssociationProxyInstance
):
    """an :class:`.AssociationProxyInstance` that has a database column
    as a target.

    Mixes in :class:`.ColumnOperators` so that arbitrary column operators
    applied to the proxy are forwarded to the remote column inside an
    EXISTS subquery (see :meth:`.operate`).
    """
    _target_is_object = False
    _is_canonical = True
    def __eq__(self, other):
        # special case "is None" to check for no related row as well
        expr = self._criterion_exists(
            self.remote_attr.operate(operator.eq, other)
        )
        if other is None:
            return or_(expr, self._comparator == None)
        else:
            return expr
    def operate(self, op, *other, **kwargs):
        # ColumnOperators hook: apply the operator to the remote column
        # and wrap the result in an EXISTS against the relationship
        return self._criterion_exists(
            self.remote_attr.operate(op, *other, **kwargs)
        )
class _lazy_collection(object):
    """Callable that re-fetches ``getattr(parent, target)`` on every
    invocation, so the proxy never holds a stale reference to a
    replaced collection instance."""
    def __init__(self, obj, target):
        self.parent = obj
        self.target = target
    def __call__(self):
        # look the collection up anew each time
        return getattr(self.parent, self.target)
    def __getstate__(self):
        # the historical key name "obj" is kept for pickle compatibility
        return {"obj": self.parent, "target": self.target}
    def __setstate__(self, state):
        self.parent, self.target = state["obj"], state["target"]
class _AssociationCollection(object):
    """Base class for the converting collection proxies.

    Stores the conversion callables and resolves the backing collection
    lazily through ``lazy_collection`` on each access.
    """
    def __init__(self, lazy_collection, creator, getter, setter, parent):
        self.lazy_collection = lazy_collection
        self.creator = creator
        self.getter = getter
        self.setter = setter
        self.parent = parent
    @property
    def col(self):
        # re-resolve the underlying collection on every access
        return self.lazy_collection()
    def __len__(self):
        return len(self.col)
    def __bool__(self):
        return bool(self.col)
    __nonzero__ = __bool__
    def __getstate__(self):
        # only parent + lazy_collection are pickled; the conversion
        # callables are rebuilt by the parent on unpickle
        return {"parent": self.parent, "lazy_collection": self.lazy_collection}
    def __setstate__(self, state):
        self.parent = state["parent"]
        self.lazy_collection = state["lazy_collection"]
        # ask the owning proxy to re-install creator/getter/setter
        self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
    """Generic, converting, list-to-list proxy.

    Wraps the backing list of association objects (``self.col``);
    ``creator``/``getter``/``setter`` convert between proxied values and
    association members in each direction.
    """
    def _create(self, value):
        return self.creator(value)
    def _get(self, object_):
        return self.getter(object_)
    def _set(self, object_, value):
        return self.setter(object_, value)
    def __getitem__(self, index):
        if not isinstance(index, slice):
            return self._get(self.col[index])
        else:
            return [self._get(member) for member in self.col[index]]
    def __setitem__(self, index, value):
        if not isinstance(index, slice):
            self._set(self.col[index], value)
        else:
            # Normalize slice bounds the way builtin list semantics do:
            # None means the respective end, negative values count from
            # the end of the sequence.
            if index.stop is None:
                stop = len(self)
            elif index.stop < 0:
                stop = len(self) + index.stop
            else:
                stop = index.stop
            if index.start is None:
                start = 0
            elif index.start < 0:
                # bugfix: a negative start was previously used verbatim
                # (``start = index.start or 0``), so e.g. ``l[-2:] = [...]``
                # deleted from the wrong position and built a wrong range
                start = max(len(self) + index.start, 0)
            else:
                start = index.start
            step = index.step or 1
            rng = list(range(start, stop, step))
            if step == 1:
                # contiguous slice: delete the old members, then insert
                # the replacement values at the gap
                for i in rng:
                    del self[start]
                i = start
                for item in value:
                    self.insert(i, item)
                    i += 1
            else:
                # extended slice: sizes must match; assign in place
                if len(value) != len(rng):
                    raise ValueError(
                        "attempt to assign sequence of size %s to "
                        "extended slice of size %s" % (len(value), len(rng))
                    )
                for i, item in zip(rng, value):
                    self._set(self.col[i], item)
    def __delitem__(self, index):
        del self.col[index]
    def __contains__(self, value):
        # linear scan: members must be converted before comparison
        for member in self.col:
            # testlib.pragma exempt:__eq__
            if self._get(member) == value:
                return True
        return False
    def __getslice__(self, start, end):
        return [self._get(member) for member in self.col[start:end]]
    def __setslice__(self, start, end, values):
        members = [self._create(v) for v in values]
        self.col[start:end] = members
    def __delslice__(self, start, end):
        del self.col[start:end]
    def __iter__(self):
        """Iterate over proxied values, converting each member on the fly."""
        for member in self.col:
            yield self._get(member)
        return
    def append(self, value):
        col = self.col
        item = self._create(value)
        col.append(item)
    def count(self, value):
        return sum(
            [
                1
                for _ in util.itertools_filter(
                    lambda v: v == value, iter(self)
                )
            ]
        )
    def extend(self, values):
        for v in values:
            self.append(v)
    def insert(self, index, value):
        self.col[index:index] = [self._create(value)]
    def pop(self, index=-1):
        return self.getter(self.col.pop(index))
    def remove(self, value):
        for i, val in enumerate(self):
            if val == value:
                del self.col[i]
                return
        raise ValueError("value not in list")
    def reverse(self):
        """Not supported, use reversed(mylist)"""
        raise NotImplementedError
    def sort(self):
        """Not supported, use sorted(mylist)"""
        raise NotImplementedError
    def clear(self):
        del self.col[0 : len(self.col)]
    def __eq__(self, other):
        return list(self) == other
    def __ne__(self, other):
        return list(self) != other
    def __lt__(self, other):
        return list(self) < other
    def __le__(self, other):
        return list(self) <= other
    def __gt__(self, other):
        return list(self) > other
    def __ge__(self, other):
        return list(self) >= other
    def __cmp__(self, other):
        return util.cmp(list(self), other)
    def __add__(self, iterable):
        try:
            other = list(iterable)
        except TypeError:
            return NotImplemented
        return list(self) + other
    def __radd__(self, iterable):
        try:
            other = list(iterable)
        except TypeError:
            return NotImplemented
        return other + list(self)
    def __mul__(self, n):
        if not isinstance(n, int):
            return NotImplemented
        return list(self) * n
    __rmul__ = __mul__
    def __iadd__(self, iterable):
        self.extend(iterable)
        return self
    def __imul__(self, n):
        # unlike a regular list *=, proxied __imul__ will generate unique
        # backing objects for each copy. *= on proxied lists is a bit of
        # a stretch anyhow, and this interpretation of the __imul__ contract
        # is more plausibly useful than copying the backing objects.
        if not isinstance(n, int):
            return NotImplemented
        if n == 0:
            self.clear()
        elif n > 1:
            self.extend(list(self) * (n - 1))
        return self
    def index(self, item, *args):
        return list(self).index(item, *args)
    def copy(self):
        return list(self)
    def __repr__(self):
        return repr(list(self))
    def __hash__(self):
        raise TypeError("%s objects are unhashable" % type(self).__name__)
    # borrow docstrings from the builtin list for undocumented methods
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(list, func_name)
        ):
            func.__doc__ = getattr(list, func_name).__doc__
    del func_name, func
_NotProvided = util.symbol("_NotProvided")
class _AssociationDict(_AssociationCollection):
    """Generic, converting, dict-to-dict proxy.

    Keys pass through unchanged; values are converted through
    ``creator``/``getter``/``setter`` on their way in and out of the
    backing dictionary of association objects (``self.col``).
    """
    def _create(self, key, value):
        return self.creator(key, value)
    def _get(self, object_):
        return self.getter(object_)
    def _set(self, object_, key, value):
        return self.setter(object_, key, value)
    def __getitem__(self, key):
        return self._get(self.col[key])
    def __setitem__(self, key, value):
        if key in self.col:
            # existing association object: mutate it in place
            self._set(self.col[key], key, value)
        else:
            self.col[key] = self._create(key, value)
    def __delitem__(self, key):
        del self.col[key]
    def __contains__(self, key):
        # testlib.pragma exempt:__hash__
        return key in self.col
    def has_key(self, key):
        # testlib.pragma exempt:__hash__
        return key in self.col
    def __iter__(self):
        return iter(self.col.keys())
    def clear(self):
        self.col.clear()
    def __eq__(self, other):
        return dict(self) == other
    def __ne__(self, other):
        return dict(self) != other
    def __lt__(self, other):
        return dict(self) < other
    def __le__(self, other):
        return dict(self) <= other
    def __gt__(self, other):
        return dict(self) > other
    def __ge__(self, other):
        return dict(self) >= other
    def __cmp__(self, other):
        return util.cmp(dict(self), other)
    def __repr__(self):
        return repr(dict(self.items()))
    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default
    def setdefault(self, key, default=None):
        if key not in self.col:
            # note: returns ``default`` itself rather than re-fetching
            # the just-created value through the getter
            self.col[key] = self._create(key, default)
            return default
        else:
            return self[key]
    def keys(self):
        return self.col.keys()
    if util.py2k:
        def iteritems(self):
            return ((key, self._get(self.col[key])) for key in self.col)
        def itervalues(self):
            return (self._get(self.col[key]) for key in self.col)
        def iterkeys(self):
            return self.col.iterkeys()
        def values(self):
            return [self._get(member) for member in self.col.values()]
        def items(self):
            return [(k, self._get(self.col[k])) for k in self]
    else:
        def items(self):
            return ((key, self._get(self.col[key])) for key in self.col)
        def values(self):
            return (self._get(self.col[key]) for key in self.col)
    def pop(self, key, default=_NotProvided):
        if default is _NotProvided:
            member = self.col.pop(key)
        else:
            member = self.col.pop(key, default)
        # NOTE(review): when ``default`` is returned by col.pop it is still
        # passed through the getter -- long-standing quirk, preserved here
        return self._get(member)
    def popitem(self):
        item = self.col.popitem()
        return (item[0], self._get(item[1]))
    def update(self, *a, **kw):
        """``dict.update()`` semantics: at most one positional mapping or
        sequence of pairs, plus keyword arguments."""
        if len(a) > 1:
            raise TypeError(
                "update expected at most 1 arguments, got %i" % len(a)
            )
        elif len(a) == 1:
            seq_or_map = a[0]
            # discern dict from sequence - took the advice from
            # http://www.voidspace.org.uk/python/articles/duck_typing.shtml
            # still not perfect :(
            if hasattr(seq_or_map, "keys"):
                for item in seq_or_map:
                    self[item] = seq_or_map[item]
            else:
                try:
                    for k, v in seq_or_map:
                        self[k] = v
                except ValueError:
                    raise ValueError(
                        "dictionary update sequence "
                        "requires 2-element tuples"
                    )
        # bugfix: this previously iterated ``kw`` directly, which yields only
        # the key strings and then fails to unpack them into (key, value),
        # so keyword arguments were never applied correctly
        for key, value in kw.items():
            self[key] = value
    def _bulk_replace(self, assoc_proxy, values):
        existing = set(self)
        constants = existing.intersection(values or ())
        additions = set(values or ()).difference(constants)
        removals = existing.difference(constants)
        # robustness: guard against values=None consistently with the
        # ``values or ()`` guards above (previously ``values.items() or ()``
        # raised AttributeError for None)
        for key, member in (values or {}).items():
            if key in additions:
                self[key] = member
            elif key in constants:
                self[key] = member
        for key in removals:
            del self[key]
    def copy(self):
        return dict(self.items())
    def __hash__(self):
        raise TypeError("%s objects are unhashable" % type(self).__name__)
    # borrow docstrings from the builtin dict for undocumented methods
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(dict, func_name)
        ):
            func.__doc__ = getattr(dict, func_name).__doc__
    del func_name, func
class _AssociationSet(_AssociationCollection):
    """Generic, converting, set-to-set proxy.

    Mutating operations convert incoming values with ``creator``; reads
    convert backing members with ``getter``.
    """
    def _create(self, value):
        return self.creator(value)
    def _get(self, object_):
        return self.getter(object_)
    def __len__(self):
        return len(self.col)
    def __bool__(self):
        if self.col:
            return True
        else:
            return False
    __nonzero__ = __bool__
    def __contains__(self, value):
        # linear scan: members must be converted before comparison
        for member in self.col:
            # testlib.pragma exempt:__eq__
            if self._get(member) == value:
                return True
        return False
    def __iter__(self):
        # iterate over proxied values, converting each member on the fly
        for member in self.col:
            yield self._get(member)
        return
    def add(self, value):
        if value not in self:
            self.col.add(self._create(value))
    # for discard and remove, choosing a more expensive check strategy rather
    # than call self.creator()
    def discard(self, value):
        for member in self.col:
            if self._get(member) == value:
                self.col.discard(member)
                break
    def remove(self, value):
        for member in self.col:
            if self._get(member) == value:
                self.col.discard(member)
                return
        raise KeyError(value)
    def pop(self):
        if not self.col:
            raise KeyError("pop from an empty set")
        member = self.col.pop()
        return self._get(member)
    def update(self, other):
        for value in other:
            self.add(value)
    def _bulk_replace(self, assoc_proxy, values):
        # replace contents with ``values``, reusing members that are
        # already present (constants) and removing the rest
        existing = set(self)
        constants = existing.intersection(values or ())
        additions = set(values or ()).difference(constants)
        removals = existing.difference(constants)
        appender = self.add
        remover = self.remove
        for member in values or ():
            if member in additions:
                appender(member)
            elif member in constants:
                appender(member)
        for member in removals:
            remover(member)
    def __ior__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        for value in other:
            self.add(value)
        return self
    def _set(self):
        # materialize the proxied values as a plain builtin set
        return set(iter(self))
    def union(self, other):
        return set(self).union(other)
    __or__ = union
    def difference(self, other):
        return set(self).difference(other)
    __sub__ = difference
    def difference_update(self, other):
        for value in other:
            self.discard(value)
    def __isub__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        for value in other:
            self.discard(value)
        return self
    def intersection(self, other):
        return set(self).intersection(other)
    __and__ = intersection
    def intersection_update(self, other):
        # compute the delta first, then apply removals and additions
        want, have = self.intersection(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
    def __iand__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        want, have = self.intersection(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
        return self
    def symmetric_difference(self, other):
        return set(self).symmetric_difference(other)
    __xor__ = symmetric_difference
    def symmetric_difference_update(self, other):
        want, have = self.symmetric_difference(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
    def __ixor__(self, other):
        if not collections._set_binops_check_strict(self, other):
            return NotImplemented
        want, have = self.symmetric_difference(other), set(self)
        remove, add = have - want, want - have
        for value in remove:
            self.remove(value)
        for value in add:
            self.add(value)
        return self
    def issubset(self, other):
        return set(self).issubset(other)
    def issuperset(self, other):
        return set(self).issuperset(other)
    def clear(self):
        self.col.clear()
    def copy(self):
        return set(self)
    def __eq__(self, other):
        return set(self) == other
    def __ne__(self, other):
        return set(self) != other
    def __lt__(self, other):
        return set(self) < other
    def __le__(self, other):
        return set(self) <= other
    def __gt__(self, other):
        return set(self) > other
    def __ge__(self, other):
        return set(self) >= other
    def __repr__(self):
        return repr(set(self))
    def __hash__(self):
        raise TypeError("%s objects are unhashable" % type(self).__name__)
    # borrow docstrings from the builtin set for undocumented methods
    for func_name, func in list(locals().items()):
        if (
            callable(func)
            and func.__name__ == func_name
            and not func.__doc__
            and hasattr(set, func_name)
        ):
            func.__doc__ = getattr(set, func_name).__doc__
    del func_name, func
| true | true |
f72b47b083e75e7fc1652ef55122a7c099974a7c | 2,333 | py | Python | integreat_cms/cms/views/settings/mfa/authenticate_modify_mfa_view.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
] | 1 | 2022-01-16T01:15:21.000Z | 2022-01-16T01:15:21.000Z | integreat_cms/cms/views/settings/mfa/authenticate_modify_mfa_view.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
] | null | null | null | integreat_cms/cms/views/settings/mfa/authenticate_modify_mfa_view.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
] | null | null | null | """
This module contains all views related to multi-factor authentication
"""
import logging
import time
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import FormView
from ....forms import AuthenticationForm
logger = logging.getLogger(__name__)
@method_decorator(login_required, name="dispatch")
class AuthenticateModifyMfaView(FormView):
    """
    Ask the user to re-enter their password before any change to the
    multi-factor-authentication settings is allowed.
    """
    #: Template rendered for the password prompt (see :class:`~django.views.generic.base.TemplateResponseMixin`)
    template_name = "settings/mfa/authenticate.html"
    #: Form used to collect the password (see :class:`~django.views.generic.edit.FormMixin`)
    form_class = AuthenticationForm
    #: Where to continue once the password has been confirmed (see :class:`~django.views.generic.edit.FormMixin`)
    success_url = reverse_lazy("register_new_mfa_key")
    def form_valid(self, form):
        """
        Check the submitted password against the logged-in user's password
        hash.  On success, record the current timestamp in the session (so
        the authentication can be expired later) and continue either to a
        previously requested MFA page or to the default success URL; on
        failure, re-render the form with an error on the password field.

        :param form: the bound authentication form
        :type form: ~integreat_cms.cms.forms.users.authentication_form.AuthenticationForm

        :return: a redirect on success, the re-rendered form otherwise
        :rtype: ~django.http.HttpResponse
        """
        if not check_password(form.cleaned_data["password"], self.request.user.password):
            form.add_error("password", _("The provided password is not correct"))
            return super().form_invalid(form)
        self.request.session["modify_mfa_authentication_time"] = time.time()
        if "mfa_redirect_url" in self.request.session:
            return redirect(self.request.session["mfa_redirect_url"])
        return super().form_valid(form)
| 44.865385 | 143 | 0.743678 | import logging
import time
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic import FormView
from ....forms import AuthenticationForm
logger = logging.getLogger(__name__)
@method_decorator(login_required, name="dispatch")
class AuthenticateModifyMfaView(FormView):
template_name = "settings/mfa/authenticate.html"
form_class = AuthenticationForm
success_url = reverse_lazy("register_new_mfa_key")
def form_valid(self, form):
if check_password(form.cleaned_data["password"], self.request.user.password):
self.request.session["modify_mfa_authentication_time"] = time.time()
if "mfa_redirect_url" in self.request.session:
return redirect(self.request.session["mfa_redirect_url"])
return super().form_valid(form)
form.add_error("password", _("The provided password is not correct"))
return super().form_invalid(form)
| true | true |
f72b482966239f05aa2cebf8fa85221da508dbeb | 44 | py | Python | CodeUp/6030.py | chae-heechan/Algorithm_Study | 183a77e2cfe352cd82fb5e988b493082529a73dd | [
"MIT"
] | null | null | null | CodeUp/6030.py | chae-heechan/Algorithm_Study | 183a77e2cfe352cd82fb5e988b493082529a73dd | [
"MIT"
] | null | null | null | CodeUp/6030.py | chae-heechan/Algorithm_Study | 183a77e2cfe352cd82fb5e988b493082529a73dd | [
"MIT"
] | null | null | null | # 영문자 1개 입력받아 10진수로 변환하기
print(ord(input())) | 22 | 24 | 0.704545 |
print(ord(input())) | true | true |
f72b491d57302f379d5a1bd917da7dd51854de51 | 11,032 | py | Python | allennlp/tests/common/params_test.py | craigbig/allennlp | 3f15a8bdcae366f3ef732eec1e9df26d91521582 | [
"Apache-2.0"
] | 1 | 2020-02-24T10:21:37.000Z | 2020-02-24T10:21:37.000Z | allennlp/tests/common/params_test.py | craigbig/allennlp | 3f15a8bdcae366f3ef732eec1e9df26d91521582 | [
"Apache-2.0"
] | null | null | null | allennlp/tests/common/params_test.py | craigbig/allennlp | 3f15a8bdcae366f3ef732eec1e9df26d91521582 | [
"Apache-2.0"
] | null | null | null | import json
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import infer_and_cast, Params, parse_overrides, unflatten, with_fallback
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
    """Unit tests for ``Params`` and the helper functions of
    ``allennlp.common.params`` (flattening, overrides, jsonnet support)."""
    def test_load_from_file(self):
        filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
        params = Params.from_file(filename)
        assert "dataset_reader" in params
        assert "trainer" in params
        model_params = params.pop("model")
        assert model_params.pop("type") == "simple_tagger"
    def test_replace_none(self):
        # the literal string "None" is converted to Python None on access
        params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
        assert params["a"] is None
        assert params["b"][1] is None
        assert params["c"]["d"] is None
    def test_bad_unicode_environment_variables(self):
        filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
        # "\udce2" is an unpaired surrogate; from_file must tolerate it.
        # NOTE(review): not wrapped in try/finally, so a failure here leaks
        # the variable into the test process environment.
        os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
        Params.from_file(filename)
        del os.environ["BAD_ENVIRONMENT_VARIABLE"]
    def test_overrides(self):
        filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
        # both whole-subtree and dotted-path overrides are exercised here
        overrides = (
            '{ "train_data_path": "FOO", "model": { "type": "BAR" },'
            '"model.text_field_embedder.tokens.type": "BAZ",'
            '"iterator.sorting_keys.0.0": "question"}'
        )
        params = Params.from_file(filename, overrides)
        assert "dataset_reader" in params
        assert "trainer" in params
        assert params["train_data_path"] == "FOO"
        assert params["iterator"]["sorting_keys"][0][0] == "question"
        model_params = params.pop("model")
        assert model_params.pop("type") == "BAR"
        assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
    def test_unflatten(self):
        flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
        unflattened = unflatten(flattened)
        assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
        # should do nothing to a non-flat dictionary
        assert unflatten(unflattened) == unflattened
    def test_with_fallback(self):
        preferred = {"a": 1}
        fallback = {"a": 0, "b": 2}
        merged = with_fallback(preferred=preferred, fallback=fallback)
        assert merged == {"a": 1, "b": 2}
        # incompatibility is ok
        preferred = {"a": {"c": 3}}
        fallback = {"a": 0, "b": 2}
        merged = with_fallback(preferred=preferred, fallback=fallback)
        assert merged == {"a": {"c": 3}, "b": 2}
        # goes deep
        preferred = {"deep": {"a": 1}}
        fallback = {"deep": {"a": 0, "b": 2}}
        merged = with_fallback(preferred=preferred, fallback=fallback)
        assert merged == {"deep": {"a": 1, "b": 2}}
    def test_parse_overrides(self):
        assert parse_overrides("") == {}
        assert parse_overrides("{}") == {}
        override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
        assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
        params = with_fallback(
            preferred=override_dict,
            fallback={
                "train_data": "/test",
                "model": "simple_tagger",
                "trainer": {"num_epochs": 100, "optimizer": "sgd"},
            },
        )
        assert params == {
            "train_data": "/train",
            "model": "simple_tagger",
            "trainer": {"num_epochs": 10, "optimizer": "sgd"},
        }
    def test_as_flat_dict(self):
        params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
        assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
    def test_jsonnet_features(self):
        config_file = self.TEST_DIR / "config.jsonnet"
        with open(config_file, "w") as f:
            f.write(
                """{
    // This example is copied straight from the jsonnet docs
    person1: {
        name: "Alice",
        welcome: "Hello " + self.name + "!",
    },
    person2: self.person1 { name: "Bob" },
}"""
            )
        params = Params.from_file(config_file)
        alice = params.pop("person1")
        bob = params.pop("person2")
        assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
        assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
        params.assert_empty("TestParams")
    def test_regexes_with_backslashes(self):
        bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
        good_regex = self.TEST_DIR / "good_regex.jsonnet"
        with open(bad_regex, "w") as f:
            # the single backslash makes this file fail to parse
            f.write(r'{"myRegex": "a\.b"}')
        with open(good_regex, "w") as f:
            f.write(r'{"myRegex": "a\\.b"}')
        with pytest.raises(RuntimeError):
            Params.from_file(bad_regex)
        params = Params.from_file(good_regex)
        regex = params["myRegex"]
        assert re.match(regex, "a.b")
        assert not re.match(regex, "a-b")
        # Check roundtripping
        good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
        with open(good_regex2, "w") as f:
            f.write(json.dumps(params.as_dict()))
        params2 = Params.from_file(good_regex2)
        assert params.as_dict() == params2.as_dict()
    def test_env_var_substitution(self):
        substitutor = self.TEST_DIR / "substitutor.jsonnet"
        key = "TEST_ENV_VAR_SUBSTITUTION"
        assert os.environ.get(key) is None
        with open(substitutor, "w") as f:
            f.write(f'{{"path": std.extVar("{key}")}}')
        # raises without environment variable set
        with pytest.raises(RuntimeError):
            Params.from_file(substitutor)
        os.environ[key] = "PERFECT"
        params = Params.from_file(substitutor)
        assert params["path"] == "PERFECT"
        del os.environ[key]
    @pytest.mark.xfail(
        not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
        reason="Training configs not installed with pip",
    )
    def test_known_configs(self):
        configs = os.listdir(self.PROJECT_ROOT / "training_config")
        # Our configs use environment variable substitution, and the _jsonnet parser
        # will fail if we don't pass it correct environment variables.
        forced_variables = [
            # constituency parser
            "PTB_TRAIN_PATH",
            "PTB_DEV_PATH",
            "PTB_TEST_PATH",
            # dependency parser
            "PTB_DEPENDENCIES_TRAIN",
            "PTB_DEPENDENCIES_VAL",
            # multilingual dependency parser
            "TRAIN_PATHNAME",
            "DEV_PATHNAME",
            "TEST_PATHNAME",
            # srl_elmo_5.5B
            "SRL_TRAIN_DATA_PATH",
            "SRL_VALIDATION_DATA_PATH",
            # coref
            "COREF_TRAIN_DATA_PATH",
            "COREF_DEV_DATA_PATH",
            "COREF_TEST_DATA_PATH",
            # ner
            "NER_TRAIN_DATA_PATH",
            "NER_TEST_A_PATH",
            "NER_TEST_B_PATH",
            # bidirectional lm
            "BIDIRECTIONAL_LM_TRAIN_PATH",
            "BIDIRECTIONAL_LM_VOCAB_PATH",
            "BIDIRECTIONAL_LM_ARCHIVE_PATH",
        ]
        for var in forced_variables:
            os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
        for config in configs:
            try:
                Params.from_file(self.PROJECT_ROOT / "training_config" / config)
            except Exception as e:
                raise AssertionError(f"unable to load params for {config}, because {e}")
        # only remove the variables this test itself set
        for var in forced_variables:
            if os.environ[var] == str(self.TEST_DIR):
                del os.environ[var]
    def test_as_ordered_dict(self):
        # keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
        preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
        params = Params(
            {
                "keyC": "valC",
                "keyB": "valB",
                "keyA": "valA",
                "keyE": "valE",
                "keyD": {"keyDB": "valDB", "keyDA": "valDA"},
            }
        )
        ordered_params_dict = params.as_ordered_dict(preference_orders)
        expected_ordered_params_dict = OrderedDict(
            {
                "keyD": {"keyDA": "valDA", "keyDB": "valDB"},
                "keyC": "valC",
                "keyE": "valE",
                "keyA": "valA",
                "keyB": "valB",
            }
        )
        assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
    def test_to_file(self):
        # Test to_file works with or without preference orders
        params_dict = {"keyA": "valA", "keyB": "valB"}
        expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
        params = Params(params_dict)
        file_path = self.TEST_DIR / "config.jsonnet"
        # check with preference orders
        params.to_file(file_path, [["keyB", "keyA"]])
        with open(file_path, "r") as handle:
            ordered_params_dict = OrderedDict(json.load(handle))
        assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
        # check without preference orders doesn't give error
        params.to_file(file_path)
    def test_infer_and_cast(self):
        lots_of_strings = {
            "a": ["10", "1.3", "true"],
            "b": {"x": 10, "y": "20.1", "z": "other things"},
            "c": "just a string",
        }
        casted = {
            "a": [10, 1.3, True],
            "b": {"x": 10, "y": 20.1, "z": "other things"},
            "c": "just a string",
        }
        assert infer_and_cast(lots_of_strings) == casted
        contains_bad_data = {"x": 10, "y": int}
        with pytest.raises(ValueError, match="cannot infer type"):
            infer_and_cast(contains_bad_data)
        params = Params(lots_of_strings)
        assert params.as_dict() == lots_of_strings
        assert params.as_dict(infer_type_and_cast=True) == casted
    def test_pop_choice(self):
        choices = ["my_model", "other_model"]
        params = Params({"model": "my_model"})
        assert params.pop_choice("model", choices) == "my_model"
        params = Params({"model": "non_existent_model"})
        with pytest.raises(ConfigurationError):
            params.pop_choice("model", choices)
        params = Params({"model": "module.submodule.ModelName"})
        # NOTE(review): "choices" below is a string, not the list defined
        # above -- likely a typo, though the assertion still passes because
        # dotted names are accepted as class names by default
        assert params.pop_choice("model", "choices") == "module.submodule.ModelName"
        params = Params({"model": "module.submodule.ModelName"})
        with pytest.raises(ConfigurationError):
            params.pop_choice("model", choices, allow_class_names=False)
import os
import re
from collections import OrderedDict
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import infer_and_cast, Params, parse_overrides, unflatten, with_fallback
from allennlp.common.testing import AllenNlpTestCase
class TestParams(AllenNlpTestCase):
def test_load_from_file(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
params = Params.from_file(filename)
assert "dataset_reader" in params
assert "trainer" in params
model_params = params.pop("model")
assert model_params.pop("type") == "simple_tagger"
def test_replace_none(self):
params = Params({"a": "None", "b": [1.0, "None", 2], "c": {"d": "None"}})
assert params["a"] is None
assert params["b"][1] is None
assert params["c"]["d"] is None
def test_bad_unicode_environment_variables(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
os.environ["BAD_ENVIRONMENT_VARIABLE"] = "\udce2"
Params.from_file(filename)
del os.environ["BAD_ENVIRONMENT_VARIABLE"]
def test_overrides(self):
filename = self.FIXTURES_ROOT / "simple_tagger" / "experiment.json"
overrides = (
'{ "train_data_path": "FOO", "model": { "type": "BAR" },'
'"model.text_field_embedder.tokens.type": "BAZ",'
'"iterator.sorting_keys.0.0": "question"}'
)
params = Params.from_file(filename, overrides)
assert "dataset_reader" in params
assert "trainer" in params
assert params["train_data_path"] == "FOO"
assert params["iterator"]["sorting_keys"][0][0] == "question"
model_params = params.pop("model")
assert model_params.pop("type") == "BAR"
assert model_params["text_field_embedder"]["tokens"]["type"] == "BAZ"
def test_unflatten(self):
flattened = {"a.b.c": 1, "a.b.d": 0, "a.e.f.g.h": 2, "b": 3}
unflattened = unflatten(flattened)
assert unflattened == {"a": {"b": {"c": 1, "d": 0}, "e": {"f": {"g": {"h": 2}}}}, "b": 3}
assert unflatten(unflattened) == unflattened
def test_with_fallback(self):
preferred = {"a": 1}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": 1, "b": 2}
preferred = {"a": {"c": 3}}
fallback = {"a": 0, "b": 2}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"a": {"c": 3}, "b": 2}
preferred = {"deep": {"a": 1}}
fallback = {"deep": {"a": 0, "b": 2}}
merged = with_fallback(preferred=preferred, fallback=fallback)
assert merged == {"deep": {"a": 1, "b": 2}}
def test_parse_overrides(self):
assert parse_overrides("") == {}
assert parse_overrides("{}") == {}
override_dict = parse_overrides('{"train_data": "/train", "trainer.num_epochs": 10}')
assert override_dict == {"train_data": "/train", "trainer": {"num_epochs": 10}}
params = with_fallback(
preferred=override_dict,
fallback={
"train_data": "/test",
"model": "simple_tagger",
"trainer": {"num_epochs": 100, "optimizer": "sgd"},
},
)
assert params == {
"train_data": "/train",
"model": "simple_tagger",
"trainer": {"num_epochs": 10, "optimizer": "sgd"},
}
def test_as_flat_dict(self):
params = Params({"a": 10, "b": {"c": 20, "d": "stuff"}}).as_flat_dict()
assert params == {"a": 10, "b.c": 20, "b.d": "stuff"}
def test_jsonnet_features(self):
config_file = self.TEST_DIR / "config.jsonnet"
with open(config_file, "w") as f:
f.write(
"""{
// This example is copied straight from the jsonnet docs
person1: {
name: "Alice",
welcome: "Hello " + self.name + "!",
},
person2: self.person1 { name: "Bob" },
}"""
)
params = Params.from_file(config_file)
alice = params.pop("person1")
bob = params.pop("person2")
assert alice.as_dict() == {"name": "Alice", "welcome": "Hello Alice!"}
assert bob.as_dict() == {"name": "Bob", "welcome": "Hello Bob!"}
params.assert_empty("TestParams")
def test_regexes_with_backslashes(self):
bad_regex = self.TEST_DIR / "bad_regex.jsonnet"
good_regex = self.TEST_DIR / "good_regex.jsonnet"
with open(bad_regex, "w") as f:
f.write(r'{"myRegex": "a\.b"}')
with open(good_regex, "w") as f:
f.write(r'{"myRegex": "a\\.b"}')
with pytest.raises(RuntimeError):
Params.from_file(bad_regex)
params = Params.from_file(good_regex)
regex = params["myRegex"]
assert re.match(regex, "a.b")
assert not re.match(regex, "a-b")
good_regex2 = self.TEST_DIR / "good_regex2.jsonnet"
with open(good_regex2, "w") as f:
f.write(json.dumps(params.as_dict()))
params2 = Params.from_file(good_regex2)
assert params.as_dict() == params2.as_dict()
def test_env_var_substitution(self):
substitutor = self.TEST_DIR / "substitutor.jsonnet"
key = "TEST_ENV_VAR_SUBSTITUTION"
assert os.environ.get(key) is None
with open(substitutor, "w") as f:
f.write(f'{{"path": std.extVar("{key}")}}')
with pytest.raises(RuntimeError):
Params.from_file(substitutor)
os.environ[key] = "PERFECT"
params = Params.from_file(substitutor)
assert params["path"] == "PERFECT"
del os.environ[key]
@pytest.mark.xfail(
not os.path.exists(AllenNlpTestCase.PROJECT_ROOT / "training_config"),
reason="Training configs not installed with pip",
)
def test_known_configs(self):
configs = os.listdir(self.PROJECT_ROOT / "training_config")
forced_variables = [
# constituency parser
"PTB_TRAIN_PATH",
"PTB_DEV_PATH",
"PTB_TEST_PATH",
# dependency parser
"PTB_DEPENDENCIES_TRAIN",
"PTB_DEPENDENCIES_VAL",
# multilingual dependency parser
"TRAIN_PATHNAME",
"DEV_PATHNAME",
"TEST_PATHNAME",
# srl_elmo_5.5B
"SRL_TRAIN_DATA_PATH",
"SRL_VALIDATION_DATA_PATH",
# coref
"COREF_TRAIN_DATA_PATH",
"COREF_DEV_DATA_PATH",
"COREF_TEST_DATA_PATH",
# ner
"NER_TRAIN_DATA_PATH",
"NER_TEST_A_PATH",
"NER_TEST_B_PATH",
# bidirectional lm
"BIDIRECTIONAL_LM_TRAIN_PATH",
"BIDIRECTIONAL_LM_VOCAB_PATH",
"BIDIRECTIONAL_LM_ARCHIVE_PATH",
]
for var in forced_variables:
os.environ[var] = os.environ.get(var) or str(self.TEST_DIR)
for config in configs:
try:
Params.from_file(self.PROJECT_ROOT / "training_config" / config)
except Exception as e:
raise AssertionError(f"unable to load params for {config}, because {e}")
for var in forced_variables:
if os.environ[var] == str(self.TEST_DIR):
del os.environ[var]
def test_as_ordered_dict(self):
# keyD > keyC > keyE; keyDA > keyDB; Next all other keys alphabetically
preference_orders = [["keyD", "keyC", "keyE"], ["keyDA", "keyDB"]]
params = Params(
{
"keyC": "valC",
"keyB": "valB",
"keyA": "valA",
"keyE": "valE",
"keyD": {"keyDB": "valDB", "keyDA": "valDA"},
}
)
ordered_params_dict = params.as_ordered_dict(preference_orders)
expected_ordered_params_dict = OrderedDict(
{
"keyD": {"keyDA": "valDA", "keyDB": "valDB"},
"keyC": "valC",
"keyE": "valE",
"keyA": "valA",
"keyB": "valB",
}
)
assert json.dumps(ordered_params_dict) == json.dumps(expected_ordered_params_dict)
def test_to_file(self):
# Test to_file works with or without preference orders
params_dict = {"keyA": "valA", "keyB": "valB"}
expected_ordered_params_dict = OrderedDict({"keyB": "valB", "keyA": "valA"})
params = Params(params_dict)
file_path = self.TEST_DIR / "config.jsonnet"
# check with preference orders
params.to_file(file_path, [["keyB", "keyA"]])
with open(file_path, "r") as handle:
ordered_params_dict = OrderedDict(json.load(handle))
assert json.dumps(expected_ordered_params_dict) == json.dumps(ordered_params_dict)
# check without preference orders doesn't give error
params.to_file(file_path)
def test_infer_and_cast(self):
lots_of_strings = {
"a": ["10", "1.3", "true"],
"b": {"x": 10, "y": "20.1", "z": "other things"},
"c": "just a string",
}
casted = {
"a": [10, 1.3, True],
"b": {"x": 10, "y": 20.1, "z": "other things"},
"c": "just a string",
}
assert infer_and_cast(lots_of_strings) == casted
contains_bad_data = {"x": 10, "y": int}
with pytest.raises(ValueError, match="cannot infer type"):
infer_and_cast(contains_bad_data)
params = Params(lots_of_strings)
assert params.as_dict() == lots_of_strings
assert params.as_dict(infer_type_and_cast=True) == casted
def test_pop_choice(self):
choices = ["my_model", "other_model"]
params = Params({"model": "my_model"})
assert params.pop_choice("model", choices) == "my_model"
params = Params({"model": "non_existent_model"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices)
params = Params({"model": "module.submodule.ModelName"})
assert params.pop_choice("model", "choices") == "module.submodule.ModelName"
params = Params({"model": "module.submodule.ModelName"})
with pytest.raises(ConfigurationError):
params.pop_choice("model", choices, allow_class_names=False)
| true | true |
f72b4933b2a73e756ab6c71f5af4eb81142adabf | 978 | py | Python | ecl/tests/functional/baremetal/test_stock.py | nttcom/eclsdk | 866bea3290806332f677344982f39cb8e5155480 | [
"Apache-2.0"
] | 5 | 2017-04-07T06:23:04.000Z | 2019-11-19T00:52:34.000Z | ecl/tests/functional/baremetal/test_stock.py | nttcom/eclsdk | 866bea3290806332f677344982f39cb8e5155480 | [
"Apache-2.0"
] | 16 | 2018-09-12T11:14:40.000Z | 2021-04-19T09:02:44.000Z | ecl/tests/functional/baremetal/test_stock.py | nttcom/eclsdk | 866bea3290806332f677344982f39cb8e5155480 | [
"Apache-2.0"
] | 14 | 2017-05-11T14:26:26.000Z | 2021-07-14T14:00:06.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from ecl.tests.functional import base
class TestStock(base.BaseFunctionalTest):
    """Functional checks for the baremetal stock API."""

    def test_01_get_stock(self):
        """Fetching stock for a known flavor/zone returns well-typed fields."""
        result = self.conn.baremetal.get_stock(
            "44d4ce9e-cf3d-4853-bdc9-95680bf95668", "groupa"
        )
        # six.string_types keeps the checks valid on both Python 2 and 3.
        self.assertIsInstance(result.flavor_id, six.string_types)
        self.assertIsInstance(result.availability_zone, six.string_types)
        self.assertIsInstance(result.stock, bool)
| 37.615385 | 75 | 0.742331 |
import six
from ecl.tests.functional import base
class TestStock(base.BaseFunctionalTest):
def test_01_get_stock(self):
stock = self.conn.baremetal.get_stock(
"44d4ce9e-cf3d-4853-bdc9-95680bf95668", "groupa"
)
self.assertIsInstance(stock.flavor_id, six.string_types)
self.assertIsInstance(stock.availability_zone, six.string_types)
self.assertIsInstance(stock.stock, bool)
| true | true |
f72b4949f2249b81afca92d0ad7c2bcf75710a96 | 9,579 | py | Python | fudge/productData/distributions/distribution.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | fudge/productData/distributions/distribution.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | fudge/productData/distributions/distribution.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""Distribution class."""
from PoPs import IDs as IDsPoPsModule
from fudge import abstractClasses as abstractClassesModule
from xData import standards as standardsModule
from . import angular as angularModule
from . import energy as energyModule
from . import energyAngular as energyAngularModule
from . import energyAngularMC as energyAngularMCModule
from . import angularEnergyMC as angularEnergyMCModule
from . import KalbachMann as KalbachMannModule
from . import angularEnergy as angularEnergyModule
from . import LLNL_angularEnergy as LLNL_angularEnergyModule
from . import uncorrelated as uncorrelatedModule
from . import Legendre as LegendreModule
from . import photonScattering as photonScatteringModule
from . import reference as referenceModule
from . import multiGroup as multiGroupModule
from . import unspecified as unspecifiedModule
from . import branching3d as branching3dModule
# probably missing stuff from photonScattering.py.
__metaclass__ = type
class component( abstractClassesModule.component ) :
    """Suite of distribution forms (styles) for one outgoing product of a reaction."""

    moniker = 'distribution'
    def __init__( self ) :
        # Register every distribution form class that this suite may contain.
        abstractClassesModule.component.__init__( self, ( angularModule.form, angularModule.twoBodyForm,
                KalbachMannModule.form,
                energyAngularModule.form, energyAngularMCModule.form,
                angularEnergyModule.form, angularEnergyMCModule.form,
                LLNL_angularEnergyModule.LLNLAngularEnergyForm,
                uncorrelatedModule.form, LegendreModule.form, referenceModule.form,
                referenceModule.CoulombPlusNuclearElastic, referenceModule.thermalNeutronScatteringLaw,
                photonScatteringModule.coherentPhotonScattering.form, photonScatteringModule.incoherentPhotonScattering.form,
                multiGroupModule.form, unspecifiedModule.form, branching3dModule.form ) )
    def energySpectrumAtEnergy( self, energyIn, frame, **kwargs ) :
        """Returns the energy spectrum in the lab frame for the specified incident energy."""
        styleLabel = kwargs.get( 'styleLabel', self.evaluated.label )
        form = self[styleLabel]
        # Use the form directly only when it can produce a spectrum in the requested
        # frame; lab -> center-of-mass translation is not supported below.
        if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
            if( frame == standardsModule.frames.centerOfMassToken ) :
                if( form.productFrame == standardsModule.frames.labToken ) : form = None
        else :
            form = None
        if( form is not None ) :
            return( form.energySpectrumAtEnergy( energyIn, frame ) )
        else :
            # Fallback: warn and return an empty spectrum with the proper axes.
            form = self[styleLabel]
            if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
                print( '    WARNING: lab to center-of-mass translation not supported.' )
            else :
                print( '    WARNING: distribution "%s" does not have energySpectrumAtEnergy method.' % form.moniker )
                print( '        %s' % self.toXLink( ) )
            return( energyModule.XYs1d( axes = energyModule.defaultAxes( form.domainUnit ) ) )
    def getSpectrumAtEnergy( self, energy ) :
        """This method is deprecated, use energySpectrumAtEnergy instead. Returns the energy spectrum for self at projectile energy."""
        return( self.energySpectrumAtEnergy( energy, standardsModule.frames.labToken ) )
    def calculateAverageProductData( self, style, indent = '', **kwargs ) :
        """Delegates to the form matching the requested derived style; raises if none matches."""
        form = style.findFormMatchingDerivedStyle( self )
        if( form is None ) : raise Exception( 'No matching style' )
        return( form.calculateAverageProductData( style, indent = indent, **kwargs ) )
    def check( self, info ):
        """check all distribution forms"""
        from fudge import warning
        warnings = []
        for form in self:
            if info['isTwoBody']:
                # Two-body kinematics require center-of-mass data and a 2-body form.
                if( form.productFrame != standardsModule.frames.centerOfMassToken ) :
                    warnings.append( warning.wrong2BodyFrame( form ) )
                if form.moniker not in (angularModule.twoBodyForm.moniker,
                                        referenceModule.form.moniker,
                                        referenceModule.CoulombPlusNuclearElastic.moniker,
                                        unspecifiedModule.form.moniker):
                    warnings.append( warning.wrongDistributionComponent( form.moniker, '2-body' ) )
            else:
                if form.moniker in (angularModule.twoBodyForm.moniker,
                                    angularModule.form.moniker,
                                    energyModule.form.moniker):
                    warnings.append( warning.wrongDistributionComponent( form.moniker, 'N-body' ) )
            def checkSubform( subform, contextMessage ):
                # Validates one subform: domain consistency against the cross section
                # domain, then the subform's own check() when it provides one.
                distributionErrors = []
                if hasattr(subform, 'domainMin') and (subform.domainMin, subform.domainMax) != info['crossSectionDomain']:
                    domain = (subform.domainMin, subform.domainMax)
                    # For gamma products, domainMin should be >= cross section start, upper bounds should match.
                    if( self.ancestor.id == IDsPoPsModule.photon ) :
                        startRatio = subform.domainMin / info['crossSectionDomain'][0]
                        endRatio = subform.domainMax / info['crossSectionDomain'][1]
                        if (startRatio < 1-standardsModule.floats.epsilon or endRatio < 1-standardsModule.floats.epsilon
                                or endRatio > 1+standardsModule.floats.epsilon):
                            distributionErrors.append( warning.domain_mismatch(
                                    *(domain + info['crossSectionDomain']), obj=subform ) )
                    # For all other products, check lower and upper edges: only warn if they disagree by > eps
                    else:
                        for e1,e2 in zip(domain, info['crossSectionDomain']):
                            ratio = e1 / e2
                            if (ratio < 1-standardsModule.floats.epsilon or ratio > 1+standardsModule.floats.epsilon):
                                distributionErrors.append( warning.domain_mismatch(
                                        *(domain + info['crossSectionDomain']), obj=subform ) )
                                break
                if not hasattr(subform,'check'):
                    distributionErrors.append( warning.NotImplemented(subform.moniker, subform ) )
                    if info['failOnException']:
                        raise NotImplementedError("Checking distribution form '%s'" % subform.moniker)
                else:
                    distributionErrors += subform.check( info )
                if distributionErrors:
                    warnings.append( warning.context( contextMessage + " - %s:" % subform.moniker, distributionErrors) )
            # Uncorrelated and Kalbach-Mann forms carry their subform data differently.
            if isinstance(form, uncorrelatedModule.form):
                for subformName in ('angularSubform','energySubform'):
                    subform = getattr(form, subformName ).data
                    checkSubform( subform, 'uncorrelated - ' + subformName.replace('Subform','') )
            elif isinstance(form, KalbachMannModule.form):
                checkSubform( form, form.moniker )
            else:
                for subform in form.subforms:
                    checkSubform( subform, form.moniker )
        return warnings
    def diff( self, other, diffResults ) :
        # Records a difference only when one suite has data and the other does not.
        if( self.hasData( ) != other.hasData( ) ) :
            if( self.hasData( ) ) :
                diffResults.append( 'Distribution unspecified - 2', '', self.toXLink( ), other.toXLink( ) )
            else :
                diffResults.append( 'Distribution unspecified - 1', '', self.toXLink( ), other.toXLink( ) )
    def patch( self, other ) :
        # Intentionally a no-op: distribution suites are not patched.
        pass
    def findEntity( self, entityName, attribute = None, value = None ):
        """
        Overrides ancestry.findEntity. Need ability to find specific distribution component
        """
        if attribute is not None:
            for entity in self:
                if entity.moniker == entityName and getattr(entity,attribute) == value:
                    return entity
        else:
            for entity in self:
                if entity.moniker == entityName:
                    return entity
        return abstractClassesModule.component.findEntity( self, entityName, attribute, value )
    def hasData( self ) :
        """
        Returns False if self's only has unspecified form; otherwise, returns True.
        """
        for form in self :
            if( not( isinstance( form, unspecifiedModule.form ) ) ) : return( True )
        return( False )
    def integrate( self, reaction_suite, energyIn, energyOut = None, muOut = None, phiOut = None, frame = standardsModule.frames.productToken, LegendreOrder = 0 ) :
        # Delegates to the first form's integrate method when it exists; returns 0.0
        # otherwise (and when the suite is empty).
        if( len( self ) > 0 ) :
            form = self[0]
            # if( form.productFrame == standardsModule.frames.centerOfMassToken ) : return( 0.0 )
            if( hasattr( form, 'integrate' ) ) :
                return( form.integrate( reaction_suite, energyIn, energyOut = energyOut, muOut = muOut, phiOut = phiOut, frame = frame, LegendreOrder = LegendreOrder ) )
            else :
                print( 'missing integrate', type( form ) )
        return( 0.0 )
    def toPointwise_withLinearXYs( self, **kwargs ) :
        # Converts the evaluated form to a pointwise lin-lin representation.
        return( self.evaluated.toPointwise_withLinearXYs( **kwargs ) )
| 48.135678 | 169 | 0.614156 |
from PoPs import IDs as IDsPoPsModule
from fudge import abstractClasses as abstractClassesModule
from xData import standards as standardsModule
from . import angular as angularModule
from . import energy as energyModule
from . import energyAngular as energyAngularModule
from . import energyAngularMC as energyAngularMCModule
from . import angularEnergyMC as angularEnergyMCModule
from . import KalbachMann as KalbachMannModule
from . import angularEnergy as angularEnergyModule
from . import LLNL_angularEnergy as LLNL_angularEnergyModule
from . import uncorrelated as uncorrelatedModule
from . import Legendre as LegendreModule
from . import photonScattering as photonScatteringModule
from . import reference as referenceModule
from . import multiGroup as multiGroupModule
from . import unspecified as unspecifiedModule
from . import branching3d as branching3dModule
__metaclass__ = type
class component( abstractClassesModule.component ) :
moniker = 'distribution'
def __init__( self ) :
abstractClassesModule.component.__init__( self, ( angularModule.form, angularModule.twoBodyForm,
KalbachMannModule.form,
energyAngularModule.form, energyAngularMCModule.form,
angularEnergyModule.form, angularEnergyMCModule.form,
LLNL_angularEnergyModule.LLNLAngularEnergyForm,
uncorrelatedModule.form, LegendreModule.form, referenceModule.form,
referenceModule.CoulombPlusNuclearElastic, referenceModule.thermalNeutronScatteringLaw,
photonScatteringModule.coherentPhotonScattering.form, photonScatteringModule.incoherentPhotonScattering.form,
multiGroupModule.form, unspecifiedModule.form, branching3dModule.form ) )
def energySpectrumAtEnergy( self, energyIn, frame, **kwargs ) :
styleLabel = kwargs.get( 'styleLabel', self.evaluated.label )
form = self[styleLabel]
if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
if( frame == standardsModule.frames.centerOfMassToken ) :
if( form.productFrame == standardsModule.frames.labToken ) : form = None
else :
form = None
if( form is not None ) :
return( form.energySpectrumAtEnergy( energyIn, frame ) )
else :
form = self[styleLabel]
if( hasattr( form, 'energySpectrumAtEnergy' ) ) :
print( ' WARNING: lab to center-of-mass translation not supported.' )
else :
print( ' WARNING: distribution "%s" does not have energySpectrumAtEnergy method.' % form.moniker )
print( ' %s' % self.toXLink( ) )
return( energyModule.XYs1d( axes = energyModule.defaultAxes( form.domainUnit ) ) )
def getSpectrumAtEnergy( self, energy ) :
return( self.energySpectrumAtEnergy( energy, standardsModule.frames.labToken ) )
def calculateAverageProductData( self, style, indent = '', **kwargs ) :
form = style.findFormMatchingDerivedStyle( self )
if( form is None ) : raise Exception( 'No matching style' )
return( form.calculateAverageProductData( style, indent = indent, **kwargs ) )
def check( self, info ):
from fudge import warning
warnings = []
for form in self:
if info['isTwoBody']:
if( form.productFrame != standardsModule.frames.centerOfMassToken ) :
warnings.append( warning.wrong2BodyFrame( form ) )
if form.moniker not in (angularModule.twoBodyForm.moniker,
referenceModule.form.moniker,
referenceModule.CoulombPlusNuclearElastic.moniker,
unspecifiedModule.form.moniker):
warnings.append( warning.wrongDistributionComponent( form.moniker, '2-body' ) )
else:
if form.moniker in (angularModule.twoBodyForm.moniker,
angularModule.form.moniker,
energyModule.form.moniker):
warnings.append( warning.wrongDistributionComponent( form.moniker, 'N-body' ) )
def checkSubform( subform, contextMessage ):
distributionErrors = []
if hasattr(subform, 'domainMin') and (subform.domainMin, subform.domainMax) != info['crossSectionDomain']:
domain = (subform.domainMin, subform.domainMax)
if( self.ancestor.id == IDsPoPsModule.photon ) :
startRatio = subform.domainMin / info['crossSectionDomain'][0]
endRatio = subform.domainMax / info['crossSectionDomain'][1]
if (startRatio < 1-standardsModule.floats.epsilon or endRatio < 1-standardsModule.floats.epsilon
or endRatio > 1+standardsModule.floats.epsilon):
distributionErrors.append( warning.domain_mismatch(
*(domain + info['crossSectionDomain']), obj=subform ) )
else:
for e1,e2 in zip(domain, info['crossSectionDomain']):
ratio = e1 / e2
if (ratio < 1-standardsModule.floats.epsilon or ratio > 1+standardsModule.floats.epsilon):
distributionErrors.append( warning.domain_mismatch(
*(domain + info['crossSectionDomain']), obj=subform ) )
break
if not hasattr(subform,'check'):
distributionErrors.append( warning.NotImplemented(subform.moniker, subform ) )
if info['failOnException']:
raise NotImplementedError("Checking distribution form '%s'" % subform.moniker)
else:
distributionErrors += subform.check( info )
if distributionErrors:
warnings.append( warning.context( contextMessage + " - %s:" % subform.moniker, distributionErrors) )
if isinstance(form, uncorrelatedModule.form):
for subformName in ('angularSubform','energySubform'):
subform = getattr(form, subformName ).data
checkSubform( subform, 'uncorrelated - ' + subformName.replace('Subform','') )
elif isinstance(form, KalbachMannModule.form):
checkSubform( form, form.moniker )
else:
for subform in form.subforms:
checkSubform( subform, form.moniker )
return warnings
def diff( self, other, diffResults ) :
if( self.hasData( ) != other.hasData( ) ) :
if( self.hasData( ) ) :
diffResults.append( 'Distribution unspecified - 2', '', self.toXLink( ), other.toXLink( ) )
else :
diffResults.append( 'Distribution unspecified - 1', '', self.toXLink( ), other.toXLink( ) )
def patch( self, other ) :
pass
def findEntity( self, entityName, attribute = None, value = None ):
if attribute is not None:
for entity in self:
if entity.moniker == entityName and getattr(entity,attribute) == value:
return entity
else:
for entity in self:
if entity.moniker == entityName:
return entity
return abstractClassesModule.component.findEntity( self, entityName, attribute, value )
def hasData( self ) :
for form in self :
if( not( isinstance( form, unspecifiedModule.form ) ) ) : return( True )
return( False )
def integrate( self, reaction_suite, energyIn, energyOut = None, muOut = None, phiOut = None, frame = standardsModule.frames.productToken, LegendreOrder = 0 ) :
if( len( self ) > 0 ) :
form = self[0]
if( hasattr( form, 'integrate' ) ) :
return( form.integrate( reaction_suite, energyIn, energyOut = energyOut, muOut = muOut, phiOut = phiOut, frame = frame, LegendreOrder = LegendreOrder ) )
else :
print( 'missing integrate', type( form ) )
return( 0.0 )
def toPointwise_withLinearXYs( self, **kwargs ) :
return( self.evaluated.toPointwise_withLinearXYs( **kwargs ) )
| true | true |
f72b4999422344e122188abc17640c29c420b644 | 5,593 | py | Python | tfx/components/example_gen/base_example_gen_executor_test.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | 1 | 2020-11-24T16:59:37.000Z | 2020-11-24T16:59:37.000Z | tfx/components/example_gen/base_example_gen_executor_test.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | tfx/components/example_gen/base_example_gen_executor_test.py | pingsutw/tfx | bf0d1d74e3f6ea429989fc7b80b82bea08077857 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.base_example_gen_executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import apache_beam as beam
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
@beam.ptransform_fn
def _TestInputSourceToExamplePTransform(
    pipeline,
    input_dict,  # pylint: disable=unused-argument
    exec_properties,  # pylint: disable=unused-argument
    split_pattern):
  """Creates a PCollection of mock tf.Examples whose size depends on the split."""
  split_sizes = {'single/*': 30000, 'train/*': 20000, 'eval/*': 10000}
  size = split_sizes.get(split_pattern, 0)
  assert size != 0
  mock_examples = []
  for i in range(size):
    # Each feature is left empty roughly one time in ten to exercise
    # missing-value handling; otherwise it carries the index value.
    int_feature = (
        tf.train.Feature() if random.randrange(10) == 0 else
        tf.train.Feature(int64_list=tf.train.Int64List(value=[i])))
    float_feature = (
        tf.train.Feature() if random.randrange(10) == 0 else
        tf.train.Feature(float_list=tf.train.FloatList(value=[float(i)])))
    bytes_feature = (
        tf.train.Feature() if random.randrange(10) == 0 else
        tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))])))
    mock_examples.append(
        tf.train.Example(
            features=tf.train.Features(
                feature={'i': int_feature,
                         'f': float_feature,
                         's': bytes_feature})))
  return pipeline | beam.Create(mock_examples)
class TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):
  """Concrete executor whose input source is the in-memory mock PTransform."""

  def GetInputSourceToExamplePTransform(self):
    # The base executor calls this to obtain the input-to-Example conversion.
    return _TestInputSourceToExamplePTransform
class BaseExampleGenExecutorTest(tf.test.TestCase):
  """Tests for BaseExampleGenExecutor using the mock input PTransform."""

  def setUp(self):
    super(BaseExampleGenExecutorTest, self).setUp()
    output_data_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)
    # Create output dict.
    examples = standard_artifacts.Examples()
    examples.uri = output_data_dir
    examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
    self._output_dict = {'examples': [examples]}
    # Expected per-split output files (single gzip shard each).
    self._train_output_file = os.path.join(examples.uri, 'train',
                                           'data_tfrecord-00000-of-00001.gz')
    self._eval_output_file = os.path.join(examples.uri, 'eval',
                                          'data_tfrecord-00000-of-00001.gz')

  def testDoInputSplit(self):
    """Pre-split input ('train'/'eval' patterns) is carried through per split."""
    # Create exec properties.
    exec_properties = {
        'input_config':
            json_format.MessageToJson(
                example_gen_pb2.Input(splits=[
                    example_gen_pb2.Input.Split(
                        name='train', pattern='train/*'),
                    example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
                ]),
                preserving_proto_field_name=True),
        'output_config':
            json_format.MessageToJson(
                example_gen_pb2.Output(), preserving_proto_field_name=True)
    }

    # Run executor.
    example_gen = TestExampleGenExecutor()
    example_gen.Do({}, self._output_dict, exec_properties)

    # Check example gen outputs.
    self.assertTrue(tf.io.gfile.exists(self._train_output_file))
    self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
    # Input train split is bigger than eval split.
    self.assertGreater(
        tf.io.gfile.GFile(self._train_output_file).size(),
        tf.io.gfile.GFile(self._eval_output_file).size())

  def testDoOutputSplit(self):
    """A single input split is hash-partitioned into train/eval at a 2:1 ratio."""
    # Create exec properties.
    exec_properties = {
        'input_config':
            json_format.MessageToJson(
                example_gen_pb2.Input(splits=[
                    example_gen_pb2.Input.Split(
                        name='single', pattern='single/*'),
                ]),
                preserving_proto_field_name=True),
        'output_config':
            json_format.MessageToJson(
                example_gen_pb2.Output(
                    split_config=example_gen_pb2.SplitConfig(splits=[
                        example_gen_pb2.SplitConfig.Split(
                            name='train', hash_buckets=2),
                        example_gen_pb2.SplitConfig.Split(
                            name='eval', hash_buckets=1)
                    ])))
    }

    # Run executor.
    example_gen = TestExampleGenExecutor()
    example_gen.Do({}, self._output_dict, exec_properties)

    # Check example gen outputs.
    self.assertTrue(tf.io.gfile.exists(self._train_output_file))
    self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
    # Output split ratio: train:eval=2:1.
    self.assertGreater(
        tf.io.gfile.GFile(self._train_output_file).size(),
        tf.io.gfile.GFile(self._eval_output_file).size())
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| 36.555556 | 79 | 0.66619 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import apache_beam as beam
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
@beam.ptransform_fn
def _TestInputSourceToExamplePTransform(
pipeline,
input_dict,
exec_properties,
split_pattern):
mock_examples = []
size = 0
if split_pattern == 'single/*':
size = 30000
elif split_pattern == 'train/*':
size = 20000
elif split_pattern == 'eval/*':
size = 10000
assert size != 0
for i in range(size):
feature = {}
feature['i'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
int64_list=tf.train.Int64List(value=[i]))
feature['f'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
float_list=tf.train.FloatList(value=[float(i)]))
feature['s'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))]))
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
mock_examples.append(example_proto)
return pipeline | beam.Create(mock_examples)
class TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):
def GetInputSourceToExamplePTransform(self):
return _TestInputSourceToExamplePTransform
class BaseExampleGenExecutorTest(tf.test.TestCase):
def setUp(self):
super(BaseExampleGenExecutorTest, self).setUp()
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
examples = standard_artifacts.Examples()
examples.uri = output_data_dir
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
self._output_dict = {'examples': [examples]}
self._train_output_file = os.path.join(examples.uri, 'train',
'data_tfrecord-00000-of-00001.gz')
self._eval_output_file = os.path.join(examples.uri, 'eval',
'data_tfrecord-00000-of-00001.gz')
def testDoInputSplit(self):
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(), preserving_proto_field_name=True)
}
example_gen = TestExampleGenExecutor()
example_gen.Do({}, self._output_dict, exec_properties)
self.assertTrue(tf.io.gfile.exists(self._train_output_file))
self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
self.assertGreater(
tf.io.gfile.GFile(self._train_output_file).size(),
tf.io.gfile.GFile(self._eval_output_file).size())
def testDoOutputSplit(self):
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='single', pattern='single/*'),
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(
name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(
name='eval', hash_buckets=1)
])))
}
example_gen = TestExampleGenExecutor()
example_gen.Do({}, self._output_dict, exec_properties)
self.assertTrue(tf.io.gfile.exists(self._train_output_file))
self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
self.assertGreater(
tf.io.gfile.GFile(self._train_output_file).size(),
tf.io.gfile.GFile(self._eval_output_file).size())
if __name__ == '__main__':
tf.test.main()
| true | true |
f72b4a5949a2383abf083c3da9e19c3770c8c953 | 6,683 | py | Python | service/moleprop.py | uta-smile/CD-MVGNN | b48f4cd14befed298980a83edb417ab6809f0af6 | [
"MIT"
] | 3 | 2022-02-06T09:13:51.000Z | 2022-02-19T15:03:35.000Z | service/moleprop.py | uta-smile/CD-MVGNN | b48f4cd14befed298980a83edb417ab6809f0af6 | [
"MIT"
] | 1 | 2022-02-14T23:16:27.000Z | 2022-02-14T23:16:27.000Z | service/moleprop.py | uta-smile/CD-MVGNN | b48f4cd14befed298980a83edb417ab6809f0af6 | [
"MIT"
] | null | null | null | import os
import time
import math
import numpy as np
import torch
# torch.multiprocessing.set_start_method('spawn')
torch.multiprocessing.set_start_method('forkserver', force=True)
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import Namespace
from typing import List
from dglt.data.dataset.molecular import MoleculeDataset
from dglt.data.transformer.scaler import StandardScaler
from dglt.data.transformer.collator import MolCollator
from dglt.data.dataset.utils import get_data, get_data_from_smiles
from dglt.utils import load_args, load_checkpoint, load_scalers
from deploy import get_newest_train_args
from third_party.dimorphite_dl.acid_base import mol_cls
class MoleProp(object):
    """Molecular properties prediction service backed by an ensemble of checkpoints."""

    def __init__(self, checkpoint_dir, debug=print):
        """Recursively collect every model checkpoint (``*.pt``) under ``checkpoint_dir``.

        :param checkpoint_dir: root directory holding trained checkpoints.
        :param debug: logging callable for diagnostics (defaults to ``print``).
        """
        self.debug_ = debug
        self.checkpoint_paths_ = []
        for root, _, files in os.walk(checkpoint_dir):
            for fname in files:
                if fname.endswith('.pt'):
                    self.checkpoint_paths_.append(os.path.join(root, fname))

    def load_model(self, args: Namespace):
        """
        Load scalers, training-time arguments and all ensemble checkpoints.
        :param args: runtime arguments; attributes missing on it are filled in
            from the training-time arguments stored with the first checkpoint.
        :return:
        """
        # Scalers and training args are assumed identical across the ensemble,
        # so both are read from the first checkpoint only.
        self.scaler_, self.features_scaler_ = load_scalers(self.checkpoint_paths_[0])
        self.train_args = load_args(self.checkpoint_paths_[0])
        self.args_ = args
        for key, value in vars(self.train_args).items():
            if not hasattr(self.args_, key):
                setattr(self.args_, key, value)

        # Update args with newest training args (only attributes not already set).
        newest_train_args = get_newest_train_args()
        for key, value in vars(newest_train_args).items():
            if not hasattr(args, key):
                setattr(args, key, value)
        if args.features_path:
            args.features_path = None
            args.features_generator = ['rdkit_2d_normalized']
        self.models_ = []
        for checkpoint_path in tqdm(self.checkpoint_paths_, total=len(self.checkpoint_paths_)):
            self.models_.append(load_checkpoint(checkpoint_path, cuda=self.args_.cuda, current_args=self.args_))

    def inference(self,
                  model: nn.Module,
                  data: MoleculeDataset,
                  args,
                  batch_size: int,
                  shared_dict,
                  scaler: StandardScaler = None
                  ) -> List[List[float]]:
        """
        Run one model over ``data`` and return its (inverse-scaled) predictions.
        :param model: model.
        :param data: input data.
        :param args: Arguments.
        :param batch_size: batch size.
        :param shared_dict: shared_dict of model.
        :param scaler: scaler of input data; when given, predictions are mapped
            back to the original target scale.
        :return: prediction of molecular properties, one list per molecule.
        """
        model.eval()
        args.bond_drop_rate = 0
        preds = []
        mol_collator = MolCollator(args=args, shared_dict=shared_dict)
        mol_loader = DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=mol_collator)
        for item in mol_loader:
            # Collator yields (smiles, batch graph, extra features, mask, targets);
            # only the graph and extra features are needed for inference.
            _, batch, features_batch, _, _ = item
            with torch.no_grad():
                batch_preds = model(batch, features_batch)
            batch_preds = batch_preds.data.cpu().numpy()
            if scaler is not None:
                batch_preds = scaler.inverse_transform(batch_preds)
            preds.extend(batch_preds.tolist())
        return preds

    def postprocessing(self, task: str = None, smiles: List[str] = None, preds: np.ndarray = None):
        """Apply task-specific transformations to the raw model outputs.

        caco2: invert the training-time transform via (10**p - 1) / 10
               (assumes targets were trained as log10(10*y + 1) — TODO confirm).
        pka:   append an acid/base classification column; predictions for
               molecules that could not be classified are set to NaN.
        ppb:   clamp predictions into the valid fraction range [0, 1].
        """
        if task == 'caco2':
            for i in range(preds.shape[0]):
                if preds[i] is not None:
                    for j in range(len(preds[i])):
                        preds[i][j] = (math.pow(10, preds[i][j]) - 1) / 10
        elif task == 'pka':
            acid_base = mol_cls(smiles)
            preds[acid_base == None] = np.nan
            preds = np.column_stack((preds, np.array(acid_base, dtype=np.float)))
        elif task == 'ppb':
            preds[preds > 1] = 1
            preds[preds < 0] = 0
        return preds

    def predict(self, task: str = None, smiles: List[str] = None):
        """
        Predict molecular properties.
        :param task: task name; selects the task-specific post-processing.
        :param smiles: input SMILES strings; when None, data is read from
            ``self.args_.input_file`` instead.
        :return: dict with the task name and a per-SMILES score mapping.
        """
        self.debug_('Loading data')
        tic = time.time()
        self.args_.max_workers = 30
        if smiles is not None:
            test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=True, args=self.args_)
        else:
            test_data = get_data(path=self.args_.input_file, args=self.args_,
                                 use_compound_names=self.args_.use_compound_names,
                                 skip_invalid_smiles=True)
        toc = time.time()
        self.debug_('loading data: {}s'.format(toc - tic))

        self.debug_('Validating SMILES')
        tic = time.time()
        valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
        full_data = test_data
        test_data = MoleculeDataset([test_data[i] for i in valid_indices])

        # Edge case if empty list of smiles is provided
        if len(test_data) == 0:
            return [None] * len(full_data)

        # Normalize features with the scaler fitted at training time.
        # Bug fix: load_model stores the scaler as `features_scaler_`; the old
        # code referenced the non-existent `self.features_scaler` and raised
        # AttributeError whenever features_scaling was enabled.
        if self.train_args.features_scaling:
            test_data.normalize_features(self.features_scaler_)

        sum_preds = np.zeros((len(test_data), self.args_.num_tasks))
        toc = time.time()
        self.debug_('validating smiles: {}s'.format(toc - tic))

        self.debug_(f'Predicting...')
        tic = time.time()
        shared_dict = {}
        # Ensemble: average predictions over all loaded checkpoints.
        for model in self.models_:
            model_preds = self.inference(
                model=model,
                data=test_data,
                batch_size=self.args_.batch_size,
                scaler=self.scaler_,
                shared_dict=shared_dict,
                args=self.args_
            )
            sum_preds += np.array(model_preds)
        toc = time.time()
        self.debug_('predicting: {}s'.format(toc - tic))

        avg_preds = sum_preds / len(self.checkpoint_paths_)
        avg_preds = self.postprocessing(task=task, smiles=smiles, preds=avg_preds)
        avg_preds = avg_preds.tolist()
        assert len(test_data) == len(avg_preds)

        test_smiles = test_data.smiles()
        res = {}
        for i in range(len(avg_preds)):
            res[test_smiles[i]] = avg_preds[i]
        return {'task': task, 'task_score': res}
| 38.854651 | 115 | 0.60422 | import os
import time
import math
import numpy as np
import torch
torch.multiprocessing.set_start_method('forkserver', force=True)
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from argparse import Namespace
from typing import List
from dglt.data.dataset.molecular import MoleculeDataset
from dglt.data.transformer.scaler import StandardScaler
from dglt.data.transformer.collator import MolCollator
from dglt.data.dataset.utils import get_data, get_data_from_smiles
from dglt.utils import load_args, load_checkpoint, load_scalers
from deploy import get_newest_train_args
from third_party.dimorphite_dl.acid_base import mol_cls
class MoleProp(object):
    """Molecular property predictor backed by an ensemble of checkpointed models."""

    def __init__(self, checkpoint_dir, debug=print):
        """Collect every ``*.pt`` checkpoint found under ``checkpoint_dir``."""
        self.debug_ = debug
        self.checkpoint_paths_ = []
        for root, _, files in os.walk(checkpoint_dir):
            for fname in files:
                if fname.endswith('.pt'):
                    self.checkpoint_paths_.append(os.path.join(root, fname))

    def load_model(self, args: Namespace):
        """Load scalers, training args and every checkpointed model.

        Saved training args and the newest default args are merged into
        ``args`` without overriding values the caller already set.
        """
        self.scaler_, self.features_scaler_ = load_scalers(self.checkpoint_paths_[0])
        self.train_args = load_args(self.checkpoint_paths_[0])
        self.args_ = args
        for key, value in vars(self.train_args).items():
            if not hasattr(self.args_, key):
                setattr(self.args_, key, value)
        newest_train_args = get_newest_train_args()
        for key, value in vars(newest_train_args).items():
            if not hasattr(args, key):
                setattr(args, key, value)
        if args.features_path:
            # Generate features on the fly instead of reading them from disk.
            args.features_path = None
            args.features_generator = ['rdkit_2d_normalized']
        self.models_ = []
        for checkpoint_path in tqdm(self.checkpoint_paths_, total=len(self.checkpoint_paths_)):
            self.models_.append(load_checkpoint(checkpoint_path, cuda=self.args_.cuda, current_args=self.args_))

    def inference(self,
                  model: nn.Module,
                  data: MoleculeDataset,
                  args,
                  batch_size: int,
                  shared_dict,
                  scaler: StandardScaler = None
                  ) -> List[List[float]]:
        """Run one model over ``data`` and return per-molecule predictions."""
        model.eval()
        args.bond_drop_rate = 0  # no bond dropout at inference time
        preds = []
        mol_collator = MolCollator(args=args, shared_dict=shared_dict)
        mol_loader = DataLoader(data, batch_size=batch_size, shuffle=False,
                                num_workers=0, collate_fn=mol_collator)
        # Each item unpacks to (smiles, batch, features, mask, targets).
        for _, batch, features_batch, _, _ in mol_loader:
            with torch.no_grad():
                batch_preds = model(batch, features_batch)
            batch_preds = batch_preds.data.cpu().numpy()
            if scaler is not None:
                batch_preds = scaler.inverse_transform(batch_preds)
            preds.extend(batch_preds.tolist())
        return preds

    def postprocessing(self, task: str = None, smiles: List[str] = None, preds: np.ndarray = None):
        """Apply task-specific transforms ('caco2', 'pka', 'ppb') to raw outputs."""
        if task == 'caco2':
            # Undo the log-style target transform used during training.
            for i in range(preds.shape[0]):
                if preds[i] is not None:
                    for j in range(len(preds[i])):
                        preds[i][j] = (math.pow(10, preds[i][j]) - 1) / 10
        elif task == 'pka':
            acid_base = mol_cls(smiles)
            # Elementwise comparison on purpose: mask unclassified molecules.
            preds[acid_base == None] = np.nan
            # BUG FIX: np.float was removed in NumPy 1.24; builtin float is
            # the documented replacement.
            preds = np.column_stack((preds, np.array(acid_base, dtype=float)))
        elif task == 'ppb':
            # Clamp bound fractions to the physical [0, 1] range.
            preds[preds > 1] = 1
            preds[preds < 0] = 0
        return preds

    def predict(self, task: str = None, smiles: List[str] = None):
        """Predict properties for ``smiles`` (or ``args.input_file``).

        :return: {'task': task, 'task_score': {smiles: prediction}} or a list
                 of None values when no input molecule is valid.
        """
        self.debug_('Loading data')
        tic = time.time()
        self.args_.max_workers = 30
        if smiles is not None:
            test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=True, args=self.args_)
        else:
            test_data = get_data(path=self.args_.input_file, args=self.args_,
                                 use_compound_names=self.args_.use_compound_names,
                                 skip_invalid_smiles=True)
        toc = time.time()
        self.debug_('loading data: {}s'.format(toc - tic))
        self.debug_('Validating SMILES')
        tic = time.time()
        valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]
        full_data = test_data
        test_data = MoleculeDataset([test_data[i] for i in valid_indices])
        # Edge case: nothing valid survived filtering.
        if len(test_data) == 0:
            return [None] * len(full_data)
        if self.train_args.features_scaling:
            # BUG FIX: load_model stores the scaler as ``features_scaler_``;
            # the original referenced a non-existent ``features_scaler``.
            test_data.normalize_features(self.features_scaler_)
        sum_preds = np.zeros((len(test_data), self.args_.num_tasks))
        toc = time.time()
        self.debug_('validating smiles: {}s'.format(toc - tic))
        self.debug_('Predicting...')
        tic = time.time()
        shared_dict = {}
        for model in self.models_:
            model_preds = self.inference(
                model=model,
                data=test_data,
                batch_size=self.args_.batch_size,
                scaler=self.scaler_,
                shared_dict=shared_dict,
                args=self.args_
            )
            sum_preds += np.array(model_preds)
        toc = time.time()
        self.debug_('predicting: {}s'.format(toc - tic))
        # Average the ensemble and post-process per task.
        avg_preds = sum_preds / len(self.checkpoint_paths_)
        avg_preds = self.postprocessing(task=task, smiles=smiles, preds=avg_preds)
        avg_preds = avg_preds.tolist()
        assert len(test_data) == len(avg_preds)
        test_smiles = test_data.smiles()
        res = {}
        for i in range(len(avg_preds)):
            res[test_smiles[i]] = avg_preds[i]
        return {'task': task, 'task_score': res}
| true | true |
f72b4b922b786b9836eb43cd19f849b1ef9f3014 | 2,011 | py | Python | cv-competition-1/pytorch_baseline/compute_overlaps_np.py | ipovalyaev/events | 64ec6324368dd21f9cedd464304eed01e1737024 | [
"MIT"
] | 5 | 2021-06-15T05:34:01.000Z | 2021-08-17T12:12:34.000Z | cv-competition-1/pytorch_baseline/compute_overlaps_np.py | ipovalyaev/events | 64ec6324368dd21f9cedd464304eed01e1737024 | [
"MIT"
] | 1 | 2021-06-15T13:24:48.000Z | 2021-06-15T13:24:48.000Z | cv-competition-1/pytorch_baseline/compute_overlaps_np.py | ipovalyaev/events | 64ec6324368dd21f9cedd464304eed01e1737024 | [
"MIT"
] | 7 | 2021-06-15T05:36:47.000Z | 2021-09-30T08:00:08.000Z | import time
import numpy as np
from compute_overlap import compute_overlap
def compute_overlap_np(a: np.array, b: np.array) -> np.array:
    """
    Compute the pairwise IoU (intersection over union) of two box sets.

    Args
        a: (N, 4) ndarray of float [xmin, ymin, xmax, ymax]
        b: (K, 4) ndarray of float [xmin, ymin, xmax, ymax]
    Returns
        overlaps: (N, K) ndarray of overlap between boxes a and boxes b
    """
    # Vectorized replacement of the original O(N*K) Python loop: broadcast
    # (N, 1) against (K,) to get all pairwise intersection extents at once.
    dx = np.minimum(a[:, None, 2], b[None, :, 2]) - np.maximum(a[:, None, 0], b[None, :, 0])
    dy = np.minimum(a[:, None, 3], b[None, :, 3]) - np.maximum(a[:, None, 1], b[None, :, 1])
    intersection = np.clip(dx, 0, None) * np.clip(dy, 0, None)
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    union = area_a[:, None] + area_b[None, :] - intersection
    overlaps = np.zeros((len(a), len(b)))
    # Divide only where boxes actually intersect; elsewhere IoU stays 0.
    # (Also avoids 0/0 for degenerate zero-area boxes.)
    np.divide(intersection, union, out=overlaps, where=intersection > 0)
    return overlaps
def test_overlap_1():
    """Two 2x2 boxes offset by (1, 1) overlap with IoU 1/7."""
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    a = np.array([[1, 1, 3, 3]], dtype=float)
    b = np.array([[2, 2, 4, 4]], dtype=float)
    assert compute_overlap_np(a, b)[0][0] == 1. / 7
def test_overlap_0():
    """Boxes that only touch at a corner have IoU 0."""
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    a = np.array([[1, 1, 3, 3]], dtype=float)
    b = np.array([[3, 3, 4, 4]], dtype=float)
    assert compute_overlap_np(a, b)[0][0] == 0.
def test_overlap_n(a_len, b_len, box_size=100):
    """Cross-check the numpy IoU against the C extension on random boxes."""
    a = np.random.randint(0, 3000, (a_len, 4))
    b = np.random.randint(0, 4000, (b_len, 4))
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    a = a.astype(float)
    b = b.astype(float)
    # Force well-formed boxes of a fixed size.
    a[:, 2] = a[:, 0] + box_size
    b[:, 2] = b[:, 0] + box_size
    a[:, 3] = a[:, 1] + box_size
    b[:, 3] = b[:, 1] + box_size
    t1 = time.time()
    o_np = compute_overlap_np(a, b)
    t2 = time.time()
    o_c = compute_overlap(a, b)
    t3 = time.time()
    assert np.array_equal(o_np, o_c)
    print('Numpy time = ', t2 - t1)
    print('C_ext time = ', t3 - t2)
if __name__ == '__main__':
    # Smoke-test the numpy implementation, then benchmark it against the
    # compiled C extension on 100x5 random 300px boxes.
    test_overlap_1()
    test_overlap_0()
    test_overlap_n(100, 5, 300)
| 29.144928 | 71 | 0.513178 | import time
import numpy as np
from compute_overlap import compute_overlap
def compute_overlap_np(a: np.array, b: np.array) -> np.array:
    """Pairwise IoU of boxes ``a`` (N, 4) against ``b`` (K, 4), both xyxy.

    Returns an (N, K) float array.
    """
    # Vectorized replacement of the original O(N*K) Python loop.
    dx = np.minimum(a[:, None, 2], b[None, :, 2]) - np.maximum(a[:, None, 0], b[None, :, 0])
    dy = np.minimum(a[:, None, 3], b[None, :, 3]) - np.maximum(a[:, None, 1], b[None, :, 1])
    intersection = np.clip(dx, 0, None) * np.clip(dy, 0, None)
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    union = area_a[:, None] + area_b[None, :] - intersection
    overlaps = np.zeros((len(a), len(b)))
    # Divide only where boxes intersect; elsewhere IoU stays 0.
    np.divide(intersection, union, out=overlaps, where=intersection > 0)
    return overlaps
def test_overlap_1():
    """Two 2x2 boxes offset by (1, 1) overlap with IoU 1/7."""
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    a = np.array([[1, 1, 3, 3]], dtype=float)
    b = np.array([[2, 2, 4, 4]], dtype=float)
    assert compute_overlap_np(a, b)[0][0] == 1. / 7
def test_overlap_0():
    """Boxes that only touch at a corner have IoU 0."""
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    a = np.array([[1, 1, 3, 3]], dtype=float)
    b = np.array([[3, 3, 4, 4]], dtype=float)
    assert compute_overlap_np(a, b)[0][0] == 0.
def test_overlap_n(a_len, b_len, box_size=100):
    """Cross-check the numpy IoU against the C extension on random boxes."""
    a = np.random.randint(0, 3000, (a_len, 4))
    b = np.random.randint(0, 4000, (b_len, 4))
    # BUG FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    a = a.astype(float)
    b = b.astype(float)
    # Force well-formed boxes of a fixed size.
    a[:, 2] = a[:, 0] + box_size
    b[:, 2] = b[:, 0] + box_size
    a[:, 3] = a[:, 1] + box_size
    b[:, 3] = b[:, 1] + box_size
    t1 = time.time()
    o_np = compute_overlap_np(a, b)
    t2 = time.time()
    o_c = compute_overlap(a, b)
    t3 = time.time()
    assert np.array_equal(o_np, o_c)
    print('Numpy time = ', t2 - t1)
    print('C_ext time = ', t3 - t2)
if __name__ == '__main__':
    # Smoke-test the numpy implementation, then benchmark it against the
    # compiled C extension on 100x5 random 300px boxes.
    test_overlap_1()
    test_overlap_0()
    test_overlap_n(100, 5, 300)
| true | true |
f72b4bbd4fb629bc220cd90dcdf55c738b02e203 | 4,032 | py | Python | policy_value_net_numpy.py | wesleytao/Checkers-Reinforcement-Learning | 80d45f1c29fb7cd4503cdadedf344267553cad31 | [
"MIT"
] | 2 | 2019-01-21T02:54:19.000Z | 2019-01-21T02:54:58.000Z | policy_value_net_numpy.py | wesleytao/Checkers-Reinforcement-Learning | 80d45f1c29fb7cd4503cdadedf344267553cad31 | [
"MIT"
] | null | null | null | policy_value_net_numpy.py | wesleytao/Checkers-Reinforcement-Learning | 80d45f1c29fb7cd4503cdadedf344267553cad31 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Implement the policy value network using numpy, so that we can play with the
trained AI model without installing any DL framwork
@author: Junxiao Song
"""
from __future__ import print_function
import numpy as np
# some utility functions
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    exps = np.exp(x - np.max(x))
    return exps / np.sum(exps)
def relu(X):
    """Element-wise rectified linear unit: max(X, 0)."""
    return np.maximum(X, 0)
def conv_forward(X, W, b, stride=1, padding=1):
    # 2-D convolution implemented as im2col + a single matrix multiply.
    # X: (n_x, d_x, h_x, w_x) input batch; W: (n_filters, d, fh, fw) filters;
    # b: per-filter bias. Returns (n_x, n_filters, h_out, w_out).
    n_filters, d_filter, h_filter, w_filter = W.shape
    # theano conv2d flips the filters (rotate 180 degree) first
    # while doing the calculation
    W = W[:, :, ::-1, ::-1]
    n_x, d_x, h_x, w_x = X.shape
    h_out = (h_x - h_filter + 2 * padding) / stride + 1
    w_out = (w_x - w_filter + 2 * padding) / stride + 1
    h_out, w_out = int(h_out), int(w_out)
    # Unfold input patches into columns so convolution becomes one matmul.
    X_col = im2col_indices(X, h_filter, w_filter,
                           padding=padding, stride=stride)
    W_col = W.reshape(n_filters, -1)
    # Transpose trick broadcasts the per-filter bias over all columns.
    out = (np.dot(W_col, X_col).T + b).T
    out = out.reshape(n_filters, h_out, w_out, n_x)
    out = out.transpose(3, 0, 1, 2)  # back to (batch, channels, h, w)
    return out
def fc_forward(X, W, b):
    """Fully-connected forward pass: X @ W + b."""
    return np.dot(X, W) + b
def get_im2col_indices(x_shape, field_height,
                       field_width, padding=1, stride=1):
    """Return (k, i, j) fancy indices selecting every receptive field.

    k indexes channels with shape (C*field_height*field_width, 1); i and j
    index rows/columns with shape (C*field_height*field_width,
    out_height*out_width).
    """
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    # BUG FIX: the width check must use field_width; the original repeated
    # field_height, which mis-validates non-square filters.
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = int((H + 2 * padding - field_height) / stride + 1)
    out_width = int((W + 2 * padding - field_width) / stride + 1)
    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    # Broadcast intra-patch offsets (i0, j0) against patch origins (i1, j1).
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
    return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    """ An implementation of im2col based on some fancy indexing """
    # Zero-pad the input
    p = padding
    x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
    k, i, j = get_im2col_indices(x.shape, field_height,
                                 field_width, padding, stride)
    # Gather every receptive field: shape (N, C*fh*fw, out_h*out_w).
    cols = x_padded[:, k, i, j]
    C = x.shape[1]
    # Stack the N samples along the column axis: (C*fh*fw, out_h*out_w*N).
    cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
    return cols
class PolicyValueNet():
    """policy-value network in numpy """
    def __init__(self, board_width, board_height, net_params):
        # net_params is a flat list of conv/fc weights and biases, indexed
        # positionally in policy_value_fn below.
        self.board_width = board_width
        self.board_height = board_height
        self.params = net_params
    def policy_value_fn(self, board):
        """
        input: board
        output: a list of (action, probability) tuples for each available
        action and the score of the board state
        """
        legal_positions = board.availables
        current_state = board.current_state()
        # 4 feature planes per position, reshaped to a conv input batch.
        X = current_state.reshape(-1, 4, self.board_width, self.board_height)
        # first 3 conv layers with ReLu nonlinearity
        for i in [0, 2, 4]:
            X = relu(conv_forward(X, self.params[i], self.params[i+1]))
        # policy head
        X_p = relu(conv_forward(X, self.params[6], self.params[7], padding=0))
        X_p = fc_forward(X_p.flatten(), self.params[8], self.params[9])
        act_probs = softmax(X_p)
        # value head
        X_v = relu(conv_forward(X, self.params[10],
                                self.params[11], padding=0))
        X_v = relu(fc_forward(X_v.flatten(), self.params[12], self.params[13]))
        value = np.tanh(fc_forward(X_v, self.params[14], self.params[15]))[0]
        # Keep only the probabilities of legal moves.
        act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])
        return act_probs, value
| 34.758621 | 79 | 0.615575 |
from __future__ import print_function
import numpy as np
def softmax(x):
    """Numerically stable softmax: shift by the max before exponentiating."""
    exps = np.exp(x - np.max(x))
    return exps / np.sum(exps)
def relu(X):
    """Element-wise rectified linear unit: max(X, 0)."""
    return np.maximum(X, 0)
def conv_forward(X, W, b, stride=1, padding=1):
    # 2-D convolution via im2col + one matrix multiply.
    # Returns (n_x, n_filters, h_out, w_out).
    n_filters, d_filter, h_filter, w_filter = W.shape
    # Flip filters (180-degree rotation) to match theano-style convolution.
    W = W[:, :, ::-1, ::-1]
    n_x, d_x, h_x, w_x = X.shape
    h_out = (h_x - h_filter + 2 * padding) / stride + 1
    w_out = (w_x - w_filter + 2 * padding) / stride + 1
    h_out, w_out = int(h_out), int(w_out)
    # Unfold input patches into columns so convolution becomes one matmul.
    X_col = im2col_indices(X, h_filter, w_filter,
                           padding=padding, stride=stride)
    W_col = W.reshape(n_filters, -1)
    # Transpose trick broadcasts the per-filter bias over all columns.
    out = (np.dot(W_col, X_col).T + b).T
    out = out.reshape(n_filters, h_out, w_out, n_x)
    out = out.transpose(3, 0, 1, 2)  # back to (batch, channels, h, w)
    return out
def fc_forward(X, W, b):
    """Fully-connected forward pass: X @ W + b."""
    return np.dot(X, W) + b
def get_im2col_indices(x_shape, field_height,
                       field_width, padding=1, stride=1):
    """Return (k, i, j) fancy indices selecting every receptive field."""
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    # BUG FIX: the width check must use field_width; the original repeated
    # field_height, which mis-validates non-square filters.
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = int((H + 2 * padding - field_height) / stride + 1)
    out_width = int((W + 2 * padding - field_width) / stride + 1)
    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    # Broadcast intra-patch offsets against patch origins.
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
    return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
    # im2col via fancy indexing: zero-pad, then gather every receptive field.
    p = padding
    x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
    k, i, j = get_im2col_indices(x.shape, field_height,
                                 field_width, padding, stride)
    cols = x_padded[:, k, i, j]
    C = x.shape[1]
    # Stack the N samples along the column axis: (C*fh*fw, out_h*out_w*N).
    cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
    return cols
class PolicyValueNet():
    """Numpy-only policy-value network (no DL framework needed at play time)."""
    def __init__(self, board_width, board_height, net_params):
        # net_params is a flat list of conv/fc weights and biases, indexed
        # positionally in policy_value_fn below.
        self.board_width = board_width
        self.board_height = board_height
        self.params = net_params
    def policy_value_fn(self, board):
        # Returns (iterable of (action, probability) for legal moves,
        # scalar value of the board state in [-1, 1]).
        legal_positions = board.availables
        current_state = board.current_state()
        # 4 feature planes per position, reshaped to a conv input batch.
        X = current_state.reshape(-1, 4, self.board_width, self.board_height)
        # Three shared conv+ReLU layers (params 0..5).
        for i in [0, 2, 4]:
            X = relu(conv_forward(X, self.params[i], self.params[i+1]))
        # Policy head: 1x1 conv + fully connected + softmax.
        X_p = relu(conv_forward(X, self.params[6], self.params[7], padding=0))
        X_p = fc_forward(X_p.flatten(), self.params[8], self.params[9])
        act_probs = softmax(X_p)
        # Value head: 1x1 conv + two fully connected layers + tanh.
        X_v = relu(conv_forward(X, self.params[10],
                                self.params[11], padding=0))
        X_v = relu(fc_forward(X_v.flatten(), self.params[12], self.params[13]))
        value = np.tanh(fc_forward(X_v, self.params[14], self.params[15]))[0]
        # Keep only the probabilities of legal moves.
        act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])
        return act_probs, value
| true | true |
f72b4d5a2a367abecd980532b8d4750d10b4ca89 | 1,961 | py | Python | sdk/python/tests/utils/online_store_utils.py | ibnummuhammad/feast | 1fd9c2def1fbaca68e865a7c67336793ddb25582 | [
"Apache-2.0"
] | 810 | 2018-12-25T15:16:11.000Z | 2020-05-14T09:49:40.000Z | sdk/python/tests/utils/online_store_utils.py | ibnummuhammad/feast | 1fd9c2def1fbaca68e865a7c67336793ddb25582 | [
"Apache-2.0"
] | 701 | 2018-12-21T05:18:43.000Z | 2020-05-16T01:30:21.000Z | sdk/python/tests/utils/online_store_utils.py | ibnummuhammad/feast | 1fd9c2def1fbaca68e865a7c67336793ddb25582 | [
"Apache-2.0"
] | 155 | 2018-12-22T11:05:04.000Z | 2020-05-14T07:33:41.000Z | from datetime import datetime
import boto3
from feast import utils
from feast.infra.online_stores.helpers import compute_entity_id
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
def _create_n_customer_test_samples(n=10):
    # Build n synthetic customer rows shaped like online-store writes:
    # (entity_key, feature dict, event timestamp, created timestamp).
    return [
        (
            EntityKeyProto(
                join_keys=["customer"], entity_values=[ValueProto(string_val=str(i))]
            ),
            {
                "avg_orders_day": ValueProto(float_val=1.0),
                "name": ValueProto(string_val="John"),
                "age": ValueProto(int64_val=3),
            },
            datetime.utcnow(),
            None,  # created_ts is unused by these tests
        )
        for i in range(n)
    ]
def _create_test_table(project, tbl_name, region):
    # Create the DynamoDB table "<project>.<tbl_name>" keyed by entity_id.
    client = boto3.client("dynamodb", region_name=region)
    client.create_table(
        TableName=f"{project}.{tbl_name}",
        KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "entity_id", "AttributeType": "S"}],
        BillingMode="PAY_PER_REQUEST",
    )
def _delete_test_table(project, tbl_name, region):
    # Tear down the table created by _create_test_table.
    client = boto3.client("dynamodb", region_name=region)
    client.delete_table(TableName=f"{project}.{tbl_name}")
def _insert_data_test_table(data, project, tbl_name, region):
    """Write (entity_key, features, timestamp, created_ts) rows into DynamoDB.

    FIX: open a single batch_writer around the whole loop instead of a new
    one per row, so puts are actually batched rather than flushed per item.
    """
    dynamodb_resource = boto3.resource("dynamodb", region_name=region)
    table_instance = dynamodb_resource.Table(f"{project}.{tbl_name}")
    with table_instance.batch_writer() as batch:
        for entity_key, features, timestamp, created_ts in data:
            entity_id = compute_entity_id(entity_key)
            batch.put_item(
                Item={
                    "entity_id": entity_id,
                    "event_ts": str(utils.make_tzaware(timestamp)),
                    "values": {k: v.SerializeToString() for k, v in features.items()},
                }
            )
| 34.403509 | 86 | 0.63182 | from datetime import datetime
import boto3
from feast import utils
from feast.infra.online_stores.helpers import compute_entity_id
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
def _create_n_customer_test_samples(n=10):
    # Build n synthetic customer rows shaped like online-store writes:
    # (entity_key, feature dict, event timestamp, created timestamp).
    return [
        (
            EntityKeyProto(
                join_keys=["customer"], entity_values=[ValueProto(string_val=str(i))]
            ),
            {
                "avg_orders_day": ValueProto(float_val=1.0),
                "name": ValueProto(string_val="John"),
                "age": ValueProto(int64_val=3),
            },
            datetime.utcnow(),
            None,  # created_ts is unused by these tests
        )
        for i in range(n)
    ]
def _create_test_table(project, tbl_name, region):
    # Create the DynamoDB table "<project>.<tbl_name>" keyed by entity_id.
    client = boto3.client("dynamodb", region_name=region)
    client.create_table(
        TableName=f"{project}.{tbl_name}",
        KeySchema=[{"AttributeName": "entity_id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "entity_id", "AttributeType": "S"}],
        BillingMode="PAY_PER_REQUEST",
    )
def _delete_test_table(project, tbl_name, region):
    # Tear down the table created by _create_test_table.
    client = boto3.client("dynamodb", region_name=region)
    client.delete_table(TableName=f"{project}.{tbl_name}")
def _insert_data_test_table(data, project, tbl_name, region):
    """Write (entity_key, features, timestamp, created_ts) rows into DynamoDB.

    FIX: open a single batch_writer around the whole loop instead of a new
    one per row, so puts are actually batched rather than flushed per item.
    """
    dynamodb_resource = boto3.resource("dynamodb", region_name=region)
    table_instance = dynamodb_resource.Table(f"{project}.{tbl_name}")
    with table_instance.batch_writer() as batch:
        for entity_key, features, timestamp, created_ts in data:
            entity_id = compute_entity_id(entity_key)
            batch.put_item(
                Item={
                    "entity_id": entity_id,
                    "event_ts": str(utils.make_tzaware(timestamp)),
                    "values": {k: v.SerializeToString() for k, v in features.items()},
                }
            )
| true | true |
f72b4fcb126c9560cd993ddf3c97358bf4458c21 | 2,011 | py | Python | internal/notes/builtin-SAVE/packages/bbcp/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/bbcp/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/bbcp/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bbcp(Package):
    """Securely and quickly copy data from source to target"""
    homepage = "http://www.slac.stanford.edu/~abh/bbcp/"
    # Upstream publishes no tagged releases; track the master branch.
    version('git', git='http://www.slac.stanford.edu/~abh/bbcp/bbcp.git',
            branch="master")
    depends_on('zlib')
    depends_on('openssl')
    def install(self, spec, prefix):
        # Build in-tree and copy the produced binary into the install prefix.
        cd("src")
        make()
        # BBCP wants to build the executable in a directory whose name depends
        # on the system type
        makesname = Executable("../MakeSname")
        bbcp_executable_path = "../bin/%s/bbcp" % makesname(
            output=str).rstrip("\n")
        destination_path = "%s/bin/" % prefix
        mkdirp(destination_path)
        install(bbcp_executable_path, destination_path)
| 41.040816 | 78 | 0.651914 | true | true | |
f72b4ff25106f6a06c072e4b810373c1bc5e5e95 | 1,964 | py | Python | sdap/studies/admin.py | umr1085-irset/reproGenomicsViewer | 187ea320668e567d01572bfbf9497bebd691569a | [
"MIT"
] | null | null | null | sdap/studies/admin.py | umr1085-irset/reproGenomicsViewer | 187ea320668e567d01572bfbf9497bebd691569a | [
"MIT"
] | 1 | 2020-02-16T10:48:55.000Z | 2020-02-16T11:06:36.000Z | sdap/studies/admin.py | umr1085-irset/reproGenomicsViewer | 187ea320668e567d01572bfbf9497bebd691569a | [
"MIT"
] | 4 | 2019-11-04T15:00:55.000Z | 2020-03-02T13:36:17.000Z | from django.contrib import admin
from django import forms
from .models import *
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
import sdap.tools.forms as tool_forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.apps import apps
class ExpressionStudyAdmin(admin.ModelAdmin, DynamicArrayMixin):
    # Admin form for expression studies; DynamicArrayMixin renders the
    # ArrayField-backed fields (topics, tissues, ...) as editable lists.
    fieldsets = [
        (None, {'fields': ['database','article', 'pmid', 'status', 'ome', 'experimental_design', 'topics', 'tissues', 'sex',
            'dev_stage', 'age', 'antibody', 'mutant', 'cell_sorted', 'keywords', 'samples_count', 'read_groups', 'edit_groups',
            ]
            }
        ),
    ]
class ExpressionDataAdmin(admin.ModelAdmin):
    # Admin form for expression data files attached to a study.
    fieldsets = [
        (None, {'fields': ['name', 'file','gene_type','gene_number', 'technology', 'species' ,'cell_number', 'study'
            ]
            }
        ),
    ]
    list_display = ['name', 'class_name']
class GeneAdmin(admin.ModelAdmin):
    # Admin form for genes; symbol search powers autocomplete_fields below.
    fieldsets = [
        (None, {'fields': ['gene_id','tax_id','symbol','synonyms','description','homolog_id','ensemble_id'
            ]
            }
        ),
    ]
    list_display = ['symbol', 'gene_id']
    search_fields = ['symbol']
class GeneListAdmin(admin.ModelAdmin):
    # Admin form for user-defined gene lists; gene selection uses the
    # symbol search defined on GeneAdmin.
    fieldsets = [
        (None, {'fields': ['name','created_by','species','genes'
            ]
            }
        ),
    ]
    autocomplete_fields = ['genes']
# Wire each model to its customised admin class.
admin.site.register(ExpressionStudy, ExpressionStudyAdmin)
admin.site.register(ExpressionData, ExpressionDataAdmin)
admin.site.register(GeneList, GeneListAdmin)
admin.site.register(Gene, GeneAdmin)
admin.site.register(Database)  # Database uses the default ModelAdmin
| 32.733333 | 155 | 0.556517 | from django.contrib import admin
from django import forms
from .models import *
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
import sdap.tools.forms as tool_forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.apps import apps
class ExpressionStudyAdmin(admin.ModelAdmin, DynamicArrayMixin):
    # Admin form for expression studies; DynamicArrayMixin renders the
    # ArrayField-backed fields (topics, tissues, ...) as editable lists.
    fieldsets = [
        (None, {'fields': ['database','article', 'pmid', 'status', 'ome', 'experimental_design', 'topics', 'tissues', 'sex',
            'dev_stage', 'age', 'antibody', 'mutant', 'cell_sorted', 'keywords', 'samples_count', 'read_groups', 'edit_groups',
            ]
            }
        ),
    ]
class ExpressionDataAdmin(admin.ModelAdmin):
    # Admin form for expression data files attached to a study.
    fieldsets = [
        (None, {'fields': ['name', 'file','gene_type','gene_number', 'technology', 'species' ,'cell_number', 'study'
            ]
            }
        ),
    ]
    list_display = ['name', 'class_name']
class GeneAdmin(admin.ModelAdmin):
    # Admin form for genes; symbol search powers autocomplete_fields below.
    fieldsets = [
        (None, {'fields': ['gene_id','tax_id','symbol','synonyms','description','homolog_id','ensemble_id'
            ]
            }
        ),
    ]
    list_display = ['symbol', 'gene_id']
    search_fields = ['symbol']
class GeneListAdmin(admin.ModelAdmin):
    # Admin form for user-defined gene lists; gene selection uses the
    # symbol search defined on GeneAdmin.
    fieldsets = [
        (None, {'fields': ['name','created_by','species','genes'
            ]
            }
        ),
    ]
    autocomplete_fields = ['genes']
# Wire each model to its customised admin class.
admin.site.register(ExpressionStudy, ExpressionStudyAdmin)
admin.site.register(ExpressionData, ExpressionDataAdmin)
admin.site.register(GeneList, GeneListAdmin)
admin.site.register(Gene, GeneAdmin)
admin.site.register(Database)  # Database uses the default ModelAdmin
| true | true |
f72b50a50d6a2732a98caa1aae33253e14cfd9d0 | 1,772 | py | Python | examples/simple/fe.py | HQSquantumsimulations/ActiveSpaceFinder | 2471dd1cb764169a1be51937addea04813cf42d1 | [
"Apache-2.0"
] | 10 | 2020-11-09T18:12:43.000Z | 2021-12-17T16:48:54.000Z | examples/simple/fe.py | HQSquantumsimulations/ActiveSpaceFinder | 2471dd1cb764169a1be51937addea04813cf42d1 | [
"Apache-2.0"
] | null | null | null | examples/simple/fe.py | HQSquantumsimulations/ActiveSpaceFinder | 2471dd1cb764169a1be51937addea04813cf42d1 | [
"Apache-2.0"
] | 3 | 2021-01-26T12:41:08.000Z | 2021-12-16T04:33:11.000Z | # Copyright 2020 HQS Quantum Simulations GmbH
# Reza Ghafarian Shirazi, Thilo Mast.
# reza.shirazi@quantumsimulations.de
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf import gto, scf
import asf
# Geometry: hexaaqua iron(III) complex, [Fe(H2O)6]3+ (coordinates in Angstrom).
mol = gto.Mole()
mol.atom = """
Fe -0.0000000 0.0000000 0.0000000
O 0.0000000 2.0622910 0.0000000
H 0.7919274 2.6471973 0.0000000
H -0.7919274 2.6471973 0.0000000
O -0.0000000 0.0000000 2.0622910
H -0.0000000 0.7919274 2.6471973
H 0.0000000 -0.7919274 2.6471973
O 2.0622910 -0.0000000 -0.0000000
H 2.6471973 -0.0000000 0.7919274
H 2.6471973 -0.0000000 -0.7919274
O -0.0000000 -2.0622910 0.0000000
H -0.7919274 -2.6471973 -0.0000000
H 0.7919274 -2.6471973 -0.0000000
O 0.0000000 0.0000000 -2.0622910
H 0.0000000 -0.7919274 -2.6471973
H -0.0000000 0.7919274 -2.6471973
O -2.0622910 0.0000000 0.0000000
H -2.6471973 0.0000000 -0.7919274
H -2.6471973 0.0000000 0.7919274
"""
# Minimal basis, charge +3, quartet spin (3 unpaired electrons), quiet output.
mol.basis = {'default': 'minao'}
mol.charge = 3
mol.spin = 3
mol.verbose = 0
mol.build()
# UHF for UNOs
# NOTE(review): the comment above says UHF but the code runs scf.RHF
# (spin-restricted open-shell for spin != 0) -- confirm which is intended.
mf = scf.RHF(mol).run(max_cycle=100)
mo_new = mf.stability()[0]
# Follow internal instabilities until the SCF solution is stable.
while mo_new is not mf.mo_coeff:
    mf.kernel(dm0=mf.make_rdm1(mo_coeff=mo_new))
    mo_new = mf.stability()[0]
# Call the wrapper function.
ASF = asf.asf()
# Find an active space from MP2 natural occupations, capped at 11 orbitals.
ele, mos = ASF.fas_no_guess(mf, nat_type='MP2', machine_limit=11)
| 30.551724 | 74 | 0.738149 |
from pyscf import gto, scf
import asf
# Geometry: hexaaqua iron(III) complex, [Fe(H2O)6]3+ (coordinates in Angstrom).
mol = gto.Mole()
mol.atom = """
Fe -0.0000000 0.0000000 0.0000000
O 0.0000000 2.0622910 0.0000000
H 0.7919274 2.6471973 0.0000000
H -0.7919274 2.6471973 0.0000000
O -0.0000000 0.0000000 2.0622910
H -0.0000000 0.7919274 2.6471973
H 0.0000000 -0.7919274 2.6471973
O 2.0622910 -0.0000000 -0.0000000
H 2.6471973 -0.0000000 0.7919274
H 2.6471973 -0.0000000 -0.7919274
O -0.0000000 -2.0622910 0.0000000
H -0.7919274 -2.6471973 -0.0000000
H 0.7919274 -2.6471973 -0.0000000
O 0.0000000 0.0000000 -2.0622910
H 0.0000000 -0.7919274 -2.6471973
H -0.0000000 0.7919274 -2.6471973
O -2.0622910 0.0000000 0.0000000
H -2.6471973 0.0000000 -0.7919274
H -2.6471973 0.0000000 0.7919274
"""
# Minimal basis, charge +3, quartet spin (3 unpaired electrons), quiet output.
mol.basis = {'default': 'minao'}
mol.charge = 3
mol.spin = 3
mol.verbose = 0
mol.build()
# Restricted open-shell SCF, then follow internal instabilities until stable.
mf = scf.RHF(mol).run(max_cycle=100)
mo_new = mf.stability()[0]
while mo_new is not mf.mo_coeff:
    mf.kernel(dm0=mf.make_rdm1(mo_coeff=mo_new))
    mo_new = mf.stability()[0]
# Active-space finder: MP2 natural orbitals, capped at 11 orbitals.
ASF = asf.asf()
ele, mos = ASF.fas_no_guess(mf, nat_type='MP2', machine_limit=11)
| true | true |
f72b50aeba2955ce6ef70f323284adca857cfc4f | 3,088 | py | Python | yolo_app/etc/commons/opencv_helpers.py | arvincsh/multiobjectdetection | 26b4d43ce981a7a4cd031611df70b8f7c08757df | [
"MIT"
] | null | null | null | yolo_app/etc/commons/opencv_helpers.py | arvincsh/multiobjectdetection | 26b4d43ce981a7a4cd031611df70b8f7c08757df | [
"MIT"
] | 3 | 2021-06-08T22:25:30.000Z | 2022-01-13T03:18:29.000Z | yolo_app/etc/commons/opencv_helpers.py | arvincsh/multiobjectdetection | 26b4d43ce981a7a4cd031611df70b8f7c08757df | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from math import sqrt
from scipy.spatial import distance
from yolo_app.etc.config import config
def crop_image(save_path, img, xywh):
    """Write the [x, y, w, h] region of ``img`` to ``save_path``."""
    left, top, width, height = xywh[0], xywh[1], xywh[2], xywh[3]
    cv2.imwrite(save_path, img[top:top + height, left:left + width])
def np_xyxy2xywh(xyxy, data_type=int):
    """Convert a [x1, y1, x2, y2] box to [x, y, w, h] (top-left + size).

    The output keeps the dtype of ``xyxy``; ``data_type`` truncates the
    width/height values before they are stored.  (Removed the dead x1/y1/
    x2/y2 locals the original assigned but never fully used.)
    """
    xywh = np.zeros_like(xyxy)
    xywh[0] = xyxy[0]
    xywh[1] = xyxy[1]
    xywh[2] = data_type(abs(xyxy[2] - xyxy[0]))
    xywh[3] = data_type(abs(xyxy[1] - xyxy[3]))
    return xywh
def torch2np_xyxy(xyxy, data_type=int):
    """Copy a [x1, y1, x2, y2] box (numpy array or torch tensor) into a
    numpy array, truncating each coordinate with ``data_type``."""
    try:
        # CPU tensors / numpy arrays can be consumed by numpy directly.
        np_xyxy = np.zeros_like(xyxy)
    # BUG FIX: narrowed from a bare ``except:`` which also swallowed
    # KeyboardInterrupt and SystemExit.
    except Exception:
        # GPU tensors must be copied to host memory first.
        np_xyxy = np.zeros_like(xyxy.data.cpu().numpy())
    np_xyxy[0] = data_type(xyxy[0])
    np_xyxy[1] = data_type(xyxy[1])
    np_xyxy[2] = data_type(xyxy[2])
    np_xyxy[3] = data_type(xyxy[3])
    return np_xyxy
def get_det_xyxy(det):
    """Return the first four entries of a detection row as a NumPy xyxy box."""
    return torch2np_xyxy(det[:4])
# Merged of 2 bounding boxes (xyxy and xyxy)
def get_mbbox(obj_1, obj_2):
    """Return the smallest xyxy box that encloses both input xyxy boxes."""
    return [
        min(obj_1[0], obj_2[0]),
        min(obj_1[1], obj_2[1]),
        max(obj_1[2], obj_2[2]),
        max(obj_1[3], obj_2[3]),
    ]
def np_xyxy2centroid(xyxy):
    """Return the (cx, cy) centre of an xyxy box as a NumPy array."""
    cx = (xyxy[0] + xyxy[2]) / 2
    cy = (xyxy[1] + xyxy[3]) / 2
    return np.asarray([cx, cy])
def get_xyxy_distance(xyxy_1, xyxy_2):
    """Euclidean distance between the first two coordinates of each box.

    Despite the xyxy naming, only elements 0 and 1 of each argument are
    used (i.e. the inputs are treated as (x, y) points).
    """
    dx = xyxy_1[0] - xyxy_2[0]
    dy = xyxy_1[1] - xyxy_2[1]
    return sqrt(dx ** 2 + dy ** 2)
def get_xyxy_distance_manhattan(xyxy_1, xyxy_2):
    """Manhattan (L1) distance between the first two coordinates of each box.

    Bug fix: the previous implementation squared the per-axis deltas, took
    ``scipy.spatial.distance.cityblock`` of the two *scalars* (which is just
    their absolute difference) and then a square root — i.e. it computed
    ``sqrt(|dx**2 - dy**2|)``, which is neither the Manhattan nor the
    Euclidean distance (it returned 0 for two points offset equally in x
    and y). This now computes the actual L1 distance ``|dx| + |dy|``.
    """
    return abs(xyxy_1[0] - xyxy_2[0]) + abs(xyxy_1[1] - xyxy_2[1])
def save_txt(save_path, txt_format, bbox_xyxy=None, w_type='a', img_ext=".png", cls=None, conf=1.0):
    """Append one detection to the ``.txt`` annotation file beside an image.

    save_path: image path; the annotation path is derived by stripping
        ``img_ext`` and appending ``.txt``.
    txt_format: "default" writes 6 numeric fields ("x1 y1 x2 y2 cls conf"),
        "cartucho" writes "<cls> <conf> <x1> <y1> <x2> <y2>" with cls as a
        string; any other value is silently ignored.
    bbox_xyxy: 4-element box; when None an empty entry is written.
    w_type: file open mode (append by default).
    cls: class label; falls back to the configured default label when None.
    conf: detection confidence.
    """
    txt_path = save_path.replace(img_ext, '')
    with open(txt_path + '.txt', w_type) as file:
        if bbox_xyxy is None:
            file.write("")
        else:
            if cls is None:
                cls = config["bbox_config"]["default_label"]
            if txt_format == "default":
                # Bug fix: unpack the 4 box coordinates so all six '%g'
                # conversions receive a scalar; passing the sequence itself
                # raised a TypeError at format time.
                file.write(('%g ' * 6 + '\n') % (*bbox_xyxy, cls, conf))
            elif txt_format == "cartucho":
                # cls is concatenated directly, so it must be a string here.
                str_output = cls + " "
                str_output += str(conf) + " "
                str_output += str(int(bbox_xyxy[0])) + " " + \
                              str(int(bbox_xyxy[1])) + " " + \
                              str(int(bbox_xyxy[2])) + " " + \
                              str(int(bbox_xyxy[3])) + "\n"
                file.write(str_output)
            else:
                pass
| 26.393162 | 100 | 0.562176 | import cv2
import numpy as np
from math import sqrt
from scipy.spatial import distance
from yolo_app.etc.config import config
def crop_image(save_path, img, xywh):
x = xywh[0]
y = xywh[1]
w = xywh[2]
h = xywh[3]
crop_img = img[y:y + h, x:x + w]
cv2.imwrite(save_path, crop_img)
def np_xyxy2xywh(xyxy, data_type=int):
xywh = np.zeros_like(xyxy)
x1 = xyxy[0]
y1 = xyxy[1]
x2 = xyxy[2]
y2 = xyxy[3]
xywh[0] = xyxy[0]
xywh[1] = xyxy[1]
xywh[2] = data_type(abs(x2 - x1))
xywh[3] = data_type(abs(y1 - y2))
return xywh
def torch2np_xyxy(xyxy, data_type=int):
try:
np_xyxy = np.zeros_like(xyxy)
except:
np_xyxy = np.zeros_like(xyxy.data.cpu().numpy())
np_xyxy[0] = data_type(xyxy[0])
np_xyxy[1] = data_type(xyxy[1])
np_xyxy[2] = data_type(xyxy[2])
np_xyxy[3] = data_type(xyxy[3])
return np_xyxy
def get_det_xyxy(det):
numpy_xyxy = torch2np_xyxy(det[:4])
return numpy_xyxy
def get_mbbox(obj_1, obj_2):
box1_x1 = obj_1[0]
box1_y1 = obj_1[1]
box1_x2 = obj_1[2]
box1_y2 = obj_1[3]
box2_x1 = obj_2[0]
box2_y1 = obj_2[1]
box2_x2 = obj_2[2]
box2_y2 = obj_2[3]
mbbox = [
min(box1_x1, box2_x1),
min(box1_y1, box2_y1),
max(box1_x2, box2_x2),
max(box1_y2, box2_y2)
]
return mbbox
def np_xyxy2centroid(xyxy):
centroid_x = (xyxy[0] + xyxy[2]) / 2
centroid_y = (xyxy[1] + xyxy[3]) / 2
return np.asarray([centroid_x, centroid_y])
def get_xyxy_distance(xyxy_1, xyxy_2):
o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)
o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)
dist = sqrt(o1cx_o2cx + o1cy_o2cy)
return dist
def get_xyxy_distance_manhattan(xyxy_1, xyxy_2):
o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)
o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)
dist = sqrt(distance.cityblock(o1cx_o2cx, o1cy_o2cy))
return dist
def save_txt(save_path, txt_format, bbox_xyxy=None, w_type='a', img_ext=".png", cls=None, conf=1.0):
txt_path = save_path.replace(img_ext, '')
with open(txt_path + '.txt', w_type) as file:
if bbox_xyxy is None:
file.write("")
else:
if cls is None:
cls = config["bbox_config"]["default_label"]
if txt_format == "default":
file.write(('%g ' * 6 + '\n') % (bbox_xyxy, cls, conf))
elif txt_format == "cartucho":
str_output = cls + " "
str_output += str(conf) + " "
str_output += str(int(bbox_xyxy[0])) + " " + \
str(int(bbox_xyxy[1])) + " " + \
str(int(bbox_xyxy[2])) + " " + \
str(int(bbox_xyxy[3])) + "\n"
file.write(str_output)
else:
pass
| true | true |
f72b5313853fd9dfc01ec04630bfc7a8a3c1dfba | 2,762 | py | Python | simple_generate.py | yuning1990/GPT2-Chinese | 299855ad9a84ad81b2da6e2c583af124f6ed6ff9 | [
"MIT"
] | null | null | null | simple_generate.py | yuning1990/GPT2-Chinese | 299855ad9a84ad81b2da6e2c583af124f6ed6ff9 | [
"MIT"
] | null | null | null | simple_generate.py | yuning1990/GPT2-Chinese | 299855ad9a84ad81b2da6e2c583af124f6ed6ff9 | [
"MIT"
] | null | null | null | from generate import *
from datetime import datetime
def main():
    """Parse generation options, derive model/output paths, and run Generate.

    Example::

        python ./generate.py --length=512 --nsamples=1 \
            --prefix=[MASK]哈利站在窗边 \
            --tokenizer_path cache/vocab_small.txt \
            --topk 40 --model_path model/model_epoch29 \
            --save_samples --save_samples_path result/20210915_29_1135 \
            --model_config model/model_epoch29/config.json \
            --repetition_penalty 1.05 --temperature 1.1
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--key', default='intro', type=str, required=False, help='哪个模型')
    parser.add_argument('--model_v', default='-1', type=str, required=False, help='第几个模型')
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备')
    parser.add_argument('--length', default=1024, type=int, required=False, help='生成长度')
    parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size')
    parser.add_argument('--nsamples', default=1, type=int, required=False, help='生成几个样本')
    parser.add_argument('--temperature', default=1.1, type=float, required=False, help='生成温度')
    parser.add_argument('--topk', default=20, type=int, required=False, help='最高几选一')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='模型参数')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='词表路径')
    parser.add_argument('--model_path', default='model/final_model', type=str, required=False, help='模型路径')
    parser.add_argument('--prefix', default='哈利站在窗边', type=str, required=False, help='生成文章的开头')
    parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词')
    parser.add_argument('--segment', action='store_true', help='中文以词为单位')
    parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本')
    parser.add_argument('--save_samples', default=True, help='保存产生的样本')
    parser.add_argument('--save_samples_path', default='.', type=str, required=False, help="保存样本的路径")
    parser.add_argument('--repetition_penalty', default=1.05, type=float, required=False)

    args = parser.parse_args()
    print('args:\n' + args.__repr__())

    # Point at the checkpoint of the requested epoch; otherwise keep the
    # default/final model path. (The previous no-op ``else`` branch that
    # reassigned args.model_path to itself has been removed.)
    if args.model_v != '-1':
        args.model_path = '{}/model_epoch{}'.format(args.model_path.split('/')[0], args.model_v)

    # Build a timestamp like "20210915_1135" (date + hour/minute, colons and
    # dashes stripped) so each sampling run gets its own output directory.
    t = str(datetime.now())
    d = ''.join('_'.join(''.join(t.split(":")[:-1]).split(' ')).split('-'))
    args.save_samples_path = 'result_{}/{}_v{}'.format(args.key, d, args.model_v)
    Generate().run(args)


if __name__ == '__main__':
    main()
| 54.156863 | 115 | 0.68139 | from generate import *
from datetime import datetime
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--key', default='intro', type=str, required=False, help='哪个模型')
parser.add_argument('--model_v', default='-1', type=str, required=False, help='第几个模型')
parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备')
parser.add_argument('--length', default=1024, type=int, required=False, help='生成长度')
parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size')
parser.add_argument('--nsamples', default=1, type=int, required=False, help='生成几个样本')
parser.add_argument('--temperature', default=1.1, type=float, required=False, help='生成温度')
parser.add_argument('--topk', default=20, type=int, required=False, help='最高几选一')
parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
help='模型参数')
parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='词表路径')
parser.add_argument('--model_path', default='model/final_model', type=str, required=False, help='模型路径')
parser.add_argument('--prefix', default='哈利站在窗边', type=str, required=False, help='生成文章的开头')
parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词')
parser.add_argument('--segment', action='store_true', help='中文以词为单位')
parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本')
parser.add_argument('--save_samples', default=True, help='保存产生的样本')
parser.add_argument('--save_samples_path', default='.', type=str, required=False, help="保存样本的路径")
parser.add_argument('--repetition_penalty', default=1.05, type=float, required=False)
args = parser.parse_args()
print('args:\n' + args.__repr__())
if args.model_v != '-1':
args.model_path = '{}/model_epoch{}'.format(args.model_path.split('/')[0], args.model_v)
else:
args.model_path = args.model_path
t = str(datetime.now())
d = ''.join('_'.join(''.join(t.split(":")[:-1]).split(' ')).split('-'))
args.save_samples_path = 'result_{}/{}_v{}'.format(args.key, d, args.model_v)
Generate().run(args)
if __name__ == '__main__':
main()
| true | true |
f72b5336226a539fe7cd56683840a850c1179b1c | 8,536 | py | Python | awareutils/vision/col.py | anna-aware/awareutils | 5571de71ad6f0adad6361e39f82073ba4dd402c5 | [
"MIT"
] | null | null | null | awareutils/vision/col.py | anna-aware/awareutils | 5571de71ad6f0adad6361e39f82073ba4dd402c5 | [
"MIT"
] | null | null | null | awareutils/vision/col.py | anna-aware/awareutils | 5571de71ad6f0adad6361e39f82073ba4dd402c5 | [
"MIT"
] | null | null | null | import hashlib
from typing import Iterable, Tuple
from loguru import logger
class Col:
    """An 8-bit RGB colour with optional clipping/coercion of channel values.

    Each channel is validated on assignment: ``fix_numeric_type=True``
    coerces non-int values with ``int()``, and ``clip=True`` clamps
    out-of-range values into [0, 255]; otherwise a ValueError is raised.
    """

    # Populated with the NamedCols lookup class after its definition below.
    named: "NamedCols"

    def __init__(self, r: int, g: int, b: int, clip: bool = False, fix_numeric_type: bool = True):
        self._clip = clip
        self._fix_numeric_type = fix_numeric_type
        self.r = r  # Note this is calling the setter
        self.g = g
        self.b = b

    @property
    def r(self) -> int:
        return self._r

    @r.setter
    def r(self, r: int) -> None:
        self._r = self._validate_uint8(r)

    @property
    def g(self) -> int:
        return self._g

    @g.setter
    def g(self, g: int) -> None:
        self._g = self._validate_uint8(g)

    @property
    def b(self) -> int:
        return self._b

    @b.setter
    def b(self, b: int) -> None:
        self._b = self._validate_uint8(b)

    def __eq__(self, c: object) -> bool:
        # Bug fix: comparing against a non-Col object previously raised
        # AttributeError; returning NotImplemented lets Python fall back to
        # the other operand's comparison (and ultimately False).
        if not isinstance(c, Col):
            return NotImplemented
        return self.r == c.r and self.g == c.g and self.b == c.b

    def __repr__(self) -> str:
        return f"{type(self).__name__}(r={self.r}, g={self.g}, b={self.b})"

    @property
    def rgb(self) -> Tuple[int, int, int]:
        return (self.r, self.g, self.b)

    @property
    def bgr(self) -> Tuple[int, int, int]:
        return (self.b, self.g, self.r)

    def _validate_uint8(self, c: int) -> int:
        """Validate/coerce one channel value to an int in [0, 255].

        Raises ValueError unless ``fix_numeric_type``/``clip`` allow the
        value to be repaired.
        """
        if c is None:
            raise ValueError("Color r/g/b must not be None")
        if not isinstance(c, int):
            if self._fix_numeric_type:
                logger.debug("Color r/g/b is meant to be int, so trying to coerce to int")
                c = int(c)
            else:
                raise ValueError("Color r/g/b is meant to be int but it isn't.")
        # Should always be 0 - 255:
        if c < 0 or c > 255:
            if self._clip:
                c = min(255, max(0, c))
                logger.debug("Color r/g/b must be 0 - 255 but it isn't, so clipping to this range.")
            else:
                raise ValueError("Color r/g/b must be 0 - 255 but it isn't.")
        # Phew, done:
        return c
def pick_col(s: str) -> Col:
    """Deterministically derive a colour from an arbitrary string.

    Approach based on
    https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L737
    i.e. hash the string representation and map three hex chunks of the
    digest onto r/g/b.
    """
    if not isinstance(s, str):
        raise RuntimeError("Please provide a string argument to pick_col")
    digest = hashlib.md5(s.encode("utf8")).hexdigest()
    n = int(len(digest) / 3)
    mx = 2 ** (4 * n) - 1
    # Bug fix: when a digest chunk equals mx, int(chunk / mx * 256) == 256,
    # which is out of range and made Col() raise — clamp to 255.
    rgb = (min(255, int(int(digest[i * n: (i + 1) * n], 16) / mx * 256)) for i in range(3))
    return Col(*rgb)
class DivergingPalette:
    """Assigns each label a colour from the ColorBrewer diverging 12-class
    "Paired" palette, cycling through the palette once all 12 are used.

    Each label gets its own mutable ``Col`` instance, and the label -> Col
    mapping is stable across repeated lookups.
    """

    def __init__(self, labels: Iterable[str] = None):
        # ColorBrewer Diverging 12-class Paired
        self._cols = (
            (166, 206, 227),
            (31, 120, 180),
            (178, 223, 138),
            (51, 160, 44),
            (251, 154, 153),
            (227, 26, 28),
            (253, 191, 111),
            (255, 127, 0),
            (202, 178, 214),
            (106, 61, 154),
            (255, 255, 153),
            (177, 89, 40),
        )
        # Pre-assign colours for any labels supplied up front.
        self._col_map = {}
        if labels is not None:
            for position, name in enumerate(labels):
                self._col_map[name] = Col(*self._cols[position % len(self._cols)])

    def col(self, label: str) -> Col:
        """Return the colour for ``label``, assigning the next palette entry
        the first time a new label is seen."""
        if label not in self._col_map:
            nxt = len(self._col_map) % len(self._cols)
            self._col_map[label] = Col(*self._cols[nxt])
        return self._col_map[label]
class NamedCols:
    """CSS/X11-style named colour constants, each exposed as a class-level
    ``Col`` instance (plus the two "aware" brand blues)."""
    alice_blue = Col(240, 248, 255)
    antique_white = Col(250, 235, 215)
    aqua = Col(0, 255, 255)
    aqua_marine = Col(127, 255, 212)
    aware_blue_dark = Col(0, 81, 155)
    aware_blue_light = Col(87, 200, 231)
    azure = Col(240, 255, 255)
    beige = Col(245, 245, 220)
    bisque = Col(255, 228, 196)
    black = Col(0, 0, 0)
    blanched_almond = Col(255, 235, 205)
    blue = Col(0, 0, 255)
    blue_violet = Col(138, 43, 226)
    brown = Col(165, 42, 42)
    burly_wood = Col(222, 184, 135)
    cadet_blue = Col(95, 158, 160)
    chart_reuse = Col(127, 255, 0)
    chocolate = Col(210, 105, 30)
    coral = Col(255, 127, 80)
    corn_flower_blue = Col(100, 149, 237)
    corn_silk = Col(255, 248, 220)
    crimson = Col(220, 20, 60)
    cyan = Col(0, 255, 255)
    dark_blue = Col(0, 0, 139)
    dark_cyan = Col(0, 139, 139)
    dark_golden_rod = Col(184, 134, 11)
    dark_gray = Col(169, 169, 169)
    dark_green = Col(0, 100, 0)
    dark_grey = Col(169, 169, 169)
    dark_khaki = Col(189, 183, 107)
    dark_magenta = Col(139, 0, 139)
    dark_olive_green = Col(85, 107, 47)
    dark_orange = Col(255, 140, 0)
    dark_orchid = Col(153, 50, 204)
    dark_red = Col(139, 0, 0)
    dark_salmon = Col(233, 150, 122)
    dark_sea_green = Col(143, 188, 143)
    dark_slate_blue = Col(72, 61, 139)
    dark_slate_gray = Col(47, 79, 79)
    dark_turquoise = Col(0, 206, 209)
    dark_violet = Col(148, 0, 211)
    deep_pink = Col(255, 20, 147)
    deep_sky_blue = Col(0, 191, 255)
    dim_gray = Col(105, 105, 105)
    dim_grey = Col(105, 105, 105)
    dodger_blue = Col(30, 144, 255)
    firebrick = Col(178, 34, 34)
    floral_white = Col(255, 250, 240)
    forest_green = Col(34, 139, 34)
    fuchsia = Col(255, 0, 255)
    gainsboro = Col(220, 220, 220)
    ghost_white = Col(248, 248, 255)
    gold = Col(255, 215, 0)
    golden_rod = Col(218, 165, 32)
    gray = Col(128, 128, 128)
    green = Col(0, 128, 0)
    green_yellow = Col(173, 255, 47)
    grey = Col(128, 128, 128)
    honeydew = Col(240, 255, 240)
    hot_pink = Col(255, 105, 180)
    indian_red = Col(205, 92, 92)
    indigo = Col(75, 0, 130)
    ivory = Col(255, 255, 240)
    khaki = Col(240, 230, 140)
    lavender = Col(230, 230, 250)
    lavender_blush = Col(255, 240, 245)
    lawn_green = Col(124, 252, 0)
    lemon_chiffon = Col(255, 250, 205)
    light_blue = Col(173, 216, 230)
    light_coral = Col(240, 128, 128)
    light_cyan = Col(224, 255, 255)
    light_golden_rod_yellow = Col(250, 250, 210)
    light_gray = Col(211, 211, 211)
    light_green = Col(144, 238, 144)
    light_grey = Col(211, 211, 211)
    light_pink = Col(255, 182, 193)
    light_salmon = Col(255, 160, 122)
    light_sea_green = Col(32, 178, 170)
    light_sky_blue = Col(135, 206, 250)
    light_slate_gray = Col(119, 136, 153)
    light_steel_blue = Col(176, 196, 222)
    light_yellow = Col(255, 255, 224)
    lime = Col(0, 255, 0)
    lime_green = Col(50, 205, 50)
    linen = Col(250, 240, 230)
    magenta = Col(255, 0, 255)
    maroon = Col(128, 0, 0)
    medium_aqua_marine = Col(102, 205, 170)
    medium_blue = Col(0, 0, 205)
    medium_orchid = Col(186, 85, 211)
    medium_purple = Col(147, 112, 219)
    medium_sea_green = Col(60, 179, 113)
    medium_slate_blue = Col(123, 104, 238)
    medium_spring_green = Col(0, 250, 154)
    medium_turquoise = Col(72, 209, 204)
    medium_violet_red = Col(199, 21, 133)
    midnight_blue = Col(25, 25, 112)
    mint_cream = Col(245, 255, 250)
    misty_rose = Col(255, 228, 225)
    moccasin = Col(255, 228, 181)
    navajo_white = Col(255, 222, 173)
    navy = Col(0, 0, 128)
    old_lace = Col(253, 245, 230)
    olive = Col(128, 128, 0)
    olive_drab = Col(107, 142, 35)
    orange = Col(255, 165, 0)
    orange_red = Col(255, 69, 0)
    orchid = Col(218, 112, 214)
    pale_golden_rod = Col(238, 232, 170)
    pale_green = Col(152, 251, 152)
    pale_turquoise = Col(175, 238, 238)
    pale_violet_red = Col(219, 112, 147)
    papaya_whip = Col(255, 239, 213)
    peach_puff = Col(255, 218, 185)
    peru = Col(205, 133, 63)
    pink = Col(255, 192, 203)
    plum = Col(221, 160, 221)
    powder_blue = Col(176, 224, 230)
    purple = Col(128, 0, 128)
    red = Col(255, 0, 0)
    rosy_brown = Col(188, 143, 143)
    royal_blue = Col(65, 105, 225)
    saddle_brown = Col(139, 69, 19)
    salmon = Col(250, 128, 114)
    sandy_brown = Col(244, 164, 96)
    sea_green = Col(46, 139, 87)
    sea_shell = Col(255, 245, 238)
    sienna = Col(160, 82, 45)
    silver = Col(192, 192, 192)
    sky_blue = Col(135, 206, 235)
    slate_blue = Col(106, 90, 205)
    slate_gray = Col(112, 128, 144)
    snow = Col(255, 250, 250)
    spring_green = Col(0, 255, 127)
    steel_blue = Col(70, 130, 180)
    tan = Col(210, 180, 140)
    teal = Col(0, 128, 128)
    thistle = Col(216, 191, 216)
    tomato = Col(255, 99, 71)
    turquoise = Col(64, 224, 208)
    violet = Col(238, 130, 238)
    wheat = Col(245, 222, 179)
    white = Col(255, 255, 255)
    white_smoke = Col(245, 245, 245)
    yellow = Col(255, 255, 0)
    yellow_green = Col(154, 205, 50)
Col.named = NamedCols
| 31.732342 | 115 | 0.573336 | import hashlib
from typing import Iterable, Tuple
from loguru import logger
class Col:
named: "NamedCols"
def __init__(self, r: int, g: int, b: int, clip: bool = False, fix_numeric_type: bool = True):
self._clip = clip
self._fix_numeric_type = fix_numeric_type
self.r = r
self.g = g
self.b = b
@property
def r(self) -> int:
return self._r
@r.setter
def r(self, r: int) -> None:
self._r = self._validate_uint8(r)
@property
def g(self) -> int:
return self._g
def __eq__(self, c: "Col") -> bool:
return self.r == c.r and self.g == c.g and self.b == c.b
@g.setter
def g(self, g: int) -> None:
self._g = self._validate_uint8(g)
@property
def b(self) -> int:
return self._b
@b.setter
def b(self, b: int) -> None:
self._b = self._validate_uint8(b)
@property
def rgb(self) -> Tuple[int, int, int]:
return (self.r, self.g, self.b)
@property
def bgr(self) -> Tuple[int, int, int]:
return (self.b, self.g, self.r)
def _validate_uint8(self, c: int) -> int:
if c is None:
raise ValueError("Color r/g/b must not be None")
if not isinstance(c, int):
if self._fix_numeric_type:
logger.debug("Color r/g/b is meant to be int, so trying to coerce to int")
c = int(c)
else:
raise ValueError("Color r/g/b is meant to be int but it isn't.")
# Should always be >= 0
if c < 0 or c > 255:
if self._clip:
c = min(255, max(0, c))
logger.debug("Color r/g/b must be 0 - 255 but it isn't, so clipping to this range.")
else:
raise ValueError("Color r/g/b must be 0 - 255 but it isn't.")
# Phew, done:
return c
def pick_col(s: str) -> Col:
if not isinstance(s, str):
raise RuntimeError("Please provide a string argument to pick_col")
# Approach based on https://github.com/vaab/colour/blob/11f138eb7841d2045160b378a2eec0c2321144c0/colour.py#L737
# i.e. hash the string representation
digest = hashlib.md5(s.encode("utf8")).hexdigest()
n = int(len(digest) / 3)
mx = 2 ** (4 * n) - 1
rgb = (int(int(digest[i * n : (i + 1) * n], 16) / mx * 256) for i in range(3))
return Col(*rgb)
class DivergingPalette:
def __init__(self, labels: Iterable[str] = None):
# ColorBrewer Diverging 12-class Paired
self._cols = (
(166, 206, 227),
(31, 120, 180),
(178, 223, 138),
(51, 160, 44),
(251, 154, 153),
(227, 26, 28),
(253, 191, 111),
(255, 127, 0),
(202, 178, 214),
(106, 61, 154),
(255, 255, 153),
(177, 89, 40),
)
# Create the lookup (with our own Col objects so they can be mutated)
self._col_map = {}
if labels is not None:
for idx, label in enumerate(labels):
self._col_map[label] = Col(*self._cols[idx % len(self._cols)])
def col(self, label: str) -> Col:
if label not in self._col_map:
idx = len(self._col_map) % len(self._cols)
self._col_map[label] = Col(*self._cols[idx])
return self._col_map[label]
class NamedCols:
alice_blue = Col(240, 248, 255)
antique_white = Col(250, 235, 215)
aqua = Col(0, 255, 255)
aqua_marine = Col(127, 255, 212)
aware_blue_dark = Col(0, 81, 155)
aware_blue_light = Col(87, 200, 231)
azure = Col(240, 255, 255)
beige = Col(245, 245, 220)
bisque = Col(255, 228, 196)
black = Col(0, 0, 0)
blanched_almond = Col(255, 235, 205)
blue = Col(0, 0, 255)
blue_violet = Col(138, 43, 226)
brown = Col(165, 42, 42)
burly_wood = Col(222, 184, 135)
cadet_blue = Col(95, 158, 160)
chart_reuse = Col(127, 255, 0)
chocolate = Col(210, 105, 30)
coral = Col(255, 127, 80)
corn_flower_blue = Col(100, 149, 237)
corn_silk = Col(255, 248, 220)
crimson = Col(220, 20, 60)
cyan = Col(0, 255, 255)
dark_blue = Col(0, 0, 139)
dark_cyan = Col(0, 139, 139)
dark_golden_rod = Col(184, 134, 11)
dark_gray = Col(169, 169, 169)
dark_green = Col(0, 100, 0)
dark_grey = Col(169, 169, 169)
dark_khaki = Col(189, 183, 107)
dark_magenta = Col(139, 0, 139)
dark_olive_green = Col(85, 107, 47)
dark_orange = Col(255, 140, 0)
dark_orchid = Col(153, 50, 204)
dark_red = Col(139, 0, 0)
dark_salmon = Col(233, 150, 122)
dark_sea_green = Col(143, 188, 143)
dark_slate_blue = Col(72, 61, 139)
dark_slate_gray = Col(47, 79, 79)
dark_turquoise = Col(0, 206, 209)
dark_violet = Col(148, 0, 211)
deep_pink = Col(255, 20, 147)
deep_sky_blue = Col(0, 191, 255)
dim_gray = Col(105, 105, 105)
dim_grey = Col(105, 105, 105)
dodger_blue = Col(30, 144, 255)
firebrick = Col(178, 34, 34)
floral_white = Col(255, 250, 240)
forest_green = Col(34, 139, 34)
fuchsia = Col(255, 0, 255)
gainsboro = Col(220, 220, 220)
ghost_white = Col(248, 248, 255)
gold = Col(255, 215, 0)
golden_rod = Col(218, 165, 32)
gray = Col(128, 128, 128)
green = Col(0, 128, 0)
green_yellow = Col(173, 255, 47)
grey = Col(128, 128, 128)
honeydew = Col(240, 255, 240)
hot_pink = Col(255, 105, 180)
indian_red = Col(205, 92, 92)
indigo = Col(75, 0, 130)
ivory = Col(255, 255, 240)
khaki = Col(240, 230, 140)
lavender = Col(230, 230, 250)
lavender_blush = Col(255, 240, 245)
lawn_green = Col(124, 252, 0)
lemon_chiffon = Col(255, 250, 205)
light_blue = Col(173, 216, 230)
light_coral = Col(240, 128, 128)
light_cyan = Col(224, 255, 255)
light_golden_rod_yellow = Col(250, 250, 210)
light_gray = Col(211, 211, 211)
light_green = Col(144, 238, 144)
light_grey = Col(211, 211, 211)
light_pink = Col(255, 182, 193)
light_salmon = Col(255, 160, 122)
light_sea_green = Col(32, 178, 170)
light_sky_blue = Col(135, 206, 250)
light_slate_gray = Col(119, 136, 153)
light_steel_blue = Col(176, 196, 222)
light_yellow = Col(255, 255, 224)
lime = Col(0, 255, 0)
lime_green = Col(50, 205, 50)
linen = Col(250, 240, 230)
magenta = Col(255, 0, 255)
maroon = Col(128, 0, 0)
medium_aqua_marine = Col(102, 205, 170)
medium_blue = Col(0, 0, 205)
medium_orchid = Col(186, 85, 211)
medium_purple = Col(147, 112, 219)
medium_sea_green = Col(60, 179, 113)
medium_slate_blue = Col(123, 104, 238)
medium_spring_green = Col(0, 250, 154)
medium_turquoise = Col(72, 209, 204)
medium_violet_red = Col(199, 21, 133)
midnight_blue = Col(25, 25, 112)
mint_cream = Col(245, 255, 250)
misty_rose = Col(255, 228, 225)
moccasin = Col(255, 228, 181)
navajo_white = Col(255, 222, 173)
navy = Col(0, 0, 128)
old_lace = Col(253, 245, 230)
olive = Col(128, 128, 0)
olive_drab = Col(107, 142, 35)
orange = Col(255, 165, 0)
orange_red = Col(255, 69, 0)
orchid = Col(218, 112, 214)
pale_golden_rod = Col(238, 232, 170)
pale_green = Col(152, 251, 152)
pale_turquoise = Col(175, 238, 238)
pale_violet_red = Col(219, 112, 147)
papaya_whip = Col(255, 239, 213)
peach_puff = Col(255, 218, 185)
peru = Col(205, 133, 63)
pink = Col(255, 192, 203)
plum = Col(221, 160, 221)
powder_blue = Col(176, 224, 230)
purple = Col(128, 0, 128)
red = Col(255, 0, 0)
rosy_brown = Col(188, 143, 143)
royal_blue = Col(65, 105, 225)
saddle_brown = Col(139, 69, 19)
salmon = Col(250, 128, 114)
sandy_brown = Col(244, 164, 96)
sea_green = Col(46, 139, 87)
sea_shell = Col(255, 245, 238)
sienna = Col(160, 82, 45)
silver = Col(192, 192, 192)
sky_blue = Col(135, 206, 235)
slate_blue = Col(106, 90, 205)
slate_gray = Col(112, 128, 144)
snow = Col(255, 250, 250)
spring_green = Col(0, 255, 127)
steel_blue = Col(70, 130, 180)
tan = Col(210, 180, 140)
teal = Col(0, 128, 128)
thistle = Col(216, 191, 216)
tomato = Col(255, 99, 71)
turquoise = Col(64, 224, 208)
violet = Col(238, 130, 238)
wheat = Col(245, 222, 179)
white = Col(255, 255, 255)
white_smoke = Col(245, 245, 245)
yellow = Col(255, 255, 0)
yellow_green = Col(154, 205, 50)
Col.named = NamedCols
| true | true |
f72b548ac9a0d323d3b91f562646813415979b88 | 8,590 | py | Python | python/caffe/detector.py | MilesQLi/highway-networks | 87d1c8d091ed698b7959c6dbcbbe2ac2e8bf5e3e | [
"BSD-2-Clause"
] | 106 | 2015-08-11T05:45:50.000Z | 2021-04-08T02:29:07.000Z | python/caffe/detector.py | scott89/caffe-crowd | de1875c33e311c12df7dc33decda67706dbf250a | [
"BSD-2-Clause"
] | 2 | 2016-07-07T15:04:10.000Z | 2016-09-12T14:00:13.000Z | python/caffe/detector.py | scott89/caffe-crowd | de1875c33e311c12df7dc33decda67706dbf250a | [
"BSD-2-Clause"
] | 36 | 2015-08-14T07:33:42.000Z | 2021-03-11T09:48:40.000Z | #!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows ideas in
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
"""
import numpy as np
import os
import caffe
class Detector(caffe.Net):
    """
    Detector extends Net for windowed detection by a list of crops or
    selective search proposals.
    """
    def __init__(self, model_file, pretrained_file, mean=None,
                 input_scale=None, raw_scale=None, channel_swap=None,
                 context_pad=None):
        """
        Take
        mean, input_scale, raw_scale, channel_swap: params for
            preprocessing options.
        context_pad: amount of surrounding context to take s.t. a `context_pad`
            sized border of pixels in the network input image is context, as in
            R-CNN feature extraction.
        """
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)

        # configure pre-processing
        in_ = self.inputs[0]
        self.transformer = caffe.io.Transformer(
            {in_: self.blobs[in_].data.shape})
        self.transformer.set_transpose(in_, (2, 0, 1))
        if mean is not None:
            self.transformer.set_mean(in_, mean)
        if input_scale is not None:
            self.transformer.set_input_scale(in_, input_scale)
        if raw_scale is not None:
            self.transformer.set_raw_scale(in_, raw_scale)
        if channel_swap is not None:
            self.transformer.set_channel_swap(in_, channel_swap)

        self.configure_crop(context_pad)

    def detect_windows(self, images_windows):
        """
        Do windowed detection over given images and windows. Windows are
        extracted then warped to the input dimensions of the net.

        Parameters
        ----------
        images_windows: (image filename, window list) iterable.
        context_crop: size of context border to crop in pixels.

        Returns
        -------
        detections: list of {filename: image filename, window: crop coordinates,
            predictions: prediction vector} dicts.

        NOTE(review): images_windows is iterated twice (extraction, then
        packaging), so it must be a reusable iterable — a one-shot generator
        would yield an empty second pass.
        """
        # Extract windows.
        window_inputs = []
        for image_fname, windows in images_windows:
            image = caffe.io.load_image(image_fname).astype(np.float32)
            for window in windows:
                window_inputs.append(self.crop(image, window))

        # Run through the net (warping windows to input dimensions).
        # Batch shape: (num windows, channels) + net input (H, W); the
        # channel count comes from the crops' last axis (crops are H x W x K).
        in_ = self.inputs[0]
        caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
                            + self.blobs[in_].data.shape[2:],
                            dtype=np.float32)
        for ix, window_in in enumerate(window_inputs):
            caffe_in[ix] = self.transformer.preprocess(in_, window_in)
        out = self.forward_all(**{in_: caffe_in})
        predictions = out[self.outputs[0]].squeeze(axis=(2, 3))

        # Package predictions with images and windows.
        detections = []
        ix = 0
        for image_fname, windows in images_windows:
            for window in windows:
                detections.append({
                    'window': window,
                    'prediction': predictions[ix],
                    'filename': image_fname
                })
                ix += 1
        return detections

    def detect_selective_search(self, image_fnames):
        """
        Do windowed detection over Selective Search proposals by extracting
        the crop and warping to the input dimensions of the net.

        Parameters
        ----------
        image_fnames: list

        Returns
        -------
        detections: list of {filename: image filename, window: crop coordinates,
            predictions: prediction vector} dicts.
        """
        import selective_search_ijcv_with_python as selective_search
        # Make absolute paths so MATLAB can find the files.
        image_fnames = [os.path.abspath(f) for f in image_fnames]
        windows_list = selective_search.get_windows(
            image_fnames,
            cmd='selective_search_rcnn'
        )
        # Run windowed detection on the selective search list.
        # NOTE(review): under Python 3, zip() is single-pass while
        # detect_windows iterates its argument twice — wrap in list() if
        # porting from Python 2.
        return self.detect_windows(zip(image_fnames, windows_list))

    def crop(self, im, window):
        """
        Crop a window from the image for detection. Include surrounding context
        according to the `context_pad` configuration.

        Parameters
        ----------
        im: H x W x K image ndarray to crop.
        window: bounding box coordinates as ymin, xmin, ymax, xmax.

        Returns
        -------
        crop: cropped window.
        """
        # Crop window from the image.
        crop = im[window[0]:window[2], window[1]:window[3]]

        if self.context_pad:
            box = window.copy()
            crop_size = self.blobs[self.inputs[0]].width  # assumes square
            # scale expands the box so that, after resizing to crop_size,
            # a context_pad-wide border of context surrounds the window.
            scale = crop_size / (1. * crop_size - self.context_pad * 2)
            # Crop a box + surrounding context.
            half_h = (box[2] - box[0] + 1) / 2.
            half_w = (box[3] - box[1] + 1) / 2.
            center = (box[0] + half_h, box[1] + half_w)
            scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
            box = np.round(np.tile(center, 2) + scaled_dims)
            full_h = box[2] - box[0] + 1
            full_w = box[3] - box[1] + 1
            scale_h = crop_size / full_h
            scale_w = crop_size / full_w
            pad_y = round(max(0, -box[0]) * scale_h)  # amount out-of-bounds
            pad_x = round(max(0, -box[1]) * scale_w)

            # Clip box to image dimensions.
            im_h, im_w = im.shape[:2]
            box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
            clip_h = box[2] - box[0] + 1
            clip_w = box[3] - box[1] + 1
            assert(clip_h > 0 and clip_w > 0)
            crop_h = round(clip_h * scale_h)
            crop_w = round(clip_w * scale_w)
            if pad_y + crop_h > crop_size:
                crop_h = crop_size - pad_y
            if pad_x + crop_w > crop_size:
                crop_w = crop_size - pad_x

            # collect with context padding and place in input
            # with mean padding
            # NOTE(review): box holds floats from np.round; newer NumPy
            # requires integer indices — cast before slicing if upgrading.
            context_crop = im[box[0]:box[2], box[1]:box[3]]
            context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
            crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
            crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop

        return crop

    def configure_crop(self, context_pad):
        """
        Configure crop dimensions and amount of context for cropping.
        If context is included, make the special input mean for context padding.

        Parameters
        ----------
        context_pad : amount of context for cropping.
        """
        # crop dimensions: net input shape reordered back to image axis order
        in_ = self.inputs[0]
        tpose = self.transformer.transpose[in_]
        inv_tpose = [tpose[t] for t in tpose]
        self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]
        #.transpose(inv_tpose)
        # context padding
        self.context_pad = context_pad
        if self.context_pad:
            in_ = self.inputs[0]
            transpose = self.transformer.transpose.get(in_)
            channel_order = self.transformer.channel_swap.get(in_)
            raw_scale = self.transformer.raw_scale.get(in_)
            # Padding context crops needs the mean in unprocessed input space.
            mean = self.transformer.mean.get(in_)
            if mean is not None:
                # Undo transpose, channel swap, and raw scaling so the mean
                # matches the raw crops produced by crop().
                inv_transpose = [transpose[t] for t in transpose]
                crop_mean = mean.copy().transpose(inv_transpose)
                if channel_order is not None:
                    channel_order_inverse = [channel_order.index(i)
                                             for i in range(crop_mean.shape[2])]
                    crop_mean = crop_mean[:, :, channel_order_inverse]
                if raw_scale is not None:
                    crop_mean /= raw_scale
                self.crop_mean = crop_mean
            else:
                self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
| 39.585253 | 80 | 0.590803 |
import numpy as np
import os
import caffe
class Detector(caffe.Net):
    """R-CNN-style detector built on a Caffe net.

    Crops windows out of whole images, optionally padding each window
    with surrounding image context, and classifies every crop in a
    single batched forward pass.

    NOTE(review): assumes the net has a single image input blob and a
    single output blob of per-window scores -- confirm against the
    deployed model definition.
    """

    def __init__(self, model_file, pretrained_file, mean=None,
                 input_scale=None, raw_scale=None, channel_swap=None,
                 context_pad=None):
        """Load the net in TEST phase and set up input preprocessing.

        model_file, pretrained_file: paths to the model definition and
            pretrained weights.
        mean, input_scale, raw_scale, channel_swap: optional
            preprocessing settings forwarded to ``caffe.io.Transformer``.
        context_pad: amount of context padding applied around each
            cropped window (see ``configure_crop``).
        """
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)

        # Configure preprocessing for the (single) input blob.
        in_ = self.inputs[0]
        self.transformer = caffe.io.Transformer(
            {in_: self.blobs[in_].data.shape})
        self.transformer.set_transpose(in_, (2, 0, 1))
        if mean is not None:
            self.transformer.set_mean(in_, mean)
        if input_scale is not None:
            self.transformer.set_input_scale(in_, input_scale)
        if raw_scale is not None:
            self.transformer.set_raw_scale(in_, raw_scale)
        if channel_swap is not None:
            self.transformer.set_channel_swap(in_, channel_swap)

        self.configure_crop(context_pad)

    def detect_windows(self, images_windows):
        """Run the detector over the given windows of the given images.

        images_windows: iterable of (image filename, window list) pairs;
            each window is indexed as [y_min, x_min, y_max, x_max] by
            ``crop``.
        Returns a list of {'window', 'prediction', 'filename'} dicts,
        one per window, in input order.

        NOTE(review): ``images_windows`` is iterated twice below, so it
        must not be a one-shot generator.
        """
        # Extract every window crop first so one batch can be built.
        window_inputs = []
        for image_fname, windows in images_windows:
            image = caffe.io.load_image(image_fname).astype(np.float32)
            for window in windows:
                window_inputs.append(self.crop(image, window))

        # Assemble the preprocessed batch for the input blob.
        in_ = self.inputs[0]
        caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
                            + self.blobs[in_].data.shape[2:],
                            dtype=np.float32)
        for ix, window_in in enumerate(window_inputs):
            caffe_in[ix] = self.transformer.preprocess(in_, window_in)
        out = self.forward_all(**{in_: caffe_in})
        # Collapse the trailing singleton spatial axes of the scores.
        predictions = out[self.outputs[0]].squeeze(axis=(2, 3))

        # Pair each prediction back with its source window and file.
        detections = []
        ix = 0
        for image_fname, windows in images_windows:
            for window in windows:
                detections.append({
                    'window': window,
                    'prediction': predictions[ix],
                    'filename': image_fname
                })
                ix += 1
        return detections

    def detect_selective_search(self, image_fnames):
        """Propose windows with selective search, then detect on them.

        image_fnames: image paths (made absolute for the external tool).
        Returns the ``detect_windows`` output for the proposed windows.
        """
        # Local import keeps the selective-search dependency optional.
        import selective_search_ijcv_with_python as selective_search
        image_fnames = [os.path.abspath(f) for f in image_fnames]
        windows_list = selective_search.get_windows(
            image_fnames,
            cmd='selective_search_rcnn'
        )
        return self.detect_windows(zip(image_fnames, windows_list))

    def crop(self, im, window):
        """Crop a window from the image, optionally with context padding.

        im: H x W x K image ndarray.
        window: bounding box coordinates as [y_min, x_min, y_max, x_max].
        Returns the crop; when ``self.context_pad`` is set, the window
        is expanded with surrounding context and any area falling
        outside the image is filled with ``self.crop_mean``.
        """
        crop = im[window[0]:window[2], window[1]:window[3]]

        if self.context_pad:
            box = window.copy()
            # Scale the box so that, after resizing to the network input
            # size, `context_pad` pixels of context surround the window.
            crop_size = self.blobs[self.inputs[0]].width
            scale = crop_size / (1. * crop_size - self.context_pad * 2)
            half_h = (box[2] - box[0] + 1) / 2.
            half_w = (box[3] - box[1] + 1) / 2.
            center = (box[0] + half_h, box[1] + half_w)
            scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
            box = np.round(np.tile(center, 2) + scaled_dims)
            full_h = box[2] - box[0] + 1
            full_w = box[3] - box[1] + 1
            scale_h = crop_size / full_h
            scale_w = crop_size / full_w
            # Padding needed where the expanded box leaves the image.
            pad_y = round(max(0, -box[0]) * scale_h)
            pad_x = round(max(0, -box[1]) * scale_w)

            # Clip the box to the image and compute the resized extent
            # of the part that is actually covered by image pixels.
            im_h, im_w = im.shape[:2]
            box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
            clip_h = box[2] - box[0] + 1
            clip_w = box[3] - box[1] + 1
            assert(clip_h > 0 and clip_w > 0)
            crop_h = round(clip_h * scale_h)
            crop_w = round(clip_w * scale_w)
            if pad_y + crop_h > crop_size:
                crop_h = crop_size - pad_y
            if pad_x + crop_w > crop_size:
                crop_w = crop_size - pad_x

            # Paste the resized context crop onto a mean-filled canvas.
            context_crop = im[box[0]:box[2], box[1]:box[3]]
            context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
            crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
            crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop

        return crop

    def configure_crop(self, context_pad):
        """Configure crop dimensions and amount of context for cropping.

        If context padding is enabled, also derive the mean in the
        unprocessed input space for filling padded regions.

        context_pad : amount of context for cropping.
        """
        # Crop dimensions: invert the transpose to get H x W x K from
        # the blob's K x H x W data shape.
        in_ = self.inputs[0]
        tpose = self.transformer.transpose[in_]
        inv_tpose = [tpose[t] for t in tpose]
        self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]

        # Context padding.
        self.context_pad = context_pad
        if self.context_pad:
            in_ = self.inputs[0]
            transpose = self.transformer.transpose.get(in_)
            channel_order = self.transformer.channel_swap.get(in_)
            raw_scale = self.transformer.raw_scale.get(in_)
            # Padding context crops needs the mean in unprocessed
            # input space: undo transpose, channel swap and raw scale.
            mean = self.transformer.mean.get(in_)
            if mean is not None:
                inv_transpose = [transpose[t] for t in transpose]
                crop_mean = mean.copy().transpose(inv_transpose)
                if channel_order is not None:
                    channel_order_inverse = [channel_order.index(i)
                                             for i in range(crop_mean.shape[2])]
                    crop_mean = crop_mean[:, :, channel_order_inverse]
                if raw_scale is not None:
                    crop_mean /= raw_scale
                self.crop_mean = crop_mean
            else:
                self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
| true | true |
f72b56c1b85626f170db0a70640a8d036cae722e | 858 | py | Python | pcaspy/tools.py | dchabot/python-pcaspy | 42c32e90b854414d0f929a1ecef32b7438344d42 | [
"BSD-3-Clause"
] | null | null | null | pcaspy/tools.py | dchabot/python-pcaspy | 42c32e90b854414d0f929a1ecef32b7438344d42 | [
"BSD-3-Clause"
] | null | null | null | pcaspy/tools.py | dchabot/python-pcaspy | 42c32e90b854414d0f929a1ecef32b7438344d42 | [
"BSD-3-Clause"
] | null | null | null | import threading
# Thread running server processing loop
class ServerThread(threading.Thread):
    """Helper that drives a server's processing loop from its own thread.

    Typical use -- run the server for 4 seconds and quit::

        server = SimpleServer()
        server_thread = ServerThread(server)
        server_thread.start()
        time.sleep(4)
        server_thread.stop()
    """

    def __init__(self, server):
        """:param server: :class:`pcaspy.SimpleServer` object"""
        threading.Thread.__init__(self)
        self.server = server
        self.running = True

    def run(self):
        """Pump server processing in 0.1-second slices until stopped."""
        while self.running:
            self.server.process(0.1)

    def stop(self):
        """Ask the processing loop to finish its current slice and exit."""
        self.running = False
| 22.578947 | 66 | 0.578089 | import threading
class ServerThread(threading.Thread):
    """Run a server's processing loop in a background thread.

    Call :meth:`start` to begin processing and :meth:`stop` to end it.
    """
    def __init__(self, server):
        # `server` is expected to expose a ``process(seconds)`` method
        # (e.g. pcaspy.SimpleServer) -- NOTE(review): not enforced here.
        super(ServerThread, self).__init__()
        self.server = server
        # Single-writer/single-reader loop flag; presumably relies on
        # the interpreter for cross-thread visibility -- NOTE(review).
        self.running = True
    def run(self):
        # Repeatedly process in 0.1-second slices until stop() clears
        # the flag.
        while self.running:
            self.server.process(0.1)
    def stop(self):
        # Signal run() to exit after its current process() call returns.
        self.running = False
| true | true |
f72b56de21ac740c008099e3f07466941329206c | 6,793 | py | Python | bindings/python/ensmallen_graph/datasets/string/lachnobacteriumbovis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/lachnobacteriumbovis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/lachnobacteriumbovis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Lachnobacterium bovis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 19:51:00.619514
The undirected graph Lachnobacterium bovis has 2717 nodes and 257981 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06992 and has 23 connected components, where the component with most
nodes has 2631 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 163, the mean node degree is 189.90, and
the node degree mode is 8. The top 5 most central nodes are 140626.JHWB01000013_gene537
(degree 1145), 140626.JHWB01000019_gene960 (degree 1118), 140626.JHWB01000009_gene1319
(degree 1020), 140626.JHWB01000011_gene81 (degree 979) and 140626.JHWB01000022_gene2049
(degree 963).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LachnobacteriumBovis
# Then load the graph
graph = LachnobacteriumBovis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def LachnobacteriumBovis(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return new instance of the Lachnobacterium bovis graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs/string",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Lachnobacterium bovis graph.

    Report
    ---------------------
    At the time of rendering these methods (datetime
    2021-02-02 19:51:00.619514), the graph had the following
    characteristics: the undirected graph Lachnobacterium bovis has
    2717 nodes and 257981 weighted edges, of which none are self-loops.
    The graph is dense as it has a density of 0.06992 and has 23
    connected components, where the component with most nodes has 2631
    nodes and the component with the least nodes has 2 nodes. The graph
    median node degree is 163, the mean node degree is 189.90, and the
    node degree mode is 8. The top 5 most central nodes are
    140626.JHWB01000013_gene537 (degree 1145),
    140626.JHWB01000019_gene960 (degree 1118),
    140626.JHWB01000009_gene1319 (degree 1020),
    140626.JHWB01000011_gene81 (degree 979) and
    140626.JHWB01000022_gene2049 (degree 963).

    References
    ---------------------
    Please cite the following if you use the data:

    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }

    Usage example
    ----------------------
    The usage of this graph is relatively straightforward:

    .. code:: python

        # First import the function to retrieve the graph from the datasets
        from ensmallen_graph.datasets.string import LachnobacteriumBovis

        # Then load the graph
        graph = LachnobacteriumBovis()

        # Finally, you can do anything with it, for instance, compute its report:
        print(graph)

        # If you need to run a link prediction task with validation,
        # you can split the graph using a connected holdout as follows:
        train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
            train_size=0.8,
            # The random state is used to reproduce the holdout.
            random_state=42,
            # Whether to show a loading bar.
            verbose=True
        )

        # Remember that, if you need, you can enable the memory-time trade-offs:
        train_graph.enable(
            vector_sources=True,
            vector_destinations=True,
            vector_outbounds=True
        )

        # Consider using the methods made available in the Embiggen package
        # to run graph embedding or link prediction tasks.
    """
    return AutomaticallyRetrievedGraph(
        graph_name="LachnobacteriumBovis",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| 35.565445 | 223 | 0.708229 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def LachnobacteriumBovis(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Lachnobacterium bovis graph.

    The graph is automatically retrieved from the STRING repository.

    directed: whether to load the graph as directed (default False).
    verbose: whether to show loading bars (default 2).
    cache_path: where to store the downloaded graphs.
    additional_graph_kwargs: additional graph kwargs.
    """
    # Build the retrieval helper, then invoke it to load the graph.
    retrieval = AutomaticallyRetrievedGraph(
        graph_name="LachnobacteriumBovis",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retrieval()
| true | true |
f72b570f213204c468b4f1b373c9ca56f111cbd6 | 1,128 | py | Python | windyquery/validator/field.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 51 | 2019-05-13T10:51:23.000Z | 2021-09-12T08:11:56.000Z | windyquery/validator/field.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 2 | 2020-10-08T16:28:45.000Z | 2021-06-23T03:27:42.000Z | windyquery/validator/field.py | bluerelay/windyquery | 049dc624f4c4a3210d455352b1495db6bd1ff441 | [
"MIT"
] | 3 | 2021-05-10T13:08:21.000Z | 2021-06-20T19:58:30.000Z | from ._base import Base, _rule
from .fullname_json import FullnameJson
from .values.text_val import TextVal
from .values.null import NULL
from .values.holder import Holder
from .values.true import TRUE
from .values.false import FALSE
class Field(FullnameJson, TextVal, NULL, Holder, TRUE, FALSE):
    """Grammar for parsing a single field expression.

    A field may be a literal (``*``, number, text, NULL, TRUE, FALSE),
    a placeholder, or a (possibly JSON-pathed) column name. Token sets
    and reserved words are merged from the mixin grammars.
    """
    # Union of the reserved words contributed by every mixin grammar.
    reserved = {**Base.reserved, **TextVal.reserved,
                **FullnameJson.reserved, **NULL.reserved, **Holder.reserved, **TRUE.reserved, **FALSE.reserved}
    # Concatenation of all mixin token lists.
    tokens = Base.tokens + TextVal.tokens + \
        FullnameJson.tokens + NULL.tokens + Holder.tokens + TRUE.tokens + FALSE.tokens
    precedence = FullnameJson.precedence

    # Tokens

    # rules
    _start = 'field'  # grammar start symbol

    @_rule('''field : STAR
            | NUMBER
            | TEXTVAL
            | NULL
            | TRUE
            | FALSE''')
    def p_field_items(self, p):
        # Literal field values are wrapped via the provider.
        p[0] = self.provider.new_record(p[1])

    @_rule('field : HOLDER')
    def p_field_param(self, p):
        # A placeholder becomes a parameter (``new_param``).
        p[0] = self.provider.new_param()

    @_rule('field : fullname_json')
    def p_field_name(self, p):
        # A (possibly JSON-pathed) name passes through unchanged.
        p[0] = p[1]
| 29.684211 | 111 | 0.606383 | from ._base import Base, _rule
from .fullname_json import FullnameJson
from .values.text_val import TextVal
from .values.null import NULL
from .values.holder import Holder
from .values.true import TRUE
from .values.false import FALSE
class Field(FullnameJson, TextVal, NULL, Holder, TRUE, FALSE):
    """Parser grammar accepting one field expression.

    Literals (``*``, numbers, text, NULL, TRUE, FALSE), placeholders
    and (possibly JSON-pathed) column names are all valid fields.
    """
    # Merge reserved words from every mixin grammar.
    reserved = {**Base.reserved, **TextVal.reserved,
                **FullnameJson.reserved, **NULL.reserved, **Holder.reserved, **TRUE.reserved, **FALSE.reserved}
    # Merge token lists from every mixin grammar.
    tokens = Base.tokens + TextVal.tokens + \
        FullnameJson.tokens + NULL.tokens + Holder.tokens + TRUE.tokens + FALSE.tokens
    precedence = FullnameJson.precedence

    # Start symbol of this grammar.
    _start = 'field'

    @_rule('''field : STAR
            | NUMBER
            | TEXTVAL
            | NULL
            | TRUE
            | FALSE''')
    def p_field_items(self, p):
        # Literal values are turned into records by the provider.
        p[0] = self.provider.new_record(p[1])

    @_rule('field : HOLDER')
    def p_field_param(self, p):
        # Placeholders are turned into parameters by the provider.
        p[0] = self.provider.new_param()

    @_rule('field : fullname_json')
    def p_field_name(self, p):
        # Column names are forwarded as parsed by the mixin grammar.
        p[0] = p[1]
| true | true |
f72b575498b298ee6a17f438e78286df5deb72f6 | 4,471 | py | Python | distributex/service.py | calston/distributex | 06ef0f948f7587ccf2a45305e8de45e9922990cc | [
"MIT"
] | 2 | 2018-06-07T12:46:22.000Z | 2020-11-21T18:10:44.000Z | distributex/service.py | calston/distributex | 06ef0f948f7587ccf2a45305e8de45e9922990cc | [
"MIT"
] | null | null | null | distributex/service.py | calston/distributex | 06ef0f948f7587ccf2a45305e8de45e9922990cc | [
"MIT"
] | null | null | null | from twisted.application import internet, service
from twisted.web import server, resource, client
from twisted.internet import defer, reactor, threads, utils, task
from zope import interface
import yaml
import time
import cgi
import random
from distributex.backends import in_memory_backend, memcached_backend
class SiteRoot(resource.Resource):
    """Twisted web resource implementing a distributed lock service.

    Handles GET requests on three paths, each taking ``pool`` and
    ``host`` query arguments:

    * ``/wait``    -- poll until a lock is acquired, answer YES/NO
    * ``/get``     -- try to take a lock immediately, answer YES/NO
    * ``/release`` -- release a lock, answer OK

    Unknown pools, hosts not listed in a restricted pool, or malformed
    requests answer INVALID.
    """
    isLeaf = True
    addSlash = True

    def __init__(self, config):
        # `config` is a path to a YAML file with optional `backend`
        # and `pools` keys.
        self.backends = {
            'memcache': memcached_backend.MemcachedBackend,
            'inmemory': in_memory_backend.InMemoryDictBackend
        }
        # NOTE(review): the file handle from open() is never closed,
        # and yaml.load without an explicit Loader is unsafe on
        # untrusted input -- consider `with open(...)` + yaml.safe_load.
        self.config = yaml.load(open(config))
        self.ready = False
        # Defer backend setup until the reactor is running.
        reactor.callWhenRunning(self.setup)

    @defer.inlineCallbacks
    def setup(self):
        # Initialise the configured backend (in-memory by default).
        self.backend = self.backends[
            self.config.get('backend', 'inmemory')
        ](self.config)

        self.pools = {}
        # Construct our pools: name -> allowed servers ([] = any host).
        for pool in self.config.get('pools', []):
            if 'servers' in pool:
                servers = pool['servers'].replace(' ', '').split(',')
            else:
                servers = []
            self.pools[pool['name']] = servers
            expire = pool.get('expire', 1800)
            maxlocks = pool.get('maxlocks', 1)
            yield defer.maybeDeferred(
                self.backend.add_pool, pool['name'], expire, maxlocks=maxlocks
            )
        self.ready = True
        defer.returnValue(None)

    def request_finish(self, request, result):
        # Write `result` to the client and close the request.
        request.write(result)
        request.finish()

    def stop_timer(self, timer):
        # Stop a LoopingCall if it is still running.
        if timer.running:
            timer.stop()

    def wait_finish(self, lock, request, timer):
        # A lock was acquired: stop polling and answer YES.
        self.stop_timer(timer)
        self.request_finish(request, 'YES')

    def wait_bailout(self, error, request, timer):
        # Polling failed: stop the timer and answer NO.
        self.stop_timer(timer)
        self.request_finish(request, 'NO')

    @defer.inlineCallbacks
    def wait_lock(self, d, pool, host):
        # Poll the backend once; fire `d` when the lock is obtained.
        lock = yield defer.maybeDeferred(
            self.backend.get_lock, pool, host
        )
        if lock:
            d.callback(True)

    def request_wait(self, request, pool, host):
        # Poll for a lock on a jittered 1-2 s interval; stop polling
        # if the client disconnects before the lock is obtained.
        d = defer.Deferred()
        timer = task.LoopingCall(self.wait_lock, d, pool, host)
        d.addCallback(self.wait_finish, request, timer)
        d.addErrback(self.wait_bailout, request, timer)
        request.notifyFinish().addErrback(
            lambda _: self.stop_timer(timer)
        )
        timer.start(1 + random.random(), True)
        return d

    def request_release(self, request, pool, host):
        # Release the lock, then answer OK.
        d = defer.maybeDeferred(
            self.backend.release_lock, pool, host
        ).addCallback(lambda _: self.request_finish(request, 'OK'))

    def request_getlock(self, request, pool, host):
        # Try to take the lock without waiting; answer YES or NO.
        d = defer.maybeDeferred(
            self.backend.get_lock, pool, host
        ).addCallback(
            lambda l: self.request_finish(request, l and 'YES' or 'NO')
        )

    def handle_request(self, request):
        # Validate and dispatch a request once setup has completed.
        if not self.ready:
            # Backend still initialising: retry on the next tick.
            reactor.callLater(0, self.handle_request, request)
        else:
            call = request.path.replace('/', '')
            if not (('host' in request.args) and ('pool' in request.args)):
                self.request_finish(request, 'INVALID')
                return
            # NOTE(review): cgi.escape is deprecated (removed in
            # Python 3.8); this code base uses Python 2 idioms.
            host = cgi.escape(request.args["host"][0])
            pool = cgi.escape(request.args["pool"][0])
            if pool in self.pools:
                if self.pools[pool]:
                    # Server not allowed in this restricted pool.
                    if not(host in self.pools[pool]):
                        self.request_finish(request, 'INVALID')
                        return
            else:
                self.request_finish(request, 'INVALID')
                return
            if call == 'wait':
                # Wait for a lock, with a small random delay to
                # spread out simultaneous waiters.
                reactor.callLater(random.random()/5, self.request_wait,
                                  request, pool, host)
            elif call == 'release':
                # Release a lock
                self.request_release(request, pool, host)
            elif call == 'get':
                # Get a lock, don't wait for it
                self.request_getlock(request, pool, host)
            else:
                self.request_finish(request, 'INVALID')

    def render_GET(self, request):
        # Twisted entry point: the answer is written asynchronously.
        self.handle_request(request)
        return server.NOT_DONE_YET
| 29.609272 | 78 | 0.569895 | from twisted.application import internet, service
from twisted.web import server, resource, client
from twisted.internet import defer, reactor, threads, utils, task
from zope import interface
import yaml
import time
import cgi
import random
from distributex.backends import in_memory_backend, memcached_backend
class SiteRoot(resource.Resource):
    """Twisted web resource implementing a distributed lock service.

    Handles GET requests on three paths, each taking ``pool`` and
    ``host`` query arguments:

    * ``/wait``    -- poll until a lock is acquired, answer YES/NO
    * ``/get``     -- try to take a lock immediately, answer YES/NO
    * ``/release`` -- release a lock, answer OK

    Unknown pools, hosts not listed in a restricted pool, or malformed
    requests answer INVALID.
    """
    isLeaf = True
    addSlash = True

    def __init__(self, config):
        """Load the YAML configuration and schedule backend setup.

        config: path to a YAML file with optional ``backend`` and
            ``pools`` keys.
        """
        self.backends = {
            'memcache': memcached_backend.MemcachedBackend,
            'inmemory': in_memory_backend.InMemoryDictBackend
        }
        # Close the config file instead of leaking the handle.
        # NOTE(review): yaml.load without an explicit Loader is unsafe
        # on untrusted input; the config is operator-supplied here, but
        # consider yaml.safe_load.
        with open(config) as config_file:
            self.config = yaml.load(config_file)
        self.ready = False
        # Defer backend setup until the reactor is running.
        reactor.callWhenRunning(self.setup)

    @defer.inlineCallbacks
    def setup(self):
        """Instantiate the backend and register every configured pool."""
        # Initialise the configured backend (in-memory by default).
        self.backend = self.backends[
            self.config.get('backend', 'inmemory')
        ](self.config)

        # Map pool name -> list of allowed servers ([] = any host).
        self.pools = {}
        for pool in self.config.get('pools', []):
            if 'servers' in pool:
                servers = pool['servers'].replace(' ', '').split(',')
            else:
                servers = []
            self.pools[pool['name']] = servers
            expire = pool.get('expire', 1800)
            maxlocks = pool.get('maxlocks', 1)
            yield defer.maybeDeferred(
                self.backend.add_pool, pool['name'], expire, maxlocks=maxlocks
            )
        self.ready = True
        defer.returnValue(None)

    def request_finish(self, request, result):
        """Write `result` to the client and close the request."""
        request.write(result)
        request.finish()

    def stop_timer(self, timer):
        """Stop a LoopingCall if it is still running."""
        if timer.running:
            timer.stop()

    def wait_finish(self, lock, request, timer):
        """A lock was acquired: stop polling and answer YES."""
        self.stop_timer(timer)
        self.request_finish(request, 'YES')

    def wait_bailout(self, error, request, timer):
        """Polling failed: stop the timer and answer NO."""
        self.stop_timer(timer)
        self.request_finish(request, 'NO')

    @defer.inlineCallbacks
    def wait_lock(self, d, pool, host):
        """Poll the backend once; fire `d` when the lock is obtained."""
        lock = yield defer.maybeDeferred(
            self.backend.get_lock, pool, host
        )
        if lock:
            d.callback(True)

    def request_wait(self, request, pool, host):
        """Poll for a lock until acquired, then answer the request.

        The poll interval is jittered (1-2 s) to spread out waiters,
        and polling stops if the client disconnects first.
        """
        d = defer.Deferred()
        timer = task.LoopingCall(self.wait_lock, d, pool, host)
        d.addCallback(self.wait_finish, request, timer)
        d.addErrback(self.wait_bailout, request, timer)
        # Stop polling when the client goes away before we get the lock.
        request.notifyFinish().addErrback(
            lambda _: self.stop_timer(timer)
        )
        timer.start(1 + random.random(), True)
        return d

    def request_release(self, request, pool, host):
        """Release a lock and answer OK."""
        defer.maybeDeferred(
            self.backend.release_lock, pool, host
        ).addCallback(lambda _: self.request_finish(request, 'OK'))

    def request_getlock(self, request, pool, host):
        """Try to take a lock without waiting; answer YES or NO."""
        defer.maybeDeferred(
            self.backend.get_lock, pool, host
        ).addCallback(
            lambda l: self.request_finish(request, 'YES' if l else 'NO')
        )

    def handle_request(self, request):
        """Validate and dispatch a request once setup has completed."""
        if not self.ready:
            # Backend still initialising: retry on the next tick.
            reactor.callLater(0, self.handle_request, request)
            return

        call = request.path.replace('/', '')
        if not (('host' in request.args) and ('pool' in request.args)):
            self.request_finish(request, 'INVALID')
            return

        # NOTE(review): cgi.escape is deprecated (removed in Python
        # 3.8); kept for compatibility with this Python 2 code base.
        host = cgi.escape(request.args["host"][0])
        pool = cgi.escape(request.args["pool"][0])

        if pool in self.pools:
            # A non-empty server list restricts which hosts may lock.
            if self.pools[pool] and host not in self.pools[pool]:
                self.request_finish(request, 'INVALID')
                return
        else:
            self.request_finish(request, 'INVALID')
            return

        if call == 'wait':
            # Wait for a lock, with a small random delay to spread
            # out simultaneous waiters.
            reactor.callLater(random.random()/5, self.request_wait,
                              request, pool, host)
        elif call == 'release':
            # Release a lock.
            self.request_release(request, pool, host)
        elif call == 'get':
            # Get a lock, don't wait for it.
            self.request_getlock(request, pool, host)
        else:
            self.request_finish(request, 'INVALID')

    def render_GET(self, request):
        """Twisted entry point: the answer is written asynchronously."""
        self.handle_request(request)
        return server.NOT_DONE_YET
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.