code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import sys
import numpy as np
from abc import ABCMeta, abstractmethod
class OptimizationTestFunction:
__metaclass__ = ABCMeta
"""
General class for Test Functions used for optimization
"""
def __init__(self, mindim=1, maxdim=None, domain=np.array([-1, 1])):
self.mindim = mindim
self.maxdim = maxdim
self.domain = domain
@staticmethod
def function(x):
return np.sum(np.abs(x))
@abstractmethod
def minimum(self, ndim):
pass
def fminimum(self, ndim):
x = self.minimum(ndim)
return self.function(x)
def get_plot_matrices(self, shape=None):
if shape is None:
shape = [200, 200]
if self.domain.ndim == 1:
dx = float(self.domain[1] - self.domain[0]) / (shape[0])
X, Y = np.mgrid[self.domain[0]:self.domain[1]:dx, self.domain[0]:self.domain[1]:dx]
else:
dx = float(self.domain[0, 1] - self.domain[0, 0]) / (shape[0])
dy = float(self.domain[1, 1] - self.domain[1, 0]) / (shape[1])
X, Y = np.mgrid[self.domain[0, 0]:self.domain[0, 1]:dx, self.domain[1, 0]:self.domain[1, 1]:dy]
Z = self.function(np.array([X, Y]))
return X, Y, Z
class Sphere(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
x = np.array(x)
return np.sum(x.T * x.T, axis=-1).T
def minimum(self, ndim):
return np.zeros(ndim)
class Ackley(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
n = len(x)
exp1 = np.exp(-0.2 * np.sqrt(1.0 / n * np.sum(x * x)))
exp2 = np.exp(1.0 / n * np.sum((np.cos(2 * np.pi * x)).T, axis=-1).T)
return -20 * exp1 - exp2 + np.e + 20
def minimum(self, ndim):
return np.zeros(ndim)
class Rosenbrock(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
return np.sum((100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0).T, axis=-1).T
def minimum(self, ndim):
return np.ones(ndim)
class Beale(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-4.5, 4.5]))
@staticmethod
def function(x):
return (1.5 - x[0] + x[0] * x[1]) ** 2 + (2.25 - x[0] + x[0] * x[1] * x[1]) ** 2 + (2.625 - x[0] + x[0] * x[1] *
x[1] * x[1]) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([3.0, 0.5])
class GoldsteinPrice(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-2, 2]))
@staticmethod
def function(x):
factor1 = (19 - 14 * x[0] + 3 * x[0] ** 2 - 14 * x[1] + 6 * x[0] * x[1] + 3 * x[1] ** 2)
factor2 = (18 - 32 * x[0] + 12 * x[0] ** 2 + 48 * x[1] - 36 * x[0] * x[1] + 27 * x[1] ** 2)
return (1 + ((x[0] + x[1] + 1) ** 2) * factor1) * (30 + ((2 * x[0] - 3 * x[1]) ** 2) * factor2)
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, -1.0])
class Booth(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return (x[0] + 2 * x[1] - 7) ** 2 + (2 * x[0] + x[1] - 5) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([1.0, -3.0])
class BukinN6(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([[-15, 15], [-3, 3]]))
@staticmethod
def function(x):
return 100 * np.sqrt(np.abs(x[1] - 0.01 * x[0] ** 2)) + 0.01 * np.abs(x[0] + 10)
def minimum(self, ndim):
assert ndim == 2
return np.array([10.0, 1.0])
class Matyas(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
def minimum(self, ndim):
assert ndim == 2
return np.array([1.0, 1.0])
class LeviN13(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
term1 = (np.sin(3 * np.pi * x[0])) ** 3
term2 = (x[0] - 1) ** 2 * (1 + (np.sin(3 * np.pi * x[1])) ** 3)
term3 = (x[1] - 1) ** 2 * (1 + (np.sin(2 * np.pi * x[1])) ** 2)
return term1 + term2 + term3
def minimum(self, ndim):
assert ndim == 2
return np.array([1.0, 1.0])
class ThreeHump(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return 2 * x[1] ** 2 - 1.05 * x[0] ** 4 + 1.0 / 6.0 * x[0] ** 6 + x[0] * x[1] + x[1] ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([0.0, 0.0])
class Easom(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return -np.cos(x[0]) * np.cos(x[1]) * np.exp(-1 * ((x[0] - np.pi) ** 2 + (x[1] - np.pi) ** 2))
def minimum(self, ndim):
assert ndim == 2
return np.array([np.pi, np.pi])
class CrossInTray(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
factor1 = np.exp(np.abs(100 - np.sqrt(x[0] ** 2 + x[1] ** 2) / np.pi))
return -1E-4 * (np.abs(np.sin(x[0]) * np.sin(x[1]) * factor1) + 1) ** 0.1
def minimum(self, ndim):
assert ndim == 2
return np.array([1.34941, 1.34941])
class Eggholder(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-512, 512]))
@staticmethod
def function(x):
return -1.0 * (x[1] + 47) * np.sin(np.sqrt(np.abs(x[1] + x[0] / 2.0 + 47))) - x[0] * np.sin(
np.sqrt(np.abs(x[0] - x[1] - 47)))
def minimum(self, ndim):
assert ndim == 2
return np.array([512, 404.2319])
class HolderTable(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-10, 10]))
@staticmethod
def function(x):
return -1.0 * np.abs(np.sin(x[0]) * np.cos(x[1]) * np.exp(np.abs(1 - np.sqrt(x[0] ** 2 + x[1] ** 2) / np.pi)))
def minimum(self, ndim):
assert ndim == 2
return np.array([8.05502, 9.664559])
class McCormick(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([[-1.5, 4], [-3, 4]]))
@staticmethod
def function(x):
return np.sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + 2.5 * x[1] + 1
def minimum(self, ndim):
assert ndim == 2
return np.array([8.05502, 9.66459])
class SchafferN2(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return 0.5 + ((np.sin(x[0] ** 2 - x[1] ** 2)) ** 2 - 0.5) / (1 + 1E-3 * (x[0] ** 2 + x[1] ** 2)) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.zeros(2)
class SchafferN4(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-100, 100]))
@staticmethod
def function(x):
return 0.5 + ((np.cos(np.sin(np.abs(x[0] ** 2 - x[1] ** 2)))) ** 2 - 0.5) / (1 + 1E-3 * (
x[0] ** 2 + x[1] ** 2)) ** 2
def minimum(self, ndim):
assert ndim == 2
return np.array([0, 1.25313])
class StyblinskiTang(OptimizationTestFunction):
def __init__(self):
OptimizationTestFunction.__init__(self, mindim=1, maxdim=None, domain=np.array([-5, 5]))
@staticmethod
def function(x):
return np.sum((x ** 4 - 16 * x ** 2 + 5 * x).T, axis=-1).T / 2.0
def minimum(self, ndim):
return -2.903534 * np.ones(ndim)
# class Simionescu(OptimizationTestFunction):
#
# def __init__(self):
# OptimizationTestFunction.__init__(self, mindim=2, maxdim=2, domain=np.array([-1.25, 1.25]))
#
# @staticmethod
# def function(x):
# rt = 1
# rs = 0.2
# n = 8
# return np.piecewise(x,
# [x[0]**2 + x[1]**2 <= (rt + rs*np.cos(n*np.arctan(x[0]/x[1])))**2,
# x[0]**2 + x[1]**2 > (rt + rs*np.cos(n*np.arctan(x[0]/x[1])))**2], [0.1*x[0]*x[1], 1])
#
#
# def minimum(self, ndim):
# assert ndim == 2
# return -0.84852813*np.ones(ndim)
def all_tests_functions():
current_module = sys.modules[__name__]
f = current_module.__dict__
return [f[x]() for x in f if hasattr(f[x], '__base__') and f[x].__base__ == OptimizationTestFunction]
| [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.exp",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.sin"
] | [((261, 278), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (269, 278), True, 'import numpy as np\n'), ((1454, 1465), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1462, 1465), True, 'import numpy as np\n'), ((1555, 1569), 'numpy.zeros', 'np.zeros', (['ndim'], {}), '(ndim)\n', (1563, 1569), True, 'import numpy as np\n'), ((2023, 2037), 'numpy.zeros', 'np.zeros', (['ndim'], {}), '(ndim)\n', (2031, 2037), True, 'import numpy as np\n'), ((2389, 2402), 'numpy.ones', 'np.ones', (['ndim'], {}), '(ndim)\n', (2396, 2402), True, 'import numpy as np\n'), ((2907, 2927), 'numpy.array', 'np.array', (['[3.0, 0.5]'], {}), '([3.0, 0.5])\n', (2915, 2927), True, 'import numpy as np\n'), ((3507, 3528), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (3515, 3528), True, 'import numpy as np\n'), ((3871, 3892), 'numpy.array', 'np.array', (['[1.0, -3.0]'], {}), '([1.0, -3.0])\n', (3879, 3892), True, 'import numpy as np\n'), ((4266, 4287), 'numpy.array', 'np.array', (['[10.0, 1.0]'], {}), '([10.0, 1.0])\n', (4274, 4287), True, 'import numpy as np\n'), ((4627, 4647), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4635, 4647), True, 'import numpy as np\n'), ((5150, 5170), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5158, 5170), True, 'import numpy as np\n'), ((5544, 5564), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5552, 5564), True, 'import numpy as np\n'), ((5941, 5965), 'numpy.array', 'np.array', (['[np.pi, np.pi]'], {}), '([np.pi, np.pi])\n', (5949, 5965), True, 'import numpy as np\n'), ((6404, 6432), 'numpy.array', 'np.array', (['[1.34941, 1.34941]'], {}), '([1.34941, 1.34941])\n', (6412, 6432), True, 'import numpy as np\n'), ((6858, 6883), 'numpy.array', 'np.array', (['[512, 404.2319]'], {}), '([512, 404.2319])\n', (6866, 6883), True, 'import numpy as np\n'), ((7280, 7309), 'numpy.array', 'np.array', (['[8.05502, 9.664559]'], {}), '([8.05502, 9.664559])\n', (7288, 7309), 
True, 'import numpy as np\n'), ((7682, 7710), 'numpy.array', 'np.array', (['[8.05502, 9.66459]'], {}), '([8.05502, 9.66459])\n', (7690, 7710), True, 'import numpy as np\n'), ((8099, 8110), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (8107, 8110), True, 'import numpy as np\n'), ((8528, 8550), 'numpy.array', 'np.array', (['[0, 1.25313]'], {}), '([0, 1.25313])\n', (8536, 8550), True, 'import numpy as np\n'), ((430, 439), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (436, 439), True, 'import numpy as np\n'), ((1198, 1214), 'numpy.array', 'np.array', (['[X, Y]'], {}), '([X, Y])\n', (1206, 1214), True, 'import numpy as np\n'), ((1481, 1507), 'numpy.sum', 'np.sum', (['(x.T * x.T)'], {'axis': '(-1)'}), '(x.T * x.T, axis=-1)\n', (1487, 1507), True, 'import numpy as np\n'), ((2260, 2345), 'numpy.sum', 'np.sum', (['(100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0).T'], {'axis': '(-1)'}), '((100.0 * (x[1:] - x[:-1] ** 2.0) ** 2.0 + (1 - x[:-1]) ** 2.0).T,\n axis=-1)\n', (2266, 2345), True, 'import numpy as np\n'), ((4868, 4892), 'numpy.sin', 'np.sin', (['(3 * np.pi * x[0])'], {}), '(3 * np.pi * x[0])\n', (4874, 4892), True, 'import numpy as np\n'), ((5814, 5870), 'numpy.exp', 'np.exp', (['(-1 * ((x[0] - np.pi) ** 2 + (x[1] - np.pi) ** 2))'], {}), '(-1 * ((x[0] - np.pi) ** 2 + (x[1] - np.pi) ** 2))\n', (5820, 5870), True, 'import numpy as np\n'), ((8892, 8905), 'numpy.ones', 'np.ones', (['ndim'], {}), '(ndim)\n', (8899, 8905), True, 'import numpy as np\n'), ((1383, 1400), 'numpy.array', 'np.array', (['[-5, 5]'], {}), '([-5, 5])\n', (1391, 1400), True, 'import numpy as np\n'), ((1714, 1731), 'numpy.array', 'np.array', (['[-5, 5]'], {}), '([-5, 5])\n', (1722, 1731), True, 'import numpy as np\n'), ((2186, 2203), 'numpy.array', 'np.array', (['[-5, 5]'], {}), '([-5, 5])\n', (2194, 2203), True, 'import numpy as np\n'), ((2543, 2564), 'numpy.array', 'np.array', (['[-4.5, 4.5]'], {}), '([-4.5, 4.5])\n', (2551, 2564), True, 'import numpy as np\n'), ((3077, 3094), 
'numpy.array', 'np.array', (['[-2, 2]'], {}), '([-2, 2])\n', (3085, 3094), True, 'import numpy as np\n'), ((3669, 3688), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (3677, 3688), True, 'import numpy as np\n'), ((4035, 4065), 'numpy.array', 'np.array', (['[[-15, 15], [-3, 3]]'], {}), '([[-15, 15], [-3, 3]])\n', (4043, 4065), True, 'import numpy as np\n'), ((4178, 4195), 'numpy.abs', 'np.abs', (['(x[0] + 10)'], {}), '(x[0] + 10)\n', (4184, 4195), True, 'import numpy as np\n'), ((4429, 4448), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (4437, 4448), True, 'import numpy as np\n'), ((4790, 4809), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (4798, 4809), True, 'import numpy as np\n'), ((5315, 5334), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (5323, 5334), True, 'import numpy as np\n'), ((5705, 5726), 'numpy.array', 'np.array', (['[-100, 100]'], {}), '([-100, 100])\n', (5713, 5726), True, 'import numpy as np\n'), ((5799, 5811), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (5805, 5811), True, 'import numpy as np\n'), ((6112, 6131), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (6120, 6131), True, 'import numpy as np\n'), ((6577, 6598), 'numpy.array', 'np.array', (['[-512, 512]'], {}), '([-512, 512])\n', (6585, 6598), True, 'import numpy as np\n'), ((7030, 7049), 'numpy.array', 'np.array', (['[-10, 10]'], {}), '([-10, 10])\n', (7038, 7049), True, 'import numpy as np\n'), ((7454, 7484), 'numpy.array', 'np.array', (['[[-1.5, 4], [-3, 4]]'], {}), '([[-1.5, 4], [-3, 4]])\n', (7462, 7484), True, 'import numpy as np\n'), ((7856, 7877), 'numpy.array', 'np.array', (['[-100, 100]'], {}), '([-100, 100])\n', (7864, 7877), True, 'import numpy as np\n'), ((8256, 8277), 'numpy.array', 'np.array', (['[-100, 100]'], {}), '([-100, 100])\n', (8264, 8277), True, 'import numpy as np\n'), ((8703, 8720), 'numpy.array', 'np.array', (['[-5, 5]'], {}), '([-5, 5])\n', (8711, 8720), True, 
'import numpy as np\n'), ((8777, 8826), 'numpy.sum', 'np.sum', (['(x ** 4 - 16 * x ** 2 + 5 * x).T'], {'axis': '(-1)'}), '((x ** 4 - 16 * x ** 2 + 5 * x).T, axis=-1)\n', (8783, 8826), True, 'import numpy as np\n'), ((4136, 4167), 'numpy.abs', 'np.abs', (['(x[1] - 0.01 * x[0] ** 2)'], {}), '(x[1] - 0.01 * x[0] ** 2)\n', (4142, 4167), True, 'import numpy as np\n'), ((4939, 4963), 'numpy.sin', 'np.sin', (['(3 * np.pi * x[1])'], {}), '(3 * np.pi * x[1])\n', (4945, 4963), True, 'import numpy as np\n'), ((5011, 5035), 'numpy.sin', 'np.sin', (['(2 * np.pi * x[1])'], {}), '(2 * np.pi * x[1])\n', (5017, 5035), True, 'import numpy as np\n'), ((5784, 5796), 'numpy.cos', 'np.cos', (['x[0]'], {}), '(x[0])\n', (5790, 5796), True, 'import numpy as np\n'), ((1839, 1852), 'numpy.sum', 'np.sum', (['(x * x)'], {}), '(x * x)\n', (1845, 1852), True, 'import numpy as np\n'), ((6211, 6241), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + x[1] ** 2)'], {}), '(x[0] ** 2 + x[1] ** 2)\n', (6218, 6241), True, 'import numpy as np\n'), ((6691, 6721), 'numpy.abs', 'np.abs', (['(x[1] + x[0] / 2.0 + 47)'], {}), '(x[1] + x[0] / 2.0 + 47)\n', (6697, 6721), True, 'import numpy as np\n'), ((6761, 6785), 'numpy.abs', 'np.abs', (['(x[0] - x[1] - 47)'], {}), '(x[0] - x[1] - 47)\n', (6767, 6785), True, 'import numpy as np\n'), ((7120, 7132), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', (7126, 7132), True, 'import numpy as np\n'), ((7135, 7147), 'numpy.cos', 'np.cos', (['x[1]'], {}), '(x[1])\n', (7141, 7147), True, 'import numpy as np\n'), ((7541, 7560), 'numpy.sin', 'np.sin', (['(x[0] + x[1])'], {}), '(x[0] + x[1])\n', (7547, 7560), True, 'import numpy as np\n'), ((7942, 7971), 'numpy.sin', 'np.sin', (['(x[0] ** 2 - x[1] ** 2)'], {}), '(x[0] ** 2 - x[1] ** 2)\n', (7948, 7971), True, 'import numpy as np\n'), ((1895, 1916), 'numpy.cos', 'np.cos', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1901, 1916), True, 'import numpy as np\n'), ((6283, 6295), 'numpy.sin', 'np.sin', (['x[0]'], {}), '(x[0])\n', 
(6289, 6295), True, 'import numpy as np\n'), ((6298, 6310), 'numpy.sin', 'np.sin', (['x[1]'], {}), '(x[1])\n', (6304, 6310), True, 'import numpy as np\n'), ((8356, 8385), 'numpy.abs', 'np.abs', (['(x[0] ** 2 - x[1] ** 2)'], {}), '(x[0] ** 2 - x[1] ** 2)\n', (8362, 8385), True, 'import numpy as np\n'), ((7168, 7198), 'numpy.sqrt', 'np.sqrt', (['(x[0] ** 2 + x[1] ** 2)'], {}), '(x[0] ** 2 + x[1] ** 2)\n', (7175, 7198), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 18:02:51 2018
@author: <NAME>
"""
import numpy
from .valueaxis import ValueAxis
from .UnitsManager import frequency_units
class TimeAxis(ValueAxis):
""" Class representing time in time dependent calculations.
The `TimeAxis` class stands in a close relation to `FrequencyAxis`.
`FrequencyAxis` represents the frequencies one obtains in the Fourier
transform of a function of the `TimeAxis`. By default,
`TimeAxis` is of the type `upper-half` which
means that by specifying the `start`, `length` and `step` we
represent the upper half of the interval `<start-length*step,
start+(length-1)*step>`. The Fourier transform of a time dependent
object defined on the `TimeAxis` will then have twice as many points as
the `TimeAxis` (for time axis with start=0.0 there will be one point less
in frequency in order not to duplicate value for zero time when some time
symmetry is present). This is usefull when the time dependent object has some
special symmetries. One example is the so-called quantum bath correlation
function which fulfills the relation (in LaTeX)
C(-t) = C^{*}(t)
Parameters
----------
start : float
start of the TimeAxis
length : int
number of steps
step : float
time step
atype : string {"complete","upper-half"}
Axis type
Attributes
----------
data : float array
Holds the values of time, it is equivalent to the atribute
TimeAxis.time
"""
def __init__(self, start=0.0, length=1, step=1.0,
atype="upper-half", frequency_start=0.0):
ValueAxis.__init__(self, start=start,
length=length, step=step)
self.frequency_start = frequency_start
self.allowed_atypes = ["upper-half", "complete"]
if atype in self.allowed_atypes:
self.atype = atype
else:
raise Exception("Unknown time axis type")
def get_FrequencyAxis(self):
""" Returns corresponding FrequencyAxis object
"""
from .frequencyaxis import FrequencyAxis
if self.atype == 'complete':
# This correspond to the definition of angular frequency omega in
# FourierTransform module
frequencies = numpy.fft.fftshift(
(2.0*numpy.pi)*numpy.fft.fftfreq(self.length, self.step))
step = frequencies[1]-frequencies[0]
start = frequencies[0] + self.frequency_start
nosteps = len(frequencies)
time_start = self.data[self.length//2]
elif self.atype == 'upper-half':
# if self.start == 0.0:
# frequencies = numpy.fft.fftshift(
# (2.0*numpy.pi)*numpy.fft.fftfreq(2*self.length-1, self.step))
# else:
frequencies = numpy.fft.fftshift(
(2.0*numpy.pi)*numpy.fft.fftfreq(2*self.length, self.step))
# TODO: Check if different definition would not produce error in starting value ~ 4.441e-16 for trasformation to frequency and back
start = frequencies[0] + self.frequency_start
step = frequencies[1] - frequencies[0]
nosteps = len(frequencies)
time_start = self.min
else:
raise Exception("Unknown time axis type")
# this creation has to be protected from units management
with frequency_units("int"):
faxis = FrequencyAxis(start, nosteps, step,
atype=self.atype, time_start=time_start)
return faxis
def get_rFrequencyAxis(self):
""" Returns corresponding FrequencyAxis object
"""
from .frequencyaxis import FrequencyAxis
if self.atype == 'upper-half':
frequencies = numpy.fft.fftshift(
(2.0*numpy.pi)*numpy.fft.fftfreq(2*self.length-2, self.step))
start = frequencies[0] + self.frequency_start
step = frequencies[1] - frequencies[0]
nosteps = len(frequencies)
time_start = self.min
else:
raise Exception("Unknown time axis type")
# this creation has to be protected from units management
with frequency_units("int"):
faxis = FrequencyAxis(start, nosteps, step,
atype=self.atype, time_start=time_start)
return faxis
| [
"numpy.fft.fftfreq"
] | [((2492, 2533), 'numpy.fft.fftfreq', 'numpy.fft.fftfreq', (['self.length', 'self.step'], {}), '(self.length, self.step)\n', (2509, 2533), False, 'import numpy\n'), ((4092, 4141), 'numpy.fft.fftfreq', 'numpy.fft.fftfreq', (['(2 * self.length - 2)', 'self.step'], {}), '(2 * self.length - 2, self.step)\n', (4109, 4141), False, 'import numpy\n'), ((3083, 3128), 'numpy.fft.fftfreq', 'numpy.fft.fftfreq', (['(2 * self.length)', 'self.step'], {}), '(2 * self.length, self.step)\n', (3100, 3128), False, 'import numpy\n')] |
import glob
import pandas as pd
import numpy as np
import os
filenames = glob.glob("*.csv")
filenames = [filename for filename in filenames if os.path.getsize(filename) > 10000]
#filenames = ["CreditRequirement.csv"]
timestamp_col = "Complete Timestamp" # column that indicates completion timestamp
case_id_col = "Case ID"
activity_col = "Activity"
def add_all_columns(group):
group = group.sort_values(timestamp_col, ascending=True, kind="mergesort")
group["event_nr"] = range(1,group.shape[0]+1)
group["unique_events"] = group[activity_col].nunique()
group["total_events"] = len(group[activity_col])
end_date = group[timestamp_col].iloc[-1]
tmp = end_date - group[timestamp_col]
tmp = tmp.fillna(0)
start_date = group[timestamp_col].iloc[0]
elapsed = group[timestamp_col] - start_date
elapsed = elapsed.fillna(0)
group["elapsed"] = elapsed.apply(lambda x: float(x / np.timedelta64(1, 'D')))
group["remtime"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'D'))) # D is for days
#group["case_length"] = group.shape[0]
return group
with open("log_summary.tsv", 'w') as fout:
fout.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (
"log", "total_cases", "unique_activities", "total_events","avg_unique_events_per_trace", "mean_case_length",
"std_case_length", "mean_case_duration","std_case_duration","mean_remtime","std_remtime"))
for filename in filenames:
print(filename)
# dtypes = {col:"str" for col in ["proctime", "elapsed", "label", "last"]} # prevent type coercion
data = pd.read_csv(filename, sep=";")
data[timestamp_col] = pd.to_datetime(data[timestamp_col])
data = data.groupby(case_id_col).apply(add_all_columns)
df0 = data.loc[data["event_nr"] == 1].copy()
df0["UER"] = df0["unique_events"] / df0["total_events"]
#print("Avg percentage of unique timestamps per trace: %.3f" %np.mean(df0["UTR"]))
#print("%s out of %s unique timestamps" %(len(data[timestamp_col].unique()),data[timestamp_col].count()))
global_unique_timestamps = len(data[timestamp_col].unique()) / data[timestamp_col].count()
#print("%s cases that reach length %d" %(df.shape[0],cutoff))
#print("In %s of them elapsed time is still 0" %len(df.loc[df["elapsed"]==0]))
#print("%s cases that reach length %d" %(df.shape[0],cutoff))
fout.write("%s, %s, %s, %s, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f\n"%(filename,
data[case_id_col].nunique(),
data[activity_col].nunique(),
data.shape[0],
np.mean(df0["UER"]),
np.mean(df0["total_events"]),
np.std(df0["total_events"]),
np.mean(df0["remtime"]),
np.std(df0["remtime"]),
np.mean(data["remtime"]),
np.std(data["remtime"])))
| [
"numpy.mean",
"os.path.getsize",
"pandas.read_csv",
"glob.glob",
"numpy.std",
"numpy.timedelta64",
"pandas.to_datetime"
] | [((75, 93), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (84, 93), False, 'import glob\n'), ((1585, 1615), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""";"""'}), "(filename, sep=';')\n", (1596, 1615), True, 'import pandas as pd\n'), ((1646, 1681), 'pandas.to_datetime', 'pd.to_datetime', (['data[timestamp_col]'], {}), '(data[timestamp_col])\n', (1660, 1681), True, 'import pandas as pd\n'), ((145, 170), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (160, 170), False, 'import os\n'), ((918, 940), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (932, 940), True, 'import numpy as np\n'), ((996, 1018), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (1010, 1018), True, 'import numpy as np\n'), ((2883, 2902), 'numpy.mean', 'np.mean', (["df0['UER']"], {}), "(df0['UER'])\n", (2890, 2902), True, 'import numpy as np\n'), ((2985, 3013), 'numpy.mean', 'np.mean', (["df0['total_events']"], {}), "(df0['total_events'])\n", (2992, 3013), True, 'import numpy as np\n'), ((3096, 3123), 'numpy.std', 'np.std', (["df0['total_events']"], {}), "(df0['total_events'])\n", (3102, 3123), True, 'import numpy as np\n'), ((3206, 3229), 'numpy.mean', 'np.mean', (["df0['remtime']"], {}), "(df0['remtime'])\n", (3213, 3229), True, 'import numpy as np\n'), ((3312, 3334), 'numpy.std', 'np.std', (["df0['remtime']"], {}), "(df0['remtime'])\n", (3318, 3334), True, 'import numpy as np\n'), ((3417, 3441), 'numpy.mean', 'np.mean', (["data['remtime']"], {}), "(data['remtime'])\n", (3424, 3441), True, 'import numpy as np\n'), ((3524, 3547), 'numpy.std', 'np.std', (["data['remtime']"], {}), "(data['remtime'])\n", (3530, 3547), True, 'import numpy as np\n')] |
import numpy as np
MAX_GEN = 10000
TAM_POP = 30
TAM_CROM = 15
TX_CROSS = 0.8
TX_MUT = 0.1
def GeraPop(): #linha coluna
return (np.random.randint(0,20, [TAM_POP,TAM_CROM])-10) # gera uma populacao de crom de pesos aleatorios
# entre -10 a 10 com 15 pesos
def sigmoid(x): #retorna sigmoide de um valor , tive que fazer pq no python nao tem
return 1 / (1 + np.exp(-x)) # usei pra fazer testes antes da tanh
def CalculaRede(pesos, x1, x2): # pesos - valores gerados em GeraPop , x1 entrada 0 ou 1, x2 0 e 1 tbm
y0b = np.tanh(x1*pesos[1] + x2*pesos[3] + pesos[10]) #calculo da rede
y0a = np.tanh(x1*pesos[0] + x2*pesos[2] + pesos[11])
y1b = np.tanh(y0a*pesos[5] + y0b*pesos[7] + pesos[12])
y1a = np.tanh(y0a*pesos[4] + y0b*pesos[6] + pesos[13])
y2 = (y1a*pesos[8] + y1b*pesos[9] + pesos[14])
return sigmoid(y2) #valor de saida
def Aptidao(x): #funcao de maximizacao
y1 = abs(CalculaRede(x,0,0)) # erro 1 - |0 - x|
y2 = abs(1 - CalculaRede(x,0,1)) # erro 2 - |1-x|
y3 = abs(1 - CalculaRede(x,1,0)) # erro 3 - |1-x|
y4 = abs(CalculaRede(x,1,1)) # erro 4 - |0-x|
erro = (y1+y2+y3+y4) #erro total
return 1/erro #esse erro tem que ir pra 0, ser minimizado
def CalculaAptidoes(pop): #calculo da aptidao da populacao
return [Aptidao(x) for x in pop] #retorna uma lista de aptidoes de cada cromossomo
def SelecaoRoleta(aptidoes): # reutilizei
percentuais = np.array(aptidoes)/float(sum(aptidoes))
vet = [percentuais[0]]
for p in percentuais[1:]:
vet.append(vet[-1]+p)
r = np.random.random()
for i in range(len(vet)):
if r <= vet[i]:
return i
def Cruzamento(pai,mae):
r1 = np.random.random() #por porcentagem
r2 = 1-r1
filho = (r1*pai + r2*mae)
filha = (r2*pai + r1*mae)
return filho, filha
# corte = np.random.randint(TAM_CROM) #do professor
# return (list(pai[:corte])+list(mae[corte:]),list(mae[:corte])+list(pai[corte:]))
def Mutacao(cromossomo):
r1 = np.random.randint(TAM_CROM) #gera um numero interira aleatoria de 1 a 15 local do vetor
r2 = np.random.rand()*20 -10 #gera numero real de 0 a 1 multiplica por 20 e sublitrai 10
cromossomo[r1] = (cromossomo[r1] + r2)/2 #pega o cromossomo na local r1 e vai somar com r2 e dividir por 2
return cromossomo
pop = GeraPop()
for g in range(MAX_GEN): # maximo de geracoes
aptidoes = CalculaAptidoes(pop) # Quanto maior melhor
print (np.mean(aptidoes)) #media aptidoes
nova_pop = []
for c in range(int(TAM_POP/2)): #vai completar ate que o tamanho da populacao nova seja metade da populacao definida
pai = pop[SelecaoRoleta(aptidoes)] # por conta dos crossovers
mae = pop[SelecaoRoleta(aptidoes)] # gira roleta
r = np.random.random() #gera numero aleatorio
if r <= TX_CROSS: # se r menor Taxa de cross ha cruzamento
filho,filha = Cruzamento(pai,mae) #reaproveitando
else:
filho,filha = pai,mae
r = np.random.random()
if r <= TX_MUT:
filho = Mutacao(filho)
r = np.random.random()
if r <= TX_MUT:
filha = Mutacao(filha)
nova_pop.append(filho) #adiciona na nova_pop
nova_pop.append(filha)
pop = np.array(nova_pop) # apenas para padronizar
aptidoes = CalculaAptidoes(pop)
index_solucao = aptidoes.index(max(aptidoes)) #retorna o indice de maior aptidao
print (pop[index_solucao]) #printa o melhores pesos para calcular o xor
#colocar no console
# 1 - teste = pop[index_solucao] 2 - CalculaRede( pesos, x1, x2) | [
"numpy.mean",
"numpy.random.rand",
"numpy.random.random",
"numpy.tanh",
"numpy.exp",
"numpy.array",
"numpy.random.randint"
] | [((586, 636), 'numpy.tanh', 'np.tanh', (['(x1 * pesos[1] + x2 * pesos[3] + pesos[10])'], {}), '(x1 * pesos[1] + x2 * pesos[3] + pesos[10])\n', (593, 636), True, 'import numpy as np\n'), ((662, 712), 'numpy.tanh', 'np.tanh', (['(x1 * pesos[0] + x2 * pesos[2] + pesos[11])'], {}), '(x1 * pesos[0] + x2 * pesos[2] + pesos[11])\n', (669, 712), True, 'import numpy as np\n'), ((720, 772), 'numpy.tanh', 'np.tanh', (['(y0a * pesos[5] + y0b * pesos[7] + pesos[12])'], {}), '(y0a * pesos[5] + y0b * pesos[7] + pesos[12])\n', (727, 772), True, 'import numpy as np\n'), ((780, 832), 'numpy.tanh', 'np.tanh', (['(y0a * pesos[4] + y0b * pesos[6] + pesos[13])'], {}), '(y0a * pesos[4] + y0b * pesos[6] + pesos[13])\n', (787, 832), True, 'import numpy as np\n'), ((1641, 1659), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1657, 1659), True, 'import numpy as np\n'), ((1789, 1807), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1805, 1807), True, 'import numpy as np\n'), ((2109, 2136), 'numpy.random.randint', 'np.random.randint', (['TAM_CROM'], {}), '(TAM_CROM)\n', (2126, 2136), True, 'import numpy as np\n'), ((3426, 3444), 'numpy.array', 'np.array', (['nova_pop'], {}), '(nova_pop)\n', (3434, 3444), True, 'import numpy as np\n'), ((167, 212), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)', '[TAM_POP, TAM_CROM]'], {}), '(0, 20, [TAM_POP, TAM_CROM])\n', (184, 212), True, 'import numpy as np\n'), ((1502, 1520), 'numpy.array', 'np.array', (['aptidoes'], {}), '(aptidoes)\n', (1510, 1520), True, 'import numpy as np\n'), ((2571, 2588), 'numpy.mean', 'np.mean', (['aptidoes'], {}), '(aptidoes)\n', (2578, 2588), True, 'import numpy as np\n'), ((2894, 2912), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2910, 2912), True, 'import numpy as np\n'), ((3135, 3153), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3151, 3153), True, 'import numpy as np\n'), ((3228, 3246), 'numpy.random.random', 'np.random.random', ([], {}), 
'()\n', (3244, 3246), True, 'import numpy as np\n'), ((414, 424), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (420, 424), True, 'import numpy as np\n'), ((2207, 2223), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2221, 2223), True, 'import numpy as np\n')] |
import numpy as np
import logging
_logger = logging.getLogger(__name__)
class Buffer(object):
""" A buffer to collect observations until they form a state. """
def __init__(self, sequence_length, width, height, color_channels):
_logger.info("Initializing new object of type " + str(type(self).__name__))
self.buffer = np.zeros((sequence_length,
width,
height,
color_channels), dtype=np.uint8)
self.buffer_size = np.shape(self.buffer)
def add(self, observation):
self.buffer[:-1] = self.buffer[1:]
self.buffer[-1] = observation
def get_state(self):
return self.buffer
def reset(self):
self.buffer *= 0 | [
"logging.getLogger",
"numpy.shape",
"numpy.zeros"
] | [((44, 71), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (61, 71), False, 'import logging\n'), ((344, 418), 'numpy.zeros', 'np.zeros', (['(sequence_length, width, height, color_channels)'], {'dtype': 'np.uint8'}), '((sequence_length, width, height, color_channels), dtype=np.uint8)\n', (352, 418), True, 'import numpy as np\n'), ((542, 563), 'numpy.shape', 'np.shape', (['self.buffer'], {}), '(self.buffer)\n', (550, 563), True, 'import numpy as np\n')] |
import numpy as np
from pyesg.configuration.validation_configuration import ValidationAnalysis
from pyesg.constants.outputs import DISCOUNT_FACTOR
from pyesg.constants.validation_analyses import AVERAGE_DISCOUNT_FACTOR
from pyesg.constants.validation_result_types import MARTINGALE
from pyesg.simulation.utils import extract_yield_curve_from_parameters
from pyesg.validation.utils import get_confidence_level, do_sample_mean_and_confidence_interval_calculations
from pyesg.validation.validators.base_validator import BaseValidator
class AverageDiscountFactorValidator(BaseValidator):
    """
    Average discount factor (martingale) validation analysis.

    Under the risk-neutral measure the expected discount factor at time t
    equals the discount implied by the initial yield curve for term t, so
    the simulated sample mean is compared against the curve.
    """
    analysis_id = AVERAGE_DISCOUNT_FACTOR
    result_type = MARTINGALE

    def _validate(self, analysis_settings: ValidationAnalysis):
        reader = self._data_extractor.reader
        confidence_level = get_confidence_level(analysis_settings)
        # Time step 0 is deterministic and carries no yield, so start at 1.
        time_steps = np.arange(1, reader.number_of_time_steps)
        simulations = self._data_extractor.get_output_simulations(
            self._asset_class, DISCOUNT_FACTOR, time_steps)
        # Sample mean and upper/lower confidence bounds of the simulated prices.
        stats = do_sample_mean_and_confidence_interval_calculations(
            array=simulations,
            confidence_level=confidence_level,
            annualisation_factor=reader.annualisation_factor
        )
        # Expected values are read straight off the initial yield curve.
        yield_curve = extract_yield_curve_from_parameters(self._asset_class.parameters)
        expected_values = [yield_curve.get_rate(step / reader.annualisation_factor)
                           for step in time_steps]
        # Ignore the "time" key in `stats`: it is built assuming time starts at 0.
        mean_prices = np.asarray(stats["sample_mean"])
        lower_prices = np.asarray(stats["lower_confidence_interval"])
        upper_prices = np.asarray(stats["upper_confidence_interval"])

        def price_to_yield(time, price) -> np.ndarray:
            return -np.log(price) / time

        # Bond prices and yields move inversely, so the upper price bound maps
        # to the lower yield bound and vice versa.
        return {
            "time": time_steps.tolist(),
            "sample_mean": price_to_yield(time_steps, mean_prices).tolist(),
            "lower_confidence_interval": price_to_yield(time_steps, upper_prices).tolist(),
            "upper_confidence_interval": price_to_yield(time_steps, lower_prices).tolist(),
            "expected_value": expected_values
        }
| [
"pyesg.validation.utils.get_confidence_level",
"pyesg.simulation.utils.extract_yield_curve_from_parameters",
"numpy.log",
"numpy.asarray",
"numpy.arange",
"pyesg.validation.utils.do_sample_mean_and_confidence_interval_calculations"
] | [((941, 980), 'pyesg.validation.utils.get_confidence_level', 'get_confidence_level', (['analysis_settings'], {}), '(analysis_settings)\n', (961, 980), False, 'from pyesg.validation.utils import get_confidence_level, do_sample_mean_and_confidence_interval_calculations\n'), ((1114, 1176), 'numpy.arange', 'np.arange', (['(1)', 'self._data_extractor.reader.number_of_time_steps'], {}), '(1, self._data_extractor.reader.number_of_time_steps)\n', (1123, 1176), True, 'import numpy as np\n'), ((1386, 1580), 'pyesg.validation.utils.do_sample_mean_and_confidence_interval_calculations', 'do_sample_mean_and_confidence_interval_calculations', ([], {'array': 'discount_factor_sims', 'confidence_level': 'confidence_level', 'annualisation_factor': 'self._data_extractor.reader.annualisation_factor'}), '(array=\n discount_factor_sims, confidence_level=confidence_level,\n annualisation_factor=self._data_extractor.reader.annualisation_factor)\n', (1437, 1580), False, 'from pyesg.validation.utils import get_confidence_level, do_sample_mean_and_confidence_interval_calculations\n'), ((1700, 1765), 'pyesg.simulation.utils.extract_yield_curve_from_parameters', 'extract_yield_curve_from_parameters', (['self._asset_class.parameters'], {}), '(self._asset_class.parameters)\n', (1735, 1765), False, 'from pyesg.simulation.utils import extract_yield_curve_from_parameters\n'), ((2142, 2176), 'numpy.asarray', 'np.asarray', (["results['sample_mean']"], {}), "(results['sample_mean'])\n", (2152, 2176), True, 'import numpy as np\n'), ((2196, 2244), 'numpy.asarray', 'np.asarray', (["results['lower_confidence_interval']"], {}), "(results['lower_confidence_interval'])\n", (2206, 2244), True, 'import numpy as np\n'), ((2264, 2312), 'numpy.asarray', 'np.asarray', (["results['upper_confidence_interval']"], {}), "(results['upper_confidence_interval'])\n", (2274, 2312), True, 'import numpy as np\n'), ((1904, 1966), 'numpy.arange', 'np.arange', (['(1)', 'self._data_extractor.reader.number_of_time_steps'], {}), 
'(1, self._data_extractor.reader.number_of_time_steps)\n', (1913, 1966), True, 'import numpy as np\n'), ((2390, 2403), 'numpy.log', 'np.log', (['price'], {}), '(price)\n', (2396, 2403), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.utils.data import Dataset
class TripletSVHN(Dataset):
    """
    Triplet dataset for metric learning, based on
    https://github.com/adambielski/siamese-triplet

    Train: for each sample (anchor) a positive and a negative sample are
    randomly drawn on every access.
    Test: a fixed, seeded list of triplets is generated once at construction
    time so evaluation is reproducible.
    """
    def __init__(
            self,
            dataset: torch.utils.data.Dataset,
            indices_train: np.ndarray,
            indices_test: np.ndarray,
            transform: torchvision.transforms.Compose,
            phase: list,
            seed: int,
            return_labels: bool = False
    ) -> None:
        """
        Args:
            dataset: underlying dataset exposing ``data`` and ``labels``.
            indices_train: indices selecting the training split.
            indices_test: indices selecting the test split.
            transform: transform applied to every triplet member.
            phase: 'train' or 'test' (selects random vs. fixed triplets).
            seed: seed for the RNG that builds the fixed test triplets.
            return_labels: if True, ``__getitem__`` also returns the anchor's label.
        """
        self.dataset = dataset
        self.indices_train = indices_train
        self.indices_test = indices_test
        self.transform = transform
        self.phase = phase
        self.seed = seed
        self.return_labels = return_labels

        if self.phase == 'train':
            self.train_labels = self.dataset.labels[self.indices_train]
            self.train_data = self.dataset.data[self.indices_train]
            self.labels_set = set(self.train_labels)
            # Map each label to the indices of all samples carrying it.
            self.label_to_indices = {label: np.where(self.train_labels == label)[0]
                                     for label in self.labels_set}
        else:
            self.test_labels = self.dataset.labels[self.indices_test]
            self.test_data = self.dataset.data[self.indices_test]
            # generate fixed triplets for testing
            self.labels_set = set(self.test_labels)
            self.label_to_indices = {label: np.where(self.test_labels == label)[0]
                                     for label in self.labels_set}
            random_state = np.random.RandomState(self.seed)
            # NOTE(review): the negative *label* is drawn with np.random.choice,
            # not with random_state, so it is not covered by `seed` — confirm
            # whether that is intentional.
            triplets = [[i,
                         random_state.choice(self.label_to_indices[self.test_labels[i].item()]),
                         random_state.choice(self.label_to_indices[
                             np.random.choice(
                                 list(self.labels_set - set([self.test_labels[i].item()]))
                             )
                         ])
                         ]
                        for i in range(len(self.test_data))]
            self.test_triplets = triplets

    def __getitem__(
            self,
            index: int
    ) -> Tuple[torch.Tensor]:
        if self.phase == 'train':
            img1, label1 = self.train_data[index], self.train_labels[index].item()
            # Positive: a different sample with the same label as the anchor.
            positive_index = index
            while positive_index == index:
                positive_index = np.random.choice(self.label_to_indices[label1])
            # Negative: any sample from a different, randomly chosen label.
            negative_label = np.random.choice(list(self.labels_set - set([label1])))
            negative_index = np.random.choice(self.label_to_indices[negative_label])
            img2 = self.train_data[positive_index]
            img3 = self.train_data[negative_index]
        else:
            anchor_index = self.test_triplets[index][0]
            # Bug fix: label1 was previously unbound in the test phase, so
            # return_labels=True raised UnboundLocalError below.
            label1 = self.test_labels[anchor_index].item()
            img1 = self.test_data[anchor_index]
            img2 = self.test_data[self.test_triplets[index][1]]
            img3 = self.test_data[self.test_triplets[index][2]]

        # Images are stored channel-first; PIL expects channel-last.
        img1 = Image.fromarray(np.moveaxis(img1, 0, 2))
        img2 = Image.fromarray(np.moveaxis(img2, 0, 2))
        img3 = Image.fromarray(np.moveaxis(img3, 0, 2))
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
            img3 = self.transform(img3)
        if self.return_labels:
            return (img1, img2, img3), label1
        else:
            return (img1, img2, img3), []

    def __len__(self):
        if self.phase == 'train':
            return len(self.train_labels)
        else:
            return len(self.test_labels)
| [
"numpy.random.choice",
"numpy.moveaxis",
"numpy.where",
"numpy.random.RandomState"
] | [((1747, 1779), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (1768, 1779), True, 'import numpy as np\n'), ((2864, 2919), 'numpy.random.choice', 'np.random.choice', (['self.label_to_indices[negative_label]'], {}), '(self.label_to_indices[negative_label])\n', (2880, 2919), True, 'import numpy as np\n'), ((3260, 3283), 'numpy.moveaxis', 'np.moveaxis', (['img1', '(0)', '(2)'], {}), '(img1, 0, 2)\n', (3271, 3283), True, 'import numpy as np\n'), ((3316, 3339), 'numpy.moveaxis', 'np.moveaxis', (['img2', '(0)', '(2)'], {}), '(img2, 0, 2)\n', (3327, 3339), True, 'import numpy as np\n'), ((3372, 3395), 'numpy.moveaxis', 'np.moveaxis', (['img3', '(0)', '(2)'], {}), '(img3, 0, 2)\n', (3383, 3395), True, 'import numpy as np\n'), ((2702, 2749), 'numpy.random.choice', 'np.random.choice', (['self.label_to_indices[label1]'], {}), '(self.label_to_indices[label1])\n', (2718, 2749), True, 'import numpy as np\n'), ((1209, 1245), 'numpy.where', 'np.where', (['(self.train_labels == label)'], {}), '(self.train_labels == label)\n', (1217, 1245), True, 'import numpy as np\n'), ((1613, 1648), 'numpy.where', 'np.where', (['(self.test_labels == label)'], {}), '(self.test_labels == label)\n', (1621, 1648), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import os
import sys
import cvxpy as cp
import random
import pandas as pd
import tkinter
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification, make_blobs
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn_lvq import GlvqModel, GmlvqModel
from utils import compare_cf, perturb
from plausible_counterfactuals import LvqCounterfactual, MatrixLvqCounterfactual, FeasibleCounterfactualOfDecisionTree, FeasibleCounterfactualSoftmax
#modeldesc = "logreg"
modeldesc = "dectree"
#modeldesc = "glvq";n_prototypes=3
if __name__ == "__main__":
    # Experiment: how does the (un)fairness of counterfactual explanations —
    # the distance between the counterfactual of a sample and the
    # counterfactual of a slightly perturbed copy of it — evolve as the
    # number of features grows ("curse of dimensionality")?
    features = [2, 4, 8, 16, 32, 64, 128]
    unfairness = []
    for n_features in features:
        n_kf_splits = 4
        # Synthetic two-cluster data at the current dimensionality.
        X, y = make_blobs(n_samples=1000, centers=2, cluster_std=5., n_features=n_features)
        scores_cf_perturbation_dist = []
        results = {'notFound': 0, 'found': 0}
        kf = KFold(n_splits=n_kf_splits)
        for train_index, test_index in kf.split(X):
            # Split data into training and test set
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            # Fit and evaluate classifier (type selected by `modeldesc` above)
            model = None
            if modeldesc == "glvq":
                model = GlvqModel(prototypes_per_class=n_prototypes)
            elif modeldesc == "gmlvq":
                model = GmlvqModel(prototypes_per_class=n_prototypes)
            elif modeldesc == "logreg":
                model = LogisticRegression(multi_class='multinomial')
            elif modeldesc == "dectree":
                model = DecisionTreeClassifier(max_depth=7)
            model.fit(X_train, y_train)
            # Compute accuracy on test set
            y_pred = model.predict(X_test)
            print(f"F1-score: {f1_score(y_test, y_pred, average='weighted')}")
            labels = np.unique(y)
            # Compute counterfactual of each test sample
            for i in range(X_test.shape[0]):
                x_orig_orig = X_test[i,:]
                y_orig = y_test[i]
                # Target class: any class other than the sample's own.
                y_target = random.choice(list(filter(lambda l: l != y_test[i], labels)))
                # Build the counterfactual generator matching the classifier.
                # Empty ellipsoid/GMM arrays disable the density (plausibility)
                # constraints here.
                cf = None
                if modeldesc == "logreg":
                    cf = FeasibleCounterfactualSoftmax(model.coef_, model.intercept_, X=X_train, ellipsoids_r=np.array([]), gmm_weights=np.array([0]), gmm_means=np.array([]), gmm_covariances=np.array([]), projection_matrix=None, projection_mean_sub=None)
                elif modeldesc == "glvq":
                    cf = LvqCounterfactual(model, X=X_train, ellipsoids_r=np.array([]), gmm_weights=np.array([0]), gmm_means=np.array([]), gmm_covariances=np.array([]), projection_matrix=None, projection_mean_sub=None)
                elif modeldesc == "gmlvq":
                    cf = MatrixLvqCounterfactual(model, X=X_train, ellipsoids_r=np.array([]), gmm_weights=np.array([0]), gmm_means=np.array([]), gmm_covariances=np.array([]), projection_matrix=None, projection_mean_sub=None)
                elif modeldesc == "dectree":
                    cf = FeasibleCounterfactualOfDecisionTree(model, X=X_train, ellipsoids_r=np.array([]), gmm_weights=np.array([0]), gmm_means=np.array([]), gmm_covariances=np.array([]), projection_matrix=None, projection_mean_sub=None)
                xcf = cf.compute_counterfactual(x_orig_orig, y_target=y_target, use_density_constraints=False)
                if xcf is None:
                    #print("No counterfactual found!")
                    results["notFound"] += 1
                    continue
                # Compute counterfactual of perturbed sample
                x_perturb = perturb(x_orig_orig) # Perturb original data point
                x_perturb_t = [x_perturb]
                if model.predict(x_perturb_t) != y_orig:
                    print("Perturbed sample is missclassified")
                x_perturbed_cf = cf.compute_counterfactual(x_perturb, y_target=y_target, use_density_constraints=False)
                if x_perturbed_cf is None:
                    #print("No counterfactual of perturbed sample found!")
                    results["notFound"] += 1
                    continue
                # Evaluate and store closeness
                results['found'] += 1
                cf_perturbation_dist = compare_cf(xcf, x_perturbed_cf) # Distance between counterfactual of perturned and original sample
                scores_cf_perturbation_dist.append(cf_perturbation_dist)
        # Report per-dimensionality statistics of the distances.
        print(f"n_features={n_features}")
        print(f"Not found {results['notFound']}/{results['notFound'] + results['found']}")
        print("Without density constrain: Median: {0} Mean: {1} Var: {2}".format(np.median(scores_cf_perturbation_dist), np.mean(scores_cf_perturbation_dist), np.var(scores_cf_perturbation_dist)))
        unfairness.append([np.median(scores_cf_perturbation_dist), np.mean(scores_cf_perturbation_dist), np.var(scores_cf_perturbation_dist)])
    # Summary plot
    unfairness = np.array(unfairness)[:,0] # Select the median only!
    plt.plot(features, unfairness, 'o-', label="Median (Un)fairness")
    plt.xlabel("Number of features")
    plt.ylabel("Dist Cf of original vs. perturbed sample")
    plt.xticks(features)
    plt.legend()
    plt.show()
#plt.savefig(f"exp_results_curseofdimensionality/{modeldesc}_perturbation_dist_curseofdimensionality.pdf", dpi=500, bbox_inches='tight', pad_inches = 0) | [
"matplotlib.pyplot.ylabel",
"numpy.array",
"utils.perturb",
"sklearn.model_selection.KFold",
"sklearn_lvq.GmlvqModel",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.datasets.make_blobs",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.xticks",
"utils.co... | [((150, 173), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (164, 173), False, 'import matplotlib\n'), ((5297, 5362), 'matplotlib.pyplot.plot', 'plt.plot', (['features', 'unfairness', '"""o-"""'], {'label': '"""Median (Un)fairness"""'}), "(features, unfairness, 'o-', label='Median (Un)fairness')\n", (5305, 5362), True, 'import matplotlib.pyplot as plt\n'), ((5367, 5399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of features"""'], {}), "('Number of features')\n", (5377, 5399), True, 'import matplotlib.pyplot as plt\n'), ((5404, 5458), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dist Cf of original vs. perturbed sample"""'], {}), "('Dist Cf of original vs. perturbed sample')\n", (5414, 5458), True, 'import matplotlib.pyplot as plt\n'), ((5463, 5483), 'matplotlib.pyplot.xticks', 'plt.xticks', (['features'], {}), '(features)\n', (5473, 5483), True, 'import matplotlib.pyplot as plt\n'), ((5488, 5500), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5498, 5500), True, 'import matplotlib.pyplot as plt\n'), ((5505, 5515), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5513, 5515), True, 'import matplotlib.pyplot as plt\n'), ((958, 1035), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(1000)', 'centers': '(2)', 'cluster_std': '(5.0)', 'n_features': 'n_features'}), '(n_samples=1000, centers=2, cluster_std=5.0, n_features=n_features)\n', (968, 1035), False, 'from sklearn.datasets import make_classification, make_blobs\n'), ((1137, 1164), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_kf_splits'}), '(n_splits=n_kf_splits)\n', (1142, 1164), False, 'from sklearn.model_selection import KFold\n'), ((5241, 5261), 'numpy.array', 'np.array', (['unfairness'], {}), '(unfairness)\n', (5249, 5261), True, 'import numpy as np\n'), ((2110, 2122), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2119, 2122), True, 'import numpy as np\n'), ((1517, 1561), 
'sklearn_lvq.GlvqModel', 'GlvqModel', ([], {'prototypes_per_class': 'n_prototypes'}), '(prototypes_per_class=n_prototypes)\n', (1526, 1561), False, 'from sklearn_lvq import GlvqModel, GmlvqModel\n'), ((3889, 3909), 'utils.perturb', 'perturb', (['x_orig_orig'], {}), '(x_orig_orig)\n', (3896, 3909), False, 'from utils import compare_cf, perturb\n'), ((4546, 4577), 'utils.compare_cf', 'compare_cf', (['xcf', 'x_perturbed_cf'], {}), '(xcf, x_perturbed_cf)\n', (4556, 4577), False, 'from utils import compare_cf, perturb\n'), ((4945, 4983), 'numpy.median', 'np.median', (['scores_cf_perturbation_dist'], {}), '(scores_cf_perturbation_dist)\n', (4954, 4983), True, 'import numpy as np\n'), ((4985, 5021), 'numpy.mean', 'np.mean', (['scores_cf_perturbation_dist'], {}), '(scores_cf_perturbation_dist)\n', (4992, 5021), True, 'import numpy as np\n'), ((5023, 5058), 'numpy.var', 'np.var', (['scores_cf_perturbation_dist'], {}), '(scores_cf_perturbation_dist)\n', (5029, 5058), True, 'import numpy as np\n'), ((5088, 5126), 'numpy.median', 'np.median', (['scores_cf_perturbation_dist'], {}), '(scores_cf_perturbation_dist)\n', (5097, 5126), True, 'import numpy as np\n'), ((5128, 5164), 'numpy.mean', 'np.mean', (['scores_cf_perturbation_dist'], {}), '(scores_cf_perturbation_dist)\n', (5135, 5164), True, 'import numpy as np\n'), ((5166, 5201), 'numpy.var', 'np.var', (['scores_cf_perturbation_dist'], {}), '(scores_cf_perturbation_dist)\n', (5172, 5201), True, 'import numpy as np\n'), ((1625, 1670), 'sklearn_lvq.GmlvqModel', 'GmlvqModel', ([], {'prototypes_per_class': 'n_prototypes'}), '(prototypes_per_class=n_prototypes)\n', (1635, 1670), False, 'from sklearn_lvq import GlvqModel, GmlvqModel\n'), ((1735, 1780), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'multi_class': '"""multinomial"""'}), "(multi_class='multinomial')\n", (1753, 1780), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2040, 2084), 'sklearn.metrics.f1_score', 'f1_score', 
(['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (2048, 2084), False, 'from sklearn.metrics import f1_score\n'), ((1846, 1881), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(7)'}), '(max_depth=7)\n', (1868, 1881), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2570, 2582), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2578, 2582), True, 'import numpy as np\n'), ((2596, 2609), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2604, 2609), True, 'import numpy as np\n'), ((2621, 2633), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2629, 2633), True, 'import numpy as np\n'), ((2651, 2663), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2659, 2663), True, 'import numpy as np\n'), ((2831, 2843), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2839, 2843), True, 'import numpy as np\n'), ((2857, 2870), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2865, 2870), True, 'import numpy as np\n'), ((2882, 2894), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2890, 2894), True, 'import numpy as np\n'), ((2912, 2924), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2920, 2924), True, 'import numpy as np\n'), ((3099, 3111), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3107, 3111), True, 'import numpy as np\n'), ((3125, 3138), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3133, 3138), True, 'import numpy as np\n'), ((3150, 3162), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3158, 3162), True, 'import numpy as np\n'), ((3180, 3192), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3188, 3192), True, 'import numpy as np\n'), ((3382, 3394), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3390, 3394), True, 'import numpy as np\n'), ((3408, 3421), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3416, 3421), True, 'import numpy as np\n'), ((3433, 3445), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3441, 
3445), True, 'import numpy as np\n'), ((3463, 3475), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3471, 3475), True, 'import numpy as np\n')] |
import dicom
import nibabel as nb
import numpy as np
from cafndl_utils import augment_data
def prepare_data_from_nifti(data_volume, list_augments=None, scale_by_norm=True, slices=None):
    """
    Reorder a volume into a stack of axial slices suitable for training.

    Parameters
    ----------
    data_volume : np.ndarray
        Input volume of shape (x, y, z) or (x, y, z, channel).
    list_augments : list, optional
        Volumetric transformations to apply (flipx/y, flipxy, shiftx/y);
        one transformed copy of the whole volume is produced per entry and
        the copies are concatenated along the sample axis.
    scale_by_norm : bool
        If True, divide the volume by the L2 norm of its flattened values.
    slices : sequence of int, optional
        Specific slice indices (along the original z-dimension) to keep.

    Returns
    -------
    np.ndarray
        Array of shape (slice, x, y, channel).
    """
    # Bug fix: avoid the mutable-default-argument pitfall ([] as default).
    if list_augments is None:
        list_augments = []
    # Add a trailing channel axis if missing, then transpose to
    # slice*x*y*channel (slice = z-dimension).
    if np.ndim(data_volume) == 3:
        data_volume = data_volume[:, :, :, np.newaxis]
    data_volume = np.transpose(data_volume, [2, 0, 1, 3])
    # Scale by the L2 norm of the whole volume.
    if scale_by_norm:
        data_volume = data_volume / np.linalg.norm(data_volume.flatten())
    # Keep only the requested slices.
    if slices is not None:
        data_volume = data_volume[slices, :, :, :]
    # finish loading data
    print('Image loaded, data size {:} (sample, x, y, channel)'.format(data_volume.shape))
    # Augmentation: one transformed copy of the volume per requested augment.
    if len(list_augments) > 0:
        list_data = []
        for augment in list_augments:
            data_augmented = augment_data(data_volume, axis_xy=[1, 2], augment=augment)
            list_data.append(data_augmented.reshape(data_volume.shape))
        data_volume = np.concatenate(list_data, axis=0)
    return data_volume
"numpy.transpose",
"cafndl_utils.augment_data",
"numpy.ndim",
"numpy.concatenate"
] | [((659, 698), 'numpy.transpose', 'np.transpose', (['data_volume', '[2, 0, 1, 3]'], {}), '(data_volume, [2, 0, 1, 3])\n', (671, 698), True, 'import numpy as np\n'), ((573, 593), 'numpy.ndim', 'np.ndim', (['data_volume'], {}), '(data_volume)\n', (580, 593), True, 'import numpy as np\n'), ((1254, 1287), 'numpy.concatenate', 'np.concatenate', (['list_data'], {'axis': '(0)'}), '(list_data, axis=0)\n', (1268, 1287), True, 'import numpy as np\n'), ((1112, 1170), 'cafndl_utils.augment_data', 'augment_data', (['data_volume'], {'axis_xy': '[1, 2]', 'augment': 'augment'}), '(data_volume, axis_xy=[1, 2], augment=augment)\n', (1124, 1170), False, 'from cafndl_utils import augment_data\n')] |
import streamlit as st
import pandas as pd
import plotly.express as px
from numpy.lib.stride_tricks import as_strided
from numpy.lib import pad
import numpy as np
import datetime
import time
import pytz
# own module
from functions.get_data import get_data
# Global variables
def load_global_vars():
    """Initialise the module-level configuration constants.

    Sets the ticker file location, the maximum lookback period, 'now' in
    the Europe/Brussels timezone (made naive), the start of the current
    year, and the number of days elapsed year-to-date.
    """
    global DIR_TICKERS, MAX_PERIOD, TODAY, BEGIN_DATE_THIS_YEAR, DAYS_YTD
    DIR_TICKERS = 'data/tickers.xlsx'
    MAX_PERIOD = 20 * 365
    brussels = pytz.timezone('Europe/Brussels')
    # Keep TODAY naive so it can be compared with other naive datetimes.
    TODAY = datetime.datetime.now(tz=brussels).replace(tzinfo=None)
    BEGIN_DATE_THIS_YEAR = datetime.datetime(TODAY.year, 1, 1)
    DAYS_YTD = (TODAY - BEGIN_DATE_THIS_YEAR).days
def rolling_spearman(seqa, seqb, window):
    """
    Rolling Spearman rank correlation between two 1-D numpy sequences.

    Returns an array the same length as the inputs; the first
    ``window - 1`` entries are NaN because no full window exists yet.
    """
    n_windows = len(seqa) - window + 1
    stride_a = seqa.strides[0]
    # Bug fix: the stride for seqb was previously taken from seqa, which
    # reads the wrong memory whenever the two arrays differ in dtype/stride.
    stride_b = seqb.strides[0]
    windows_a = as_strided(seqa, shape=[n_windows, window], strides=[stride_a, stride_a])
    windows_b = as_strided(seqb, shape=[len(seqb) - window + 1, window], strides=[stride_b, stride_b])
    # Rank within each window, then correlate the ranks row-wise (Spearman).
    ranks_a = pd.DataFrame(windows_a).rank(1)
    ranks_b = pd.DataFrame(windows_b).rank(1)
    corrs = ranks_a.corrwith(ranks_b, axis=1)
    # Left-pad with NaN so the output aligns with the input index.
    # (np.pad instead of numpy.lib.pad, which NumPy 2.0 removed.)
    return np.pad(corrs, (window - 1, 0), 'constant', constant_values=np.nan)
def run_page2():
    """Render the correlation-overview Streamlit page.

    Downloads price history for user-provided tickers, shows a table of
    Spearman correlations of each ticker with a selected one over several
    lookback periods, and plots rolling Spearman correlations over time.
    """
    load_global_vars()
    st.sidebar.title("User settings")
    st.title("Correlation overview")
    # dd/mm/YY H:M:S
    dt_string = TODAY.strftime("%d/%m/%Y %H:%M:%S")
    st.write("Last updated at", dt_string)
    ex_tickers = "CL=F, DX=F, GC=F, ES=F, NQ=F, DBC"
    tickers = st.text_input("Provide ticker symbols, split by comma", ex_tickers)
    # Daily percentage returns of the closing prices.
    df = get_data(tickers=tickers, period="20y")
    df_perc = df["Close"].pct_change(periods=1).dropna()
    ticker = st.sidebar.selectbox("Select ticker", list(df_perc.columns))
    ex_periods = "10, 20, 30, 60, 90, 120, 150, 180, 210"
    periods = st.text_input("Choose correlation periods, split by comma (expressed in days)", ex_periods)
    periods = periods.split(",")
    # convert to integer
    periods = [int(p) for p in periods]
    # Correlation table: Spearman correlation of every other ticker with
    # the selected one, over each requested trailing window.
    store_corelations = {}
    for c in df_perc.columns:
        corr_tick = {}
        if c !=ticker:
            for p in periods:
                df_perc_period = df_perc.tail(p)
                corr_tick[p] = pd.DataFrame(df_perc_period[ticker]).corrwith(df_perc_period[c], axis=0, drop=False, method='spearman').values.tolist()[0]
            store_corelations[c] = corr_tick
    # precision
    # NOTE(review): Styler.set_precision is deprecated in recent pandas
    # (replaced by Styler.format) — confirm the pinned pandas version.
    precision = st.sidebar.number_input("Number of digits for precision", min_value=1, max_value=10, value=3)
    st.dataframe(pd.DataFrame(store_corelations).T.style.set_precision(precision))
    # correlation for figure
    corr_period = st.sidebar.slider("Choose correlation period figure (expressed in days)", min_value=5, max_value=200, value=30)
    period_figure = st.sidebar.slider("Choose maximum period figure (expressed in days)", min_value=100, max_value=2000, value=1000)
    # Rolling correlation of every other ticker against the selected one.
    corr_tick_rolling = {}
    for c in df_perc.columns:
        if c !=ticker:
            df_perc_period = df_perc.tail(period_figure)
            corr_tick_rolling[c] = rolling_spearman(df_perc_period[ticker].values, df_perc_period[c].values, corr_period)
    # Plot figure: reshape to long format for plotly express.
    out = pd.DataFrame(corr_tick_rolling, index=df_perc_period.index).dropna().reset_index()
    out_long = pd.melt(out, id_vars='Date', value_vars=out.columns[1:])
    out_long = out_long.rename(columns={"variable": "Ticker", "value": "Correlation"})
    fig = px.line(out_long, x="Date", y="Correlation", title=f'Rolling correlations with {ticker}', color="Ticker")
st.plotly_chart(fig, use_container_width=True) | [
"datetime.datetime",
"pytz.timezone",
"streamlit.sidebar.title",
"streamlit.write",
"numpy.lib.pad",
"streamlit.sidebar.slider",
"plotly.express.line",
"streamlit.plotly_chart",
"streamlit.sidebar.number_input",
"datetime.datetime.now",
"pandas.DataFrame",
"functions.get_data.get_data",
"str... | [((501, 533), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Brussels"""'], {}), "('Europe/Brussels')\n", (514, 533), False, 'import pytz\n'), ((629, 664), 'datetime.datetime', 'datetime.datetime', (['TODAY.year', '(1)', '(1)'], {}), '(TODAY.year, 1, 1)\n', (646, 664), False, 'import datetime\n'), ((1083, 1100), 'pandas.DataFrame', 'pd.DataFrame', (['ssa'], {}), '(ssa)\n', (1095, 1100), True, 'import pandas as pd\n'), ((1110, 1127), 'pandas.DataFrame', 'pd.DataFrame', (['ssb'], {}), '(ssb)\n', (1122, 1127), True, 'import pandas as pd\n'), ((1215, 1278), 'numpy.lib.pad', 'pad', (['corrs', '(window - 1, 0)', '"""constant"""'], {'constant_values': 'np.nan'}), "(corrs, (window - 1, 0), 'constant', constant_values=np.nan)\n", (1218, 1278), False, 'from numpy.lib import pad\n'), ((1326, 1359), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""User settings"""'], {}), "('User settings')\n", (1342, 1359), True, 'import streamlit as st\n'), ((1364, 1396), 'streamlit.title', 'st.title', (['"""Correlation overview"""'], {}), "('Correlation overview')\n", (1372, 1396), True, 'import streamlit as st\n'), ((1475, 1513), 'streamlit.write', 'st.write', (['"""Last updated at"""', 'dt_string'], {}), "('Last updated at', dt_string)\n", (1483, 1513), True, 'import streamlit as st\n'), ((1582, 1649), 'streamlit.text_input', 'st.text_input', (['"""Provide ticker symbols, split by comma"""', 'ex_tickers'], {}), "('Provide ticker symbols, split by comma', ex_tickers)\n", (1595, 1649), True, 'import streamlit as st\n'), ((1659, 1698), 'functions.get_data.get_data', 'get_data', ([], {'tickers': 'tickers', 'period': '"""20y"""'}), "(tickers=tickers, period='20y')\n", (1667, 1698), False, 'from functions.get_data import get_data\n'), ((1903, 1998), 'streamlit.text_input', 'st.text_input', (['"""Choose correlation periods, split by comma (expressed in days)"""', 'ex_periods'], {}), "('Choose correlation periods, split by comma (expressed in days)',\n ex_periods)\n", (1916, 1998), 
True, 'import streamlit as st\n'), ((2549, 2646), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Number of digits for precision"""'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(3)'}), "('Number of digits for precision', min_value=1,\n max_value=10, value=3)\n", (2572, 2646), True, 'import streamlit as st\n'), ((2778, 2893), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Choose correlation period figure (expressed in days)"""'], {'min_value': '(5)', 'max_value': '(200)', 'value': '(30)'}), "('Choose correlation period figure (expressed in days)',\n min_value=5, max_value=200, value=30)\n", (2795, 2893), True, 'import streamlit as st\n'), ((2910, 3026), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Choose maximum period figure (expressed in days)"""'], {'min_value': '(100)', 'max_value': '(2000)', 'value': '(1000)'}), "('Choose maximum period figure (expressed in days)',\n min_value=100, max_value=2000, value=1000)\n", (2927, 3026), True, 'import streamlit as st\n'), ((3425, 3481), 'pandas.melt', 'pd.melt', (['out'], {'id_vars': '"""Date"""', 'value_vars': 'out.columns[1:]'}), "(out, id_vars='Date', value_vars=out.columns[1:])\n", (3432, 3481), True, 'import pandas as pd\n'), ((3579, 3689), 'plotly.express.line', 'px.line', (['out_long'], {'x': '"""Date"""', 'y': '"""Correlation"""', 'title': 'f"""Rolling correlations with {ticker}"""', 'color': '"""Ticker"""'}), "(out_long, x='Date', y='Correlation', title=\n f'Rolling correlations with {ticker}', color='Ticker')\n", (3586, 3689), True, 'import plotly.express as px\n'), ((3689, 3735), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (3704, 3735), True, 'import streamlit as st\n'), ((546, 580), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'TIMEZONE'}), '(tz=TIMEZONE)\n', (567, 580), False, 'import datetime\n'), ((3327, 3386), 'pandas.DataFrame', 'pd.DataFrame', 
(['corr_tick_rolling'], {'index': 'df_perc_period.index'}), '(corr_tick_rolling, index=df_perc_period.index)\n', (3339, 3386), True, 'import pandas as pd\n'), ((2660, 2691), 'pandas.DataFrame', 'pd.DataFrame', (['store_corelations'], {}), '(store_corelations)\n', (2672, 2691), True, 'import pandas as pd\n'), ((2344, 2380), 'pandas.DataFrame', 'pd.DataFrame', (['df_perc_period[ticker]'], {}), '(df_perc_period[ticker])\n', (2356, 2380), True, 'import pandas as pd\n')] |
import pandas as pd
import geopandas
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
from functools import reduce
from wordcloud import WordCloud, STOPWORDS
import numpy as np
def get_geopandas_world():
    """Load the bundled Natural Earth low-res world map and patch ISO-A3 codes.

    A handful of rows in the bundled dataset carry missing/placeholder
    iso_a3 values; they are fixed here by (hard-coded) row index.
    """
    world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))
    iso_fixes = {21: 'NOR', 43: 'FRA', 160: 'CYP', 167: 'SOM', 174: 'XKX'}
    for row_index, iso_code in iso_fixes.items():
        world.at[row_index, 'iso_a3'] = iso_code
    return world
def plot_worldmap(count_per_country3_map, legend=False, name="Undefined", cmap='Greens', missing_kwds=None):
    """Plot a world choropleth of per-country values keyed by ISO-A3 code.

    Args:
        count_per_country3_map: mapping from ISO-A3 code to the value to plot.
        legend: if True, draw a colour bar next to the map.
        name: column name used for the plotted values.
        cmap: matplotlib colormap name.
        missing_kwds: styling for countries absent from the mapping; a
            'color' of 'lightgrey' is applied unless overridden.

    Returns:
        The matplotlib axes returned by GeoDataFrame.plot.
    """
    # Bug fix: work on a copy so the caller's dict is never mutated (the
    # default colour used to be written back into the caller's dict).
    missing_kwds = dict(missing_kwds) if missing_kwds is not None else {}
    missing_kwds.setdefault('color', 'lightgrey')
    world = get_geopandas_world()
    world[name] = world['iso_a3'].apply(
        lambda x: count_per_country3_map[x] if x in count_per_country3_map else None)
    if legend:
        fig, ax = plt.subplots(1, 1)
        divider = make_axes_locatable(ax)
        # Attach the colour bar in its own axes so it matches the map height.
        cax = divider.append_axes("right", size="5%", pad=0.1)
        w_plt = world.plot(column=name, cmap=cmap, ax=ax, legend=True, cax=cax, missing_kwds=missing_kwds)
    else:
        w_plt = world.plot(column=name, cmap=cmap, missing_kwds=missing_kwds)
    return w_plt
def _is_exclusive(series):
    """Return True iff exactly one entry of the boolean series is True."""
    counts = series.value_counts()
    if True not in counts.index:
        return False
    return counts[True] == 1
def create_plot_from_truth_matrix(df, style='bar', names=None, with_exclusives=False):
    """
    Plot the per-column True counts of a boolean "truth matrix".

    Args:
        df: DataFrame of booleans (rows = items, columns = categories).
        style: matplotlib plot kind passed to Series.plot (e.g. 'bar').
        names: None, a dict mapping old->new column names, or an iterable of
            replacements in the current column order.
        with_exclusives: if True, also overlay the counts restricted to rows
            where exactly one column is True.

    Returns:
        The matplotlib axes from the last plot call.
    """
    if with_exclusives:
        exclusive = df[df.apply(_is_exclusive, axis=1)]
    # Module-level `pd.value_counts` is deprecated (removed in pandas 3.0);
    # apply the Series method per column instead -- same result.
    df_count = df.apply(lambda column: column.value_counts())
    if with_exclusives:
        ex_count = exclusive.apply(lambda column: column.value_counts())
    if names:
        if type(names) is not dict:
            names = dict(zip(df_count.columns, names))
        df_count = df_count.rename(columns=names)
        if with_exclusives:
            ex_count = ex_count.rename(columns=names)
    # Light-grey background bars for the full counts, then an outline (or
    # filled bars, when no exclusives overlay follows) drawn on top.
    ret = df_count.loc[True].plot(kind=style, color='lightgrey')
    ret = df_count.loc[True].plot(kind=style, fill=(not with_exclusives))
    if with_exclusives and True in ex_count.index:
        ret = ex_count.loc[True].plot(kind=style)
    return ret
def make_str(a):
    """Coerce a value to str, mapping None and float NaN to ''.

    Uses isinstance rather than the original ``type(a) == float`` check so
    NaN stored as a float subclass (e.g. numpy.float64) is also treated as
    missing instead of being stringified to 'nan'.
    """
    if a is None or (isinstance(a, float) and np.isnan(a)):
        return ''
    return str(a)


def combine_strs(a, b):
    """Join two values (coerced via make_str) with a single space."""
    return make_str(a) + ' ' + make_str(b)
def plot_wordcloud(series, save_file=None, show=False):
    """
    Render a word cloud from a series of (possibly missing) strings.

    Text assembly uses ``' '.join`` over ``reduce(combine_strs, series)``:
    the reduce form was O(n^2) in total text size, raised TypeError on an
    empty series, and returned the raw (possibly non-str) element for a
    one-element series.  Joining make_str results is linear and always
    yields a str.

    Args:
        series: iterable of values to include (None/NaN entries are skipped).
        save_file: optional path; the figure is saved there if given.
        show: keep the figure open and return the axis limits; when False the
            figure is closed and None is returned.
    """
    text = ' '.join(make_str(value) for value in series)
    if not text.strip():
        return None
    # Generate word cloud
    wordcloud = WordCloud(width=3000, height=1000, random_state=1, background_color='white', collocations=False,
                          stopwords=STOPWORDS).generate(text)
    plt.figure(figsize=(40, 30))
    # Display image
    plt.imshow(wordcloud)
    # No axis details
    ret = plt.axis("off")
    if save_file:
        plt.savefig(save_file, bbox_inches='tight')
    if not show:
        plt.cla()
        plt.clf()
        plt.close()
        ret = None
    return ret
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"functools.reduce",
"matplotlib.pyplot.clf",
"wordcloud.WordCloud",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.isnan",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.pyplot.axis",
"geopandas.datasets.get_path... | [((2616, 2644), 'functools.reduce', 'reduce', (['combine_strs', 'series'], {}), '(combine_strs, series)\n', (2622, 2644), False, 'from functools import reduce\n'), ((2895, 2923), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(40, 30)'}), '(figsize=(40, 30))\n', (2905, 2923), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2970), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {}), '(wordcloud)\n', (2959, 2970), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3018), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3011, 3018), True, 'import matplotlib.pyplot as plt\n'), ((277, 327), 'geopandas.datasets.get_path', 'geopandas.datasets.get_path', (['"""naturalearth_lowres"""'], {}), "('naturalearth_lowres')\n", (304, 327), False, 'import geopandas\n'), ((994, 1012), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1006, 1012), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1054), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (1050, 1054), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3046, 3089), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_file'], {'bbox_inches': '"""tight"""'}), "(save_file, bbox_inches='tight')\n", (3057, 3089), True, 'import matplotlib.pyplot as plt\n'), ((3116, 3125), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3123, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3143), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3141, 3143), True, 'import matplotlib.pyplot as plt\n'), ((3152, 3163), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3161, 3163), True, 'import matplotlib.pyplot as plt\n'), ((2732, 2853), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(3000)', 'height': '(1000)', 'random_state': '(1)', 'background_color': '"""white"""', 'collocations': '(False)', 'stopwords': 'STOPWORDS'}), 
"(width=3000, height=1000, random_state=1, background_color='white',\n collocations=False, stopwords=STOPWORDS)\n", (2741, 2853), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((2453, 2464), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (2461, 2464), True, 'import numpy as np\n')] |
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Lipinski
from rdkit.Chem import Descriptors
from rdkit.DataStructs import FingerprintSimilarity, ConvertToNumpyArray
def clean_mol(smiles):
    """
    Construct a molecule from a SMILES string, removing stereochemistry and
    explicit hydrogens, and setting aromaticity.

    Raises:
        ValueError: if the SMILES string cannot be parsed into a molecule.
    """
    # NOTE(review): `sanitize=64` is suspicious -- MolFromSmiles expects a
    # boolean, so any nonzero value sanitizes here, making the SanitizeMol
    # call below redundant. If deferred sanitization was intended,
    # sanitize=False is the usual spelling -- confirm the intent.
    mol = Chem.MolFromSmiles(str(smiles), sanitize=64)
    if mol is None:
        raise ValueError("Invalid SMILES")
    Chem.RemoveStereochemistry(mol)
    Chem.SanitizeMol(mol)
    mol = Chem.RemoveHs(mol)
    return mol
def clean_mols(all_smiles):
    """
    Construct a list of molecules from a list of SMILES strings, replacing
    invalid molecules with None in the list.
    """
    def _clean_or_none(smiles):
        # Position must be preserved, so parse failures map to None.
        try:
            return clean_mol(smiles)
        except ValueError:
            return None
    return [_clean_or_none(smiles) for smiles in all_smiles]
def in_Ro5(mol):
    """
    Test whether a molecule is in Lipinski "Rule of 5" space, meaning
    - 5 or fewer H bond donors
    - 10 or fewer H bond acceptors
    - MW <= 500 Da
    - logP < 5
    """
    donors_ok = Lipinski.NumHDonors(mol) <= 5
    acceptors_ok = Lipinski.NumHAcceptors(mol) <= 10
    weight_ok = Descriptors.MolWt(mol) <= 500
    logp_ok = Descriptors.MolLogP(mol) < 5
    return donors_ok and acceptors_ok and weight_ok and logp_ok
def get_ecfp6_fingerprints(mols):
    """
    Get ECFP6 fingerprints (Morgan, radius 3, 1024 bits) for a list of
    molecules which may include `None`s, gracefully mapping each `None`
    molecule to a `None` fingerprint in the same position.
    """
    return [None if mol is None else
            AllChem.GetMorganFingerprintAsBitVect(mol, 3, nBits=1024)
            for mol in mols]
def get_bit_vector(fp):
    """Convert an RDKit fingerprint into a numpy array of its bits."""
    bit_array = np.zeros((1,))
    ConvertToNumpyArray(fp, bit_array)
    return bit_array
def get_tanimoto(list1, list2):
    """
    Pairwise Tanimoto coefficients between two fingerprint lists, in
    row-major order over list1 x list2; None wherever either fingerprint
    is None.
    """
    return [None if fp1 is None or fp2 is None
            else FingerprintSimilarity(fp1, fp2)
            for fp1 in list1
            for fp2 in list2]
| [
"rdkit.Chem.Descriptors.MolWt",
"rdkit.DataStructs.ConvertToNumpyArray",
"rdkit.Chem.Descriptors.MolLogP",
"rdkit.Chem.RemoveStereochemistry",
"rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect",
"rdkit.Chem.Lipinski.NumHAcceptors",
"rdkit.Chem.SanitizeMol",
"numpy.zeros",
"rdkit.Chem.Lipinski.NumHDo... | [((501, 532), 'rdkit.Chem.RemoveStereochemistry', 'Chem.RemoveStereochemistry', (['mol'], {}), '(mol)\n', (527, 532), False, 'from rdkit import Chem\n'), ((537, 558), 'rdkit.Chem.SanitizeMol', 'Chem.SanitizeMol', (['mol'], {}), '(mol)\n', (553, 558), False, 'from rdkit import Chem\n'), ((569, 587), 'rdkit.Chem.RemoveHs', 'Chem.RemoveHs', (['mol'], {}), '(mol)\n', (582, 587), False, 'from rdkit import Chem\n'), ((1189, 1213), 'rdkit.Chem.Lipinski.NumHDonors', 'Lipinski.NumHDonors', (['mol'], {}), '(mol)\n', (1208, 1213), False, 'from rdkit.Chem import Lipinski\n'), ((1229, 1256), 'rdkit.Chem.Lipinski.NumHAcceptors', 'Lipinski.NumHAcceptors', (['mol'], {}), '(mol)\n', (1251, 1256), False, 'from rdkit.Chem import Lipinski\n'), ((1266, 1288), 'rdkit.Chem.Descriptors.MolWt', 'Descriptors.MolWt', (['mol'], {}), '(mol)\n', (1283, 1288), False, 'from rdkit.Chem import Descriptors\n'), ((1300, 1324), 'rdkit.Chem.Descriptors.MolLogP', 'Descriptors.MolLogP', (['mol'], {}), '(mol)\n', (1319, 1324), False, 'from rdkit.Chem import Descriptors\n'), ((1893, 1907), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1901, 1907), True, 'import numpy as np\n'), ((1912, 1940), 'rdkit.DataStructs.ConvertToNumpyArray', 'ConvertToNumpyArray', (['fp', 'arr'], {}), '(fp, arr)\n', (1931, 1940), False, 'from rdkit.DataStructs import FingerprintSimilarity, ConvertToNumpyArray\n'), ((1753, 1810), 'rdkit.Chem.AllChem.GetMorganFingerprintAsBitVect', 'AllChem.GetMorganFingerprintAsBitVect', (['mol', '(3)'], {'nBits': '(1024)'}), '(mol, 3, nBits=1024)\n', (1790, 1810), False, 'from rdkit.Chem import AllChem\n'), ((2167, 2198), 'rdkit.DataStructs.FingerprintSimilarity', 'FingerprintSimilarity', (['fp1', 'fp2'], {}), '(fp1, fp2)\n', (2188, 2198), False, 'from rdkit.DataStructs import FingerprintSimilarity, ConvertToNumpyArray\n')] |
import cv2
import numpy as np
from __utils__.general import show_image
def power_law(image, c=1, gamma=1):
    """Apply the power-law (gamma) transform s = c * r**gamma to every pixel.

    The original looped pixel-by-pixel with np.nditer, which is very slow
    and, for integer images with an integer gamma, could wrap around during
    the per-pixel exponentiation.  Computing in float64 and casting back is
    a single vectorized pass with identical in-range results.

    Args:
        image: input image array (any numeric dtype).
        c: multiplicative constant.
        gamma: exponent.

    Returns:
        Transformed array with the same dtype as `image`.
    """
    transformed = c * np.power(image.astype(np.float64), gamma)
    return transformed.astype(image.dtype)
if __name__ == '__main__':
    # Demo: read the sample image as grayscale (imread flag 0) and display
    # the original next to its power-law transform, side by side.
    image = cv2.imread('../../asserts/images/elena.jpg', 0)
    res = np.hstack((image, power_law(image, c=1, gamma=1.1)))
    show_image(res)
| [
"numpy.nditer",
"cv2.imread",
"numpy.power",
"__utils__.general.show_image"
] | [((150, 188), 'numpy.nditer', 'np.nditer', (['out'], {'op_flags': "['readwrite']"}), "(out, op_flags=['readwrite'])\n", (159, 188), True, 'import numpy as np\n'), ((294, 341), 'cv2.imread', 'cv2.imread', (['"""../../asserts/images/elena.jpg"""', '(0)'], {}), "('../../asserts/images/elena.jpg', 0)\n", (304, 341), False, 'import cv2\n'), ((409, 424), '__utils__.general.show_image', 'show_image', (['res'], {}), '(res)\n', (419, 424), False, 'from __utils__.general import show_image\n'), ((215, 237), 'numpy.power', 'np.power', (['pixel', 'gamma'], {}), '(pixel, gamma)\n', (223, 237), True, 'import numpy as np\n')] |
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelBinarizer
def clean_data(data, features_to_clean):
    """
    Drop the given feature columns from `data` in place.

    Uses a single DataFrame.drop call instead of one drop (and full-frame
    rewrite) per column.

    Args:
        data: DataFrame to modify in place.
        features_to_clean: iterable of column names to remove.
    """
    data.drop(columns=list(features_to_clean), inplace=True)
def fulfill_missing_values(data, metadata=None):
    """
    Fill missing Age/Fare/Embarked values of `data` in place.

    When `metadata` is given (e.g. statistics fitted on the training set),
    its stored values are reused; otherwise the fill values are computed
    from `data` itself (median for Age/Fare, mode for Embarked).

    Args:
        data: DataFrame with 'Age', 'Fare' and 'Embarked' columns.
        metadata: optional dict with keys 'age_median', 'fare_median',
            'embarked_value'.  NOTE: an empty dict is treated like None
            (truthiness check), preserving historical behaviour.

    Returns:
        dict with the fill values actually used.
    """
    if metadata:
        age_median = metadata['age_median']
        fare_median = metadata['fare_median']
        embarked_value = metadata['embarked_value']
    else:
        age_median = data['Age'].median()
        fare_median = data['Fare'].median()
        embarked_value = data['Embarked'].mode()[0]
    # Assign back instead of Series.fillna(inplace=True): that chained
    # inplace fill is deprecated and silently stops propagating to the
    # parent frame under pandas copy-on-write.
    data['Age'] = data['Age'].fillna(age_median)
    data['Fare'] = data['Fare'].fillna(fare_median)
    data['Embarked'] = data['Embarked'].fillna(embarked_value)
    return {
        'age_median': age_median,
        'fare_median': fare_median,
        'embarked_value': embarked_value
    }
def one_hot_encoding(data, features, metadata=None):
    """
    One-hot encode the given columns of `data`, dropping the originals.

    A binary feature (2 classes) yields a single 0/1 column named
    '<feature>_<first class>'; 3+ classes yield one column per class named
    '<feature>_<class>'.

    Args:
        data: DataFrame to encode (a new DataFrame is returned).
        features: non-empty list of column names to encode.
        metadata: optional dict of previously-fitted classes per feature
            (e.g. from encoding the training set) so the same encoding is
            reused; the fitted classes are recorded into it.

    Returns:
        (encoded DataFrame, metadata dict)
    """
    assert len(features) > 0
    # BUGFIX: the default used to be a mutable `metadata={}`, which Python
    # shares across calls -- classes fitted in one invocation silently
    # leaked into every later call that relied on the default.
    if metadata is None:
        metadata = {}
    for feature in features:
        label_binarizer = LabelBinarizer()
        if feature in metadata:
            # Reuse the classes fitted earlier (e.g. on the training set).
            label_binarizer.classes_ = np.array(metadata[feature])
            labeled_features = label_binarizer.transform(data[feature])
        else:
            labeled_features = label_binarizer.fit_transform(data[feature])
        # LabelBinarizer emits one column per class for 3+ classes but a
        # single column for binary features.
        column_names_for_labeled_features = ['{}_{}'.format(feature, cls) for cls in label_binarizer.classes_] if len(
            label_binarizer.classes_) >= 3 else ['{}_{}'.format(feature, label_binarizer.classes_[0])]
        data = data.join(pd.DataFrame(labeled_features,
                                      columns=column_names_for_labeled_features,
                                      index=data.index))
        data.drop(feature, axis=1, inplace=True)
        metadata[feature] = label_binarizer.classes_.tolist()
    return data, metadata
# Command-line interface.  The original help strings (and description) were
# copy-pasted placeholders from the argparse docs ("an integer for the
# accumulator"); they now describe the actual arguments.
parser = argparse.ArgumentParser(
    description='Preprocess the Titanic train/test CSV files for modelling.')
parser.add_argument('--input-train-data-path', type=str,
                    help='path of the raw training CSV to read')
parser.add_argument('--input-test-data-path', type=str,
                    help='path of the raw test CSV to read')
parser.add_argument('--output-train-data-path', type=str,
                    help='path where the processed training CSV is written')
parser.add_argument('--output-test-data-path', type=str,
                    help='path where the processed test CSV is written')
args = parser.parse_args()

# Make sure the output directories exist before writing.
Path(args.output_train_data_path).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_test_data_path).parent.mkdir(parents=True, exist_ok=True)

train = pd.read_csv(args.input_train_data_path)
test = pd.read_csv(args.input_test_data_path)

# PassengerId is dropped from train but kept in test -- presumably needed
# downstream for submission output; confirm before changing.
clean_data(data=train,
           features_to_clean=['PassengerId', 'Cabin', 'Name', 'Ticket'])
clean_data(data=test,
           features_to_clean=['Cabin', 'Name', 'Ticket'])

# Fit imputation statistics on train only, then reuse them for test so the
# two sets are filled consistently.
missing_values_fulfillment_metadata = fulfill_missing_values(data=train)
fulfill_missing_values(data=test,
                       metadata=missing_values_fulfillment_metadata)

# Likewise, one-hot classes learned on train are reused on test.
train, encoding_metadata = one_hot_encoding(train,
                                            features=['Embarked', 'Sex'])
test, _ = one_hot_encoding(test,
                          features=['Embarked', 'Sex'],
                          metadata=encoding_metadata)

train.to_csv(args.output_train_data_path)
test.to_csv(args.output_test_data_path)
| [
"sklearn.preprocessing.LabelBinarizer",
"argparse.ArgumentParser",
"pandas.read_csv",
"pathlib.Path",
"numpy.array",
"pandas.DataFrame"
] | [((1885, 1946), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (1908, 1946), False, 'import argparse\n'), ((2518, 2557), 'pandas.read_csv', 'pd.read_csv', (['args.input_train_data_path'], {}), '(args.input_train_data_path)\n', (2529, 2557), True, 'import pandas as pd\n'), ((2565, 2603), 'pandas.read_csv', 'pd.read_csv', (['args.input_test_data_path'], {}), '(args.input_test_data_path)\n', (2576, 2603), True, 'import pandas as pd\n'), ((1043, 1059), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (1057, 1059), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((1131, 1158), 'numpy.array', 'np.array', (['metadata[feature]'], {}), '(metadata[feature])\n', (1139, 1158), True, 'import numpy as np\n'), ((1568, 1663), 'pandas.DataFrame', 'pd.DataFrame', (['labeled_features'], {'columns': 'column_names_for_labeled_features', 'index': 'data.index'}), '(labeled_features, columns=column_names_for_labeled_features,\n index=data.index)\n', (1580, 1663), True, 'import pandas as pd\n'), ((2358, 2391), 'pathlib.Path', 'Path', (['args.output_train_data_path'], {}), '(args.output_train_data_path)\n', (2362, 2391), False, 'from pathlib import Path\n'), ((2434, 2466), 'pathlib.Path', 'Path', (['args.output_test_data_path'], {}), '(args.output_test_data_path)\n', (2438, 2466), False, 'from pathlib import Path\n')] |
"""
This file is part of the package FUNtoFEM for coupled aeroelastic simulation
and design optimization.
Copyright (C) 2015 Georgia Tech Research Corporation.
Additional copyright (C) 2015 <NAME>, <NAME> and <NAME>.
All rights reserved.
FUNtoFEM is licensed under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
--------------------------------------------------------------------------------
The Works
--------------------------------------------------------------------------------
The following script demonstrates the displacement transfer's capability to
transfer a variety of types of displacements from a relatively simple structural
mesh to a relatively simple aerodynamic surface mesh
"""
import numpy as np
from mpi4py import MPI
from funtofem import TransferScheme
import sys
sys.path.append('../')
from tecplot_output import writeOutputForTecplot
import meshpy.triangle as triangle
"""
--------------------------------------------------------------------------------
Creating meshes
--------------------------------------------------------------------------------
"""
# Create boundary of high aspect ratio, tapered plate for structure.
# (x, y) corner vertices ordered around the boundary; z is appended later.
struct_bound = [(1.791204, 0.654601),
                (1.980463, 4.844049),
                (3.535093, 4.533113),
                (3.994722, 0.654601)]
def round_trip_connect(start, end):
    """Edge tuples chaining indices start..end consecutively, then closing
    the loop with a final (end, start) edge."""
    edges = [(i, i + 1) for i in range(start, end)]
    edges.append((end, start))
    return edges
struct_facets = round_trip_connect(0, len(struct_bound)-1)

# Mesh the plate using Triangle
struct_info = triangle.MeshInfo()
struct_info.set_points(struct_bound)
struct_info.set_facets(struct_facets)
struct_mesh = triangle.build(struct_info, max_volume=1e-1, min_angle=25)
# triangle.write_gnuplot_mesh("triangles.dat", struct_mesh)

# Extract nodal coordinates, appending a constant z to each 2-D mesh point
z_offset = 0.0
struct_X = []
for point in struct_mesh.points:
    point += [z_offset]
    struct_X.append(point)
struct_X = np.array(struct_X).flatten()
# BUGFIX: floor division -- plain `/ 3` yields a float under Python 3,
# which breaks later integer uses such as range(struct_nnodes).
struct_nnodes = len(struct_X) // 3

# Flatten element connectivity into 1-based node indices
struct_conn = []
for t in struct_mesh.elements:  # enumerate() index was unused
    struct_conn += t
struct_conn = np.array(struct_conn) + 1
struct_nelems = len(struct_mesh.elements)
# Offsets into struct_conn: 3 nodes per triangular element
struct_ptr = np.arange(0, 3*struct_nelems+1, 3, dtype='intc')
# Create rectangular plate for aerodynamic surface.
# (x, y) corner vertices; a larger rectangle than the structural plate.
aero_bound = [(1.5, 0.0),
              (1.5, 6.0),
              (4.5, 6.0),
              (4.5, 0.0)]
def round_trip_connect(start, end):
    """Return edges linking consecutive indices from start to end, plus the
    closing (end, start) edge.  (Redefinition of the helper above.)"""
    return [(i, i + 1) for i in range(start, end)] + [(end, start)]
aero_facets = round_trip_connect(0, len(aero_bound)-1)

# Mesh the plate using Triangle
aero_info = triangle.MeshInfo()
aero_info.set_points(aero_bound)
aero_info.set_facets(aero_facets)
aero_mesh = triangle.build(aero_info, max_volume=1e-3, min_angle=25)

# Extract nodal coordinates, appending a constant z to each 2-D mesh point
z_offset = 1.0
aero_X = []
for point in aero_mesh.points:
    point += [z_offset]
    aero_X.append(point)
aero_X = np.array(aero_X).flatten()
# BUGFIX: floor division -- plain `/ 3` yields a float under Python 3,
# which breaks later integer uses of aero_nnodes (e.g. np.zeros sizing).
aero_nnodes = len(aero_X) // 3

# Flatten element connectivity into 1-based node indices
aero_conn = []
for t in aero_mesh.elements:  # enumerate() index was unused
    aero_conn += t
aero_conn = np.array(aero_conn) + 1
aero_nelems = len(aero_mesh.elements)
# Offsets into aero_conn: 3 nodes per triangular element
aero_ptr = np.arange(0, 3*aero_nelems+1, 3, dtype='intc')
"""
--------------------------------------------------------------------------------
Defining displacements
--------------------------------------------------------------------------------
"""
# STRETCH: scale the spanwise (y) coordinate by factor st
st = 1.0 # stretch factor
stretch = np.array([[1.0, 0.0, 0.0],
                    [0.0, st, 0.0],
                    [0.0, 0.0, 1.0]])
# Apply the 3x3 map to every node: reshape flat coords to (n, 3), transform
stretched = np.dot(stretch, struct_X.reshape((-1,3)).T).T

# SHEAR: shift x proportionally to y
sh = 0.25 # 2. / b # shear factor
shear = np.array([[1.0, sh, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
sheared = np.dot(shear, stretched.T).T

# TWIST: rotate each node about the y-axis, ramping linearly with span
theta_tip = -90.0 * np.pi / 180.0 # twist at tip, in radians
twisted = np.zeros(sheared.shape)
y = struct_X[1::3]     # spanwise coordinates of all structural nodes
b = y.max() - y.min()  # span length, normalizes the twist ramp
# NOTE(review): struct_nnodes must be an int here -- upstream true division
# (len(struct_X)/3) would make range() fail under Python 3.
for k in range(struct_nnodes):
    p = sheared[k,:]
    y = p[1]  # shadows the spanwise array above (only b is still needed)
    theta = theta_tip * y / b
    twist = np.array([[np.cos(theta), 0.0, np.sin(theta)],
                      [0.0, 1.0, 0.0],
                      [-np.sin(theta), 0.0, np.cos(theta)]])
    p = np.dot(twist, p)
    twisted[k,:] = p

# BEND: quadratic out-of-plane (z) deflection growing with y^2
bent_z = 0.05*struct_X[1::3]**2
bent_z = bent_z.reshape((-1,1))
bend = np.concatenate((np.zeros((struct_nnodes, 1)),
                       np.zeros((struct_nnodes, 1)),
                       bent_z), axis=1)
bent = twisted + bend

# TRANSLATION: rigid-body offset (currently zero in all three directions)
translation = np.concatenate((np.zeros((struct_nnodes, 1)),
                              np.zeros((struct_nnodes, 1)),
                              0.0 * np.ones((struct_nnodes, 1))), axis=1)
translated = bent + translation

# Displacement field = deformed coordinates minus original coordinates
struct_disps = translated.flatten() - struct_X
"""
--------------------------------------------------------------------------------
Running TransferScheme
--------------------------------------------------------------------------------
"""
# Creating transfer scheme
comm = MPI.COMM_SELF
isymm = -1        # no symmetry plane
num_nearest = 20  # nearest structural nodes used per aero node
beta = 0.5        # Gaussian decay weighting parameter
# BUGFIX: the original call referenced an undefined name `scheme`
# (NameError at runtime) and never passed num_nearest/beta; pyMELD takes
# (comm, struct_comm, struct_root, aero_comm, aero_root, isymm,
#  num_nearest, beta).
meld = TransferScheme.pyMELD(comm, comm, 0, comm, 0, isymm, num_nearest, beta)

# Set nodes into transfer scheme
meld.setStructNodes(struct_X)
meld.setAeroNodes(aero_X)

# Initialize funtofem
meld.initialize()

# Transfer displacements.  int() guards against aero_nnodes arriving as a
# float from true division upstream.
aero_disps = np.zeros(3*int(aero_nnodes), dtype=TransferScheme.dtype)
# BUGFIX: transferDisps fills the aero displacement array passed as its
# second argument; the original never handed it over.
meld.transferDisps(struct_disps, aero_disps)

# Write meshes to file
struct_elem_type = 1
aero_elem_type = 1
writeOutputForTecplot(struct_X, aero_X,
                      struct_disps, aero_disps,
                      struct_conn, aero_conn,
                      struct_ptr, aero_ptr,
                      struct_elem_type, aero_elem_type)
| [
"funtofem.TransferScheme.pyMELD",
"numpy.ones",
"meshpy.triangle.MeshInfo",
"meshpy.triangle.build",
"tecplot_output.writeOutputForTecplot",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.cos",
"numpy.sin",
"sys.path.append",
"numpy.arange"
] | [((1264, 1286), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (1279, 1286), False, 'import sys\n'), ((2048, 2067), 'meshpy.triangle.MeshInfo', 'triangle.MeshInfo', ([], {}), '()\n', (2065, 2067), True, 'import meshpy.triangle as triangle\n'), ((2157, 2214), 'meshpy.triangle.build', 'triangle.build', (['struct_info'], {'max_volume': '(0.1)', 'min_angle': '(25)'}), '(struct_info, max_volume=0.1, min_angle=25)\n', (2171, 2214), True, 'import meshpy.triangle as triangle\n'), ((2678, 2730), 'numpy.arange', 'np.arange', (['(0)', '(3 * struct_nelems + 1)', '(3)'], {'dtype': '"""intc"""'}), "(0, 3 * struct_nelems + 1, 3, dtype='intc')\n", (2687, 2730), True, 'import numpy as np\n'), ((3148, 3167), 'meshpy.triangle.MeshInfo', 'triangle.MeshInfo', ([], {}), '()\n', (3165, 3167), True, 'import meshpy.triangle as triangle\n'), ((3247, 3304), 'meshpy.triangle.build', 'triangle.build', (['aero_info'], {'max_volume': '(0.001)', 'min_angle': '(25)'}), '(aero_info, max_volume=0.001, min_angle=25)\n', (3261, 3304), True, 'import meshpy.triangle as triangle\n'), ((3676, 3726), 'numpy.arange', 'np.arange', (['(0)', '(3 * aero_nelems + 1)', '(3)'], {'dtype': '"""intc"""'}), "(0, 3 * aero_nelems + 1, 3, dtype='intc')\n", (3685, 3726), True, 'import numpy as np\n'), ((3964, 4024), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, st, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, st, 0.0], [0.0, 0.0, 1.0]])\n', (3972, 4024), True, 'import numpy as np\n'), ((4174, 4234), 'numpy.array', 'np.array', (['[[1.0, sh, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, sh, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (4182, 4234), True, 'import numpy as np\n'), ((4389, 4412), 'numpy.zeros', 'np.zeros', (['sheared.shape'], {}), '(sheared.shape)\n', (4397, 4412), True, 'import numpy as np\n'), ((5572, 5632), 'funtofem.TransferScheme.pyMELD', 'TransferScheme.pyMELD', (['comm', 'comm', '(0)', 'comm', '(0)', 'scheme', 'isymm'], {}), '(comm, comm, 0, 
comm, 0, scheme, isymm)\n', (5593, 5632), False, 'from funtofem import TransferScheme\n'), ((5803, 5856), 'numpy.zeros', 'np.zeros', (['(3 * aero_nnodes)'], {'dtype': 'TransferScheme.dtype'}), '(3 * aero_nnodes, dtype=TransferScheme.dtype)\n', (5811, 5856), True, 'import numpy as np\n'), ((5952, 6105), 'tecplot_output.writeOutputForTecplot', 'writeOutputForTecplot', (['struct_X', 'aero_X', 'struct_disps', 'aero_disps', 'struct_conn', 'aero_conn', 'struct_ptr', 'aero_ptr', 'struct_elem_type', 'aero_elem_type'], {}), '(struct_X, aero_X, struct_disps, aero_disps,\n struct_conn, aero_conn, struct_ptr, aero_ptr, struct_elem_type,\n aero_elem_type)\n', (5973, 6105), False, 'from tecplot_output import writeOutputForTecplot\n'), ((2597, 2618), 'numpy.array', 'np.array', (['struct_conn'], {}), '(struct_conn)\n', (2605, 2618), True, 'import numpy as np\n'), ((3603, 3622), 'numpy.array', 'np.array', (['aero_conn'], {}), '(aero_conn)\n', (3611, 3622), True, 'import numpy as np\n'), ((4281, 4307), 'numpy.dot', 'np.dot', (['shear', 'stretched.T'], {}), '(shear, stretched.T)\n', (4287, 4307), True, 'import numpy as np\n'), ((4716, 4732), 'numpy.dot', 'np.dot', (['twist', 'p'], {}), '(twist, p)\n', (4722, 4732), True, 'import numpy as np\n'), ((2438, 2456), 'numpy.array', 'np.array', (['struct_X'], {}), '(struct_X)\n', (2446, 2456), True, 'import numpy as np\n'), ((3458, 3474), 'numpy.array', 'np.array', (['aero_X'], {}), '(aero_X)\n', (3466, 3474), True, 'import numpy as np\n'), ((4849, 4877), 'numpy.zeros', 'np.zeros', (['(struct_nnodes, 1)'], {}), '((struct_nnodes, 1))\n', (4857, 4877), True, 'import numpy as np\n'), ((4902, 4930), 'numpy.zeros', 'np.zeros', (['(struct_nnodes, 1)'], {}), '((struct_nnodes, 1))\n', (4910, 4930), True, 'import numpy as np\n'), ((5039, 5067), 'numpy.zeros', 'np.zeros', (['(struct_nnodes, 1)'], {}), '((struct_nnodes, 1))\n', (5047, 5067), True, 'import numpy as np\n'), ((5099, 5127), 'numpy.zeros', 'np.zeros', (['(struct_nnodes, 1)'], {}), 
'((struct_nnodes, 1))\n', (5107, 5127), True, 'import numpy as np\n'), ((5165, 5192), 'numpy.ones', 'np.ones', (['(struct_nnodes, 1)'], {}), '((struct_nnodes, 1))\n', (5172, 5192), True, 'import numpy as np\n'), ((4572, 4585), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4578, 4585), True, 'import numpy as np\n'), ((4592, 4605), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4598, 4605), True, 'import numpy as np\n'), ((4691, 4704), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4697, 4704), True, 'import numpy as np\n'), ((4671, 4684), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4677, 4684), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import diffpy.Structure
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import time
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import distance
cifFilePath = '/home/cameron/Dropbox/T2_Dataset/molGeom/T2_2_num_molGeom.cif'
def loadFile(filePath):
molecule = diffpy.Structure.loadStructure(filePath)
coords = molecule.xyz_cartn
np_coords = np.array(coords) # Simple cartesian coords
return np_coords
def adjacencyMatrixNN(coords, knn = 10):
dist = distance.pdist(coords, 'euclidean') # Create condensed distance matrix
adjacency = distance.squareform(dist) # Create standard adjacency matrix
for i, row in enumerate(adjacency):
lowestVals = np.partition(row, knn-1)[:knn] # take k neighbours
threshold = lowestVals.max() # Take the longest distance from k neighbours
exceedsThresholdFlags = row > threshold
adjacency[i][exceedsThresholdFlags] = 0
return adjacency
def networkPlot3D(G, angle, coords):
# 3D network plot
with plt.style.context(('ggplot')):
fig = plt.figure(figsize=(10,7))
ax = Axes3D(fig)
# Loop on the pos dictionary to extract the x,y,z coordinates of each node
for value in coords:
xi = value[0]
yi = value[1]
zi = value[2]
# Scatter plot
ax.scatter(xi, yi, zi, c='blue',edgecolors='k', alpha=0.7)
# Loop on the list of edges to get the x,y,z, coordinates of the connected nodes
# Those two points are the extrema of the line to be plotted
for i,j in enumerate(G.edges()):
x = np.array((coords[j[0]][0], coords[j[1]][0]))
y = np.array((coords[j[0]][1], coords[j[1]][1]))
z = np.array((coords[j[0]][2], coords[j[1]][2]))
# Plot the connecting lines
ax.plot(x, y, z, c='black', alpha=0.5)
# Set the initial view
ax.view_init(30, angle)
# Hide the axes
plt.show()
return
start_time = time.time()
coords = loadFile(cifFilePath)
adjacency = adjacencyMatrixNN(coords, 10)
graph = nx.to_networkx_graph(adjacency) # initialise graph
mst = nx.minimum_spanning_tree(graph)
networkPlot3D(mst, 20, coords)
print("Running time %s seconds" % (time.time() - start_time))
| [
"scipy.spatial.distance.squareform",
"networkx.minimum_spanning_tree",
"scipy.spatial.distance.pdist",
"numpy.partition",
"networkx.to_networkx_graph",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.context",
"time.time",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.sho... | [((2087, 2098), 'time.time', 'time.time', ([], {}), '()\n', (2096, 2098), False, 'import time\n'), ((2182, 2213), 'networkx.to_networkx_graph', 'nx.to_networkx_graph', (['adjacency'], {}), '(adjacency)\n', (2202, 2213), True, 'import networkx as nx\n'), ((2239, 2270), 'networkx.minimum_spanning_tree', 'nx.minimum_spanning_tree', (['graph'], {}), '(graph)\n', (2263, 2270), True, 'import networkx as nx\n'), ((417, 433), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (425, 433), True, 'import numpy as np\n'), ((541, 576), 'scipy.spatial.distance.pdist', 'distance.pdist', (['coords', '"""euclidean"""'], {}), "(coords, 'euclidean')\n", (555, 576), False, 'from scipy.spatial import distance\n'), ((628, 653), 'scipy.spatial.distance.squareform', 'distance.squareform', (['dist'], {}), '(dist)\n', (647, 653), False, 'from scipy.spatial import distance\n'), ((2048, 2058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2056, 2058), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1100), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""ggplot"""'], {}), "('ggplot')\n", (1090, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1152), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1135, 1152), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1176), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (1171, 1176), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((750, 776), 'numpy.partition', 'np.partition', (['row', '(knn - 1)'], {}), '(row, knn - 1)\n', (762, 776), True, 'import numpy as np\n'), ((1705, 1749), 'numpy.array', 'np.array', (['(coords[j[0]][0], coords[j[1]][0])'], {}), '((coords[j[0]][0], coords[j[1]][0]))\n', (1713, 1749), True, 'import numpy as np\n'), ((1766, 1810), 'numpy.array', 'np.array', (['(coords[j[0]][1], coords[j[1]][1])'], {}), '((coords[j[0]][1], coords[j[1]][1]))\n', (1774, 1810), True, 'import numpy as 
np\n'), ((1827, 1871), 'numpy.array', 'np.array', (['(coords[j[0]][2], coords[j[1]][2])'], {}), '((coords[j[0]][2], coords[j[1]][2]))\n', (1835, 1871), True, 'import numpy as np\n'), ((2339, 2350), 'time.time', 'time.time', ([], {}), '()\n', (2348, 2350), False, 'import time\n')] |
import numpy as np
from numpy import linalg as LA
import sys
import librosa
from scipy import linalg
import copy
import random
from math import log
# import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import multiprocessing
import logging
import argparse
def sampleS(S, k):
    """Draw k distinct elements from S by rejection sampling; returns S
    itself when it has at most k elements."""
    if len(S) <= k:
        return S
    chosen = []
    while len(chosen) < k:
        candidate = S[random.randint(0, len(S) - 1)]
        if candidate not in chosen:
            chosen.append(candidate)
    return chosen
def buffer(signal, L, M):
    """
    Split `signal` into frames of length L that overlap by M samples.
    The final frame is zero-padded when the signal does not divide evenly.
    Exits the process when M >= L (frames would never advance).
    Returns a list of length-L segments.
    """
    if M >= L:
        logging.info(
            'Error: Overlapping windows cannot be larger than frame length!')
        sys.exit()
    #
    num_samples = len(signal)
    #
    logging.info('The signal length is %s: ' % num_samples)
    #
    # NOTE: this logged frame count ignores the overlap M; it is not used
    # by the framing loop below.
    K = np.ceil(num_samples / L).astype('int')  # num_frames
    #
    logging.info('The number of frames \'K\' is %s: ' % K)
    logging.info('The length of each frame \'L\' is %s: ' % L)
    #
    frames = []
    frame_idx = 1
    while True:
        start = (frame_idx - 1) * (L - M)
        end = frame_idx * L - (frame_idx - 1) * M
        if start == num_samples:
            break
        if end > num_samples:
            # Partial final frame: copy the tail, zero-pad to length L.
            tail_len = num_samples - start
            padded = np.zeros(L)
            padded[:tail_len] = signal[start:]
            frames.append(padded)
            break
        frames.append(signal[start:end])
        frame_idx += 1
    #
    return frames
def unbuffer(X, hop):
    """
    Reassemble a signal from frames by Hann-windowed overlap-add.

    Args:
        X: (N, L) array -- L frames of N samples each, one frame per column.
        hop: hop size in samples between consecutive frames.

    Returns:
        1-D array of length N + L*hop with the overlap-added samples.
    """
    N, L = X.shape
    #
    T = N + L * hop
    K = np.arange(0, N)
    x = np.zeros(T)
    H = np.hanning(N)
    # BUGFIX: `xrange` is Python-2-only (NameError under Python 3);
    # `range` behaves identically here on both versions.
    for k in range(0, L):
        x[K] = x[K] + np.multiply(H, X[:, k])
        K = K + hop
    #
    return x
class SpeechDenoise:
    """Objective wrapper for GAD-style dictionary learning on buffered audio.

    Holds the buffered signal matrix ``X``, the residual matrix ``R``, the
    learned dictionary ``D`` and the bookkeeping needed to evaluate the
    (approximately submodular) denoising objective and its marginal gains
    during adaptive sampling.
    """

    def __init__(self, X, params, M, signal=None):
        # X is the np.vstacked transpose of the buffered signal
        # (buffered == split up into overlapping windows).
        #
        # BUGFIX: the original default `signal=[]` was a shared mutable
        # default argument; use None and create a fresh list per instance.
        if signal is None:
            signal = []
        # list() so the ground set supports remove()/concatenation on
        # Python 3 as well (range objects do not). Same role as self.I.
        self.meaningfulNodes = list(range(X.shape[1]))
        self.X = X
        self.D = []
        self.params = params
        self.n_iter = self.params['rule_1']['n_iter']  # num_iterations
        self.error = self.params['rule_2']['error']  # stopping tolerance
        #
        self.verbose = self.params['verbose']
        #
        # THE following K and L were typo/switched in the GAD.py code; they're fixed here:
        self.K = self.X.shape[0]  # sample length
        self.L = self.X.shape[1]  # maximum atoms to be learned (i.e. size of ground set)
        #
        # self.I is the ground set of elements (dictionary atoms) we can choose
        self.I = np.arange(0, self.L)
        self.set_ind = []
        self.k_min_sum = 0.0
        # Initializing the residual matrix 'R' by using 'X'
        self.R = self.X.copy()
        # The following are (sort of) optional:
        # used to calculate RMSE after each iteration.
        self.M = M
        # left empty unless we actually want RMSE, which is computationally
        # intense and also requires sending the (big) signal across the line.
        self.signal = signal
        self.rmse = []  # to hold RMSE after each iter
        # and this one to plot solution quality over time
        self.k_min_data = []

    def function(self, S, big_number=25.0):
        """Objective value for the CURRENT solution ``S``.

        Note: this only works for f(S); it will NOT work on any input
        except the current solution (the cached ``k_min_sum`` corresponds
        to the elements actually added so far).
        """
        return len(S) * big_number - self.k_min_sum

    def functionMarg_quickestimate(self, new_elements, curr_elements,
                                   big_number=25.0):
        """Fast modular estimate of the marginal gain of ``new_elements``.

        Deliberately skips re-orthogonalizing the residual after each added
        column: the function is treated as modular within a sampling round
        (and submodular across rounds). Less accurate than functionMarg but
        much cheaper.
        """
        new_elems = [ele for ele in new_elements if ele not in curr_elements]
        if not len(new_elems):
            return 0
        # NOTE: in the original GAD code, self.k_min_sum plays the role of
        # what is called sum_of_norm_ratios here.
        R_copy = copy.copy(self.R)
        sum_of_norm_ratios = np.sum([
            LA.norm(R_copy[:, I_ind_k_min], 1) / LA.norm(
                R_copy[:, I_ind_k_min], 2) for I_ind_k_min in new_elems
        ])
        return len(new_elems) * big_number - sum_of_norm_ratios

    def functionMarg(self, new_elements, curr_elements, big_number=25.0):
        """Exact marginal gain of ``new_elements`` over ``curr_elements``.

        Runs the GAD column-selection update on COPIES of the state, so the
        real residual/dictionary are untouched (the sample may be rejected).
        NOTE: assumes ``curr_elements`` is the current solution; it will be
        wrong for any other input.
        """
        new_elems = [ele for ele in new_elements if ele not in curr_elements]
        if not len(new_elems):
            return 0
        # Copy everything important — this sample might not be used, so the
        # originals must not change.
        # BUGFIX: set_ind was previously aliased (set_ind_copy = self.set_ind),
        # so merely *evaluating* a sample appended to self.set_ind.
        R_copy = copy.copy(self.R)
        D_copy = copy.copy(self.D)
        I_copy = copy.copy(self.I)
        k_min_sum_copy = copy.copy(self.k_min_sum)
        set_ind_copy = copy.copy(self.set_ind)
        marginal_k_min_sum_copy = 0
        # Run the GAD find_column() routine, but instead of searching for a
        # new column, evaluate the quality of the sampled columns by
        # accumulating their sparsity ratios.
        for I_ind_k_min in new_elems:
            r_k = R_copy[:, I_ind_k_min]
            # l1/l2 ratio: smaller means sparser (better) residual column
            k_min = LA.norm(r_k, 1) / LA.norm(r_k, 2)
            marginal_k_min_sum_copy = marginal_k_min_sum_copy + k_min
            k_min_sum_copy = k_min_sum_copy + k_min
            #
            r_k_min = R_copy[:, I_ind_k_min]
            # Set the atom equal to the normalized r_k
            psi = r_k_min / LA.norm(r_k_min, 2)
            # Add to the dictionary D and its index, shrinking set I
            D_copy.append(psi)
            set_ind_copy.append(I_ind_k_min)
            # Compute the new residual for all columns k
            for kk in I_copy:
                r_kk = R_copy[:, kk]
                alpha = np.dot(r_kk, psi)
                R_copy[:, kk] = r_kk - np.dot(psi, alpha)
            # NOTE(review): np.delete removes by POSITION; position equals
            # value only while I_copy is still 0..L-1 — confirm intent when
            # several elements are removed in one call. (Kept as original.)
            I_copy = np.delete(I_copy, [I_ind_k_min])
        return len(new_elems) * big_number - marginal_k_min_sum_copy
def adaptiveSampling_adam(f,
                          k,
                          numSamples,
                          r,
                          opt,
                          alpha1,
                          alpha2,
                          compute_rmse=False,
                          speed_over_accuracy=False,
                          parallel=False):
    """Greedy adaptive-sampling maximization of the SpeechDenoise objective.

    In each of (at most) ``r`` rounds, draws ``numSamples`` random subsets of
    size k/r from the remaining ground set, keeps the best-scoring sample if
    its marginal value clears the threshold (opt - f(X)) / (alpha1 * r), and
    updates f's residual/dictionary state with the accepted elements.

    Parameters:
        f: SpeechDenoise instance (mutated in place as elements are accepted).
        k: target solution size.
        numSamples: random subsets drawn per round.
        r: number of adaptive-sampling rounds (sample size is k / r).
        opt: assumed optimum value, used in the acceptance threshold.
        alpha1, alpha2: threshold scaling constants (alpha2 is only used by
            the commented-out filtering step below).
        compute_rmse: if True, append reconstruction RMSE to f.rmse each step.
        speed_over_accuracy: if True, use the cheap modular marginal estimate.
        parallel: if True, score the samples in parallel worker processes.

    Returns:
        X: list of selected ground-set element indices.
    """
    # Pick the marginal-gain evaluator once, up front.
    # NOTE(review): both closures ignore their big_number argument and always
    # forward the literal 25.0 — confirm this is intended.
    if speed_over_accuracy:
        def functionMarg_closure(new_elements, curr_elements, big_number=25.0):
            return f.functionMarg_quickestimate(
                new_elements, curr_elements, big_number=25.0)
    else:
        def functionMarg_closure(new_elements, curr_elements, big_number=25.0):
            return f.functionMarg(new_elements, curr_elements, big_number=25.0)
    S = copy.deepcopy(f.meaningfulNodes)  # remaining ground set
    X = []  # current solution
    while len(X) < k and len(S + X) > k:
        currentVal = f.function(X)
        logging.info([
            currentVal, 'ground set remaining:',
            len(S), 'size of current solution:',
            len(X)
        ])
        samples = []
        samplesVal = []
        # Score one random size-(k/r) subset; embarrassingly parallel.
        def sample_elements(samples, samplesVal):
            sample = sampleS(S, k / r)
            sampleVal = functionMarg_closure(sample, X)
            samplesVal.append(sampleVal)
            samples.append(sample)
        if parallel:
            # Manager lists are shared across the worker processes.
            manager = multiprocessing.Manager()
            samples = manager.list()
            samplesVal = manager.list()
            jobs = []
            for i in range(numSamples):
                p = multiprocessing.Process(
                    target=sample_elements, args=(samples, samplesVal))
                jobs.append(p)
                p.start()
            for proc in jobs:
                proc.join()
            samples = list(samples)
            samplesVal = list(samplesVal)
        else:
            samples = []
            samplesVal = []
            for i in range(numSamples):
                sample_elements(samples, samplesVal)
        maxSampleVal = max(samplesVal)
        bestSample = samples[samplesVal.index(maxSampleVal)]
        # Accept the best sample only if it clears the threshold.
        if maxSampleVal >= (opt - currentVal) / (alpha1 * float(r)):
            X += bestSample
            for element in bestSample:
                S.remove(element)
            # Bookkeeping specific to the audio de-noising objective:
            # commit each accepted atom to f's dictionary and residual.
            for I_ind_k_min in bestSample:
                r_k_min = f.R[:, I_ind_k_min]
                # l1/l2 sparsity ratio of the accepted residual column
                k_min = LA.norm(r_k_min, 1) / LA.norm(r_k_min, 2)
                f.k_min_data.append(k_min)  # This is just for logging purposes
                #
                f.k_min_sum = f.k_min_sum + k_min
                #
                # Set the l-th atom to equal to normalized r_k
                psi = r_k_min / LA.norm(r_k_min, 2)
                #
                # Add to the dictionary D and its index and shrinking set I
                f.D.append(psi)
                f.set_ind.append(I_ind_k_min)
                #
                # Compute the new residual for all columns k
                for kk in f.I:
                    r_kk = f.R[:, kk]
                    alpha = np.dot(r_kk, psi)
                    f.R[:, kk] = r_kk - np.dot(psi, alpha)
                # NOTE(review): np.delete removes by POSITION, which equals
                # the element value only while f.I is still 0..L-1 — verify.
                f.I = np.delete(f.I, [I_ind_k_min])
                if compute_rmse:  # Note the variables below are all temp versions of the 'real' ones.
                    D = np.vstack(f.D).T
                    I = f.I
                    X_t = np.dot(np.dot(D, D.T), f.X)
                    s_rec = unbuffer(X_t, f.L - f.M)
                    # NOTE(review): `signal` here is a module-level global set
                    # in __main__, not f.signal — confirm this is intended.
                    f.rmse.append(
                        linalg.norm(signal - s_rec[0:len(signal)] / np.max(s_rec))
                    )  # omitted padding at end of s_rec
        else:
            logging.info(
                "NEED TO DO FILTERING STEP, BUT I HAVEN'T CODED THE functionMARG to handle this yet so breaking"
            )
            break
        # newS = copy.deepcopy(S)
        # samples = []
        # for i in xrange(numSamples/200):
        #     samples.append(sampleS(S,k/r))
        # for element in S:
        #     sumMargElements = 0
        #     count = 0
        #     for sample in samples:
        #         if not element in sample:
        #             sumMargElements += f.functionMarg([element], sample + X)
        #             count += 1
        #     if sumMargElements / count < (opt - currentVal) / (alpha2*float(k)):
        #         newS.remove(element)
        # S = newS
        # Ground set too small to reach size k: take everything and stop.
        if len(S + X) <= k:
            logging.info('NOT ENOUGH ELEMENTS left in ground set S')
            X = S + X
    return X
if __name__ == '__main__':
    # --- Command-line interface -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--fraction_to_drop',
        default=0.11,
        type=float,
        help='fraction_to_drop')
    parser.add_argument('--r', default=10, type=int, help='r')
    parser.add_argument('--k', default=80, type=int, help='')
    parser.add_argument('--audio', default='alexa', type=str, help='')
    parser.add_argument('--num_samples', default=36 * 4, type=int, help='r')
    parser.add_argument('--speed_over_accuracy', default=1, type=int, help='')
    args = parser.parse_args()
    # Log to a per-run file named after the main hyperparameters.
    logging.basicConfig(
        format='%(asctime)s: %(message)s',
        level='INFO',
        datefmt='%m/%d/%Y %I:%M:%S %p',
        filename='adaptive_%s_%d_%d_%d.log' %
        (args.audio, args.k, args.num_samples, args.speed_over_accuracy),
        filemode='w')
    logging.info(args)
    ### Params ###
    audio = args.audio
    L = 512 # frame length
    M = 500 # overlapping windows
    my_n_iter = 100
    parallel = True # parallelize the inner for loop of adaptive sampling
    # FOR SPEED TESTS do NOTTT compute the rmse. it's super slow. we just use it to plot stuff if we want.
    compute_rmse = False
    fraction_to_drop = args.fraction_to_drop
    k = args.k # iterations in original GAD algo
    #numSamples = 24
    numSamples = args.num_samples
    r = args.r # rounds of adaptive sampling
    #r = k+1-1
    opt = 1.0 # small so we don't do filtering subroutine as I haven't written that part :)
    alpha1 = 1.0
    alpha2 = 1.0
    speed_over_accuracy = args.speed_over_accuracy # IF true, we assume the fn is modular within rounds and speed things up a lot!
    # We do that by using functionMarg instead of functionMarg_better. May sacrifice some performance, though.
    #############
    num_cores = multiprocessing.cpu_count()
    logging.info('num_cores:')
    logging.info(num_cores)
    # Hyperparameters consumed by SpeechDenoise.__init__.
    params = {
        'rule_1': {
            'n_iter': my_n_iter
        },
        'rule_2': {
            'error': 10**-7
        },
        'verbose': True
    }
    if parallel:
        logging.info("Parallelize sampling.")
    else:
        logging.info("Do not parallelize sampling.")
    #
    #signal, fs = librosa.core.load('./dataset/source2.wav', 44100)
    #signal2, fs2 = sf.read('./dataset/source1.wav', samplerate=fs)
    # signal, fs = librosa.core.load('./dataset/alexa_demo.m4a', 44100)
    # Load the audio and inject drop-out noise by zeroing random samples.
    signal, fs = librosa.core.load('./dataset/' + audio + '.m4a', 44100)
    n_to_drop = int(fraction_to_drop * signal.shape[0])
    drop_idx = np.random.choice(
        range(signal.shape[0]), n_to_drop, replace=True)
    signal[drop_idx] = 0
    #
    # The block below (buffering, running adaptiveSampling_adam, and
    # reconstructing the de-noised track) is currently disabled; only the
    # noisy input is written out at the bottom.
    # signal_original = signal.copy()
    # signal_segments = np.array_split(signal, 4)
    # signal_reconstructed = []
    # # # Normalize the signal
    # # normalizing = linalg.norm(signal)
    # # signal /= normalizing
    # #
    # # # Make some noise
    # # # creating noisy mix
    # # rng = np.random.RandomState(42)
    # # noise = rng.randn(*signal.shape)
    # # noise *= 0.3 / linalg.norm(noise)
    # # signal = signal+noise
    # #
    # # Signal drop noise:
    # for ii in range(len(signal_segments)):
    #     signal = signal_segments[ii]
    #     n_to_drop = int(fraction_to_drop * signal.shape[0])
    #     drop_idx = np.random.choice(
    #         range(signal.shape[0]), n_to_drop, replace=True)
    #     signal[drop_idx] = 0
    #     # plt.close('all')
    #     # plt.figure()
    #     # plt.plot(signal, 'k', alpha=0.3)
    #     # plt.plot(signal_original, 'r:', alpha=0.3, linewidth=1.0)
    #     # plt.legend(('Noisy', 'Clean'))
    #     # plt.title('')
    #     # plt.show()
    #     X_tmp = buffer(signal, L, M)
    #     X = np.vstack(X_tmp).T.astype('float')
    #     # Initialize class with the buffered song X and the objective function
    #     if compute_rmse:
    #         # FOR PLOT GENERATION USE:
    #         f = SpeechDenoise(X, params, M, signal)
    #     else:
    #         f = SpeechDenoise(X, params, M)
    #     logging.info("START")
    #     solution_elements = adaptiveSampling_adam(
    #         f, k, numSamples, r, opt, alpha1, alpha2, compute_rmse,
    #         speed_over_accuracy, parallel)
    #     # Put the output back into the form of the original song
    #     D_stack = np.vstack(f.D).T
    #     X_t = np.dot(np.dot(D_stack, D_stack.T), X)
    #     s_rec = unbuffer(X_t, L - M)
    #     s_rec = s_rec[0:len(signal)]
    #     signal_reconstructed.append(s_rec)
    # signal_reconstructed_unnest = [item for sublist in signal_reconstructed for item in sublist]
    # print 'LEN STITCHED BACK TOGETHER =', len(signal_reconstructed_unnest)
    # print 'LEN ORIGINAL WAS =', len(signal_original)
    # s_rec = signal_reconstructed_unnest # so the rest of the code will work
    # #print f.rmse
    # logging.info("STOP")
    # #######################################
    # # THIS IS WHERE THE TIMER SHOULD STOP #
    # #######################################
    # # # PLOTS
    # # if compute_rmse:
    # #     plt.close('all')
    # #     plt.figure()
    # #     plt.plot(f.rmse, 'r:', alpha=0.8)
    # #     plt.title('RMSE: Original track (without noise) vs. Denoised track')
    # #     plt.show()
    # # avg_sparsity_of_samples_added_per_round = []
    # # for rd in range(r):
    # #     idx_left = rd * r
    # #     idx_right = rd * r + r
    # #     avg_sparsity_of_samples_added_per_round.append(
    # #         np.mean(f.k_min_data[idx_left:idx_right]))
    # # plt.close('all')
    # # plt.figure()
    # # plt.plot(avg_sparsity_of_samples_added_per_round, 'b', alpha=0.8)
    # # plt.title('avg. sparsity values of elements per round')
    # # plt.show()
    # # plt.close('all')
    # # plt.figure()
    # # plt.plot(signal, 'k', alpha=0.3)
    # # plt.plot(signal_original, 'r:', alpha=0.3, linewidth=1.0)
    # # plt.plot(s_rec / max(s_rec), 'b', alpha=0.3, linewidth=1.0)
    # # plt.legend(('Noisy', 'Clean', 'Denoised Estimate'))
    # # plt.title('')
    # # plt.show()
    # # plt.close('all')
    # # plt.figure()
    # # plt.plot(D_stack[0], 'm', alpha=0.3)
    # # plt.title('First dictionary atom (element) added to the solution')
    # # plt.show()
    # #logging.info('s_rec', s_rec)
    # Output the WAV files. Note we also re-make the original, as encoding degrades (so it's only fair)
    # librosa.output.write_wav("original.wav", signal_original, fs)
    librosa.output.write_wav(
        "dataset/noisy_%s_%s.wav" % (args.audio, str(fraction_to_drop)),
        signal, fs)
    # librosa.output.write_wav(
    #     "dataset/adaptive_%s_%s_%d_%d_%d_%d.wav" %
    #     (audio, str(fraction_to_drop), k, r, numSamples, speed_over_accuracy),
    #     s_rec / np.max(s_rec), fs)
| [
"numpy.hanning",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"sys.exit",
"numpy.linalg.norm",
"copy.deepcopy",
"copy.copy",
"logging.info",
"numpy.arange",
"numpy.multiply",
"librosa.core.load",
"argparse.ArgumentParser",
"numpy.delete",
"numpy.max",
"numpy.dot",
"numpy.vst... | [((703, 757), 'logging.info', 'logging.info', (["('The signal length is %s: ' % len_signal)"], {}), "('The signal length is %s: ' % len_signal)\n", (715, 757), False, 'import logging\n'), ((836, 888), 'logging.info', 'logging.info', (['("The number of frames \'K\' is %s: " % K)'], {}), '("The number of frames \'K\' is %s: " % K)\n', (848, 888), False, 'import logging\n'), ((897, 953), 'logging.info', 'logging.info', (['("The length of each frame \'L\' is %s: " % L)'], {}), '("The length of each frame \'L\' is %s: " % L)\n', (909, 953), False, 'import logging\n'), ((1701, 1716), 'numpy.arange', 'np.arange', (['(0)', 'N'], {}), '(0, N)\n', (1710, 1716), True, 'import numpy as np\n'), ((1725, 1736), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (1733, 1736), True, 'import numpy as np\n'), ((1745, 1758), 'numpy.hanning', 'np.hanning', (['N'], {}), '(N)\n', (1755, 1758), True, 'import numpy as np\n'), ((8772, 8804), 'copy.deepcopy', 'copy.deepcopy', (['f.meaningfulNodes'], {}), '(f.meaningfulNodes)\n', (8785, 8804), False, 'import copy\n'), ((13755, 13780), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13778, 13780), False, 'import argparse\n'), ((14306, 14537), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s: %(message)s"""', 'level': '"""INFO"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""', 'filename': "('adaptive_%s_%d_%d_%d.log' % (args.audio, args.k, args.num_samples, args.\n speed_over_accuracy))", 'filemode': '"""w"""'}), "(format='%(asctime)s: %(message)s', level='INFO',\n datefmt='%m/%d/%Y %I:%M:%S %p', filename='adaptive_%s_%d_%d_%d.log' % (\n args.audio, args.k, args.num_samples, args.speed_over_accuracy),\n filemode='w')\n", (14325, 14537), False, 'import logging\n'), ((14579, 14597), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (14591, 14597), False, 'import logging\n'), ((15546, 15573), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (15571, 
15573), False, 'import multiprocessing\n'), ((15578, 15604), 'logging.info', 'logging.info', (['"""num_cores:"""'], {}), "('num_cores:')\n", (15590, 15604), False, 'import logging\n'), ((15609, 15632), 'logging.info', 'logging.info', (['num_cores'], {}), '(num_cores)\n', (15621, 15632), False, 'import logging\n'), ((16159, 16214), 'librosa.core.load', 'librosa.core.load', (["('./dataset/' + audio + '.m4a')", '(44100)'], {}), "('./dataset/' + audio + '.m4a', 44100)\n", (16176, 16214), False, 'import librosa\n'), ((551, 629), 'logging.info', 'logging.info', (['"""Error: Overlapping windows cannot be larger than frame length!"""'], {}), "('Error: Overlapping windows cannot be larger than frame length!')\n", (563, 629), False, 'import logging\n'), ((651, 661), 'sys.exit', 'sys.exit', ([], {}), '()\n', (659, 661), False, 'import sys\n'), ((2721, 2741), 'numpy.arange', 'np.arange', (['(0)', 'self.L'], {}), '(0, self.L)\n', (2730, 2741), True, 'import numpy as np\n'), ((4750, 4767), 'copy.copy', 'copy.copy', (['self.R'], {}), '(self.R)\n', (4759, 4767), False, 'import copy\n'), ((5805, 5822), 'copy.copy', 'copy.copy', (['self.R'], {}), '(self.R)\n', (5814, 5822), False, 'import copy\n'), ((5885, 5902), 'copy.copy', 'copy.copy', (['self.D'], {}), '(self.D)\n', (5894, 5902), False, 'import copy\n'), ((5920, 5937), 'copy.copy', 'copy.copy', (['self.I'], {}), '(self.I)\n', (5929, 5937), False, 'import copy\n'), ((5963, 5988), 'copy.copy', 'copy.copy', (['self.k_min_sum'], {}), '(self.k_min_sum)\n', (5972, 5988), False, 'import copy\n'), ((13575, 13631), 'logging.info', 'logging.info', (['"""NOT ENOUGH ELEMENTS left in ground set S"""'], {}), "('NOT ENOUGH ELEMENTS left in ground set S')\n", (13587, 13631), False, 'import logging\n'), ((15826, 15863), 'logging.info', 'logging.info', (['"""Parallelize sampling."""'], {}), "('Parallelize sampling.')\n", (15838, 15863), False, 'import logging\n'), ((15882, 15926), 'logging.info', 'logging.info', (['"""Do not parallelize 
sampling."""'], {}), "('Do not parallelize sampling.')\n", (15894, 15926), False, 'import logging\n'), ((774, 797), 'numpy.ceil', 'np.ceil', (['(len_signal / L)'], {}), '(len_signal / L)\n', (781, 797), True, 'import numpy as np\n'), ((1808, 1831), 'numpy.multiply', 'np.multiply', (['H', 'X[:, k]'], {}), '(H, X[:, k])\n', (1819, 1831), True, 'import numpy as np\n'), ((7439, 7471), 'numpy.delete', 'np.delete', (['I_copy', '[I_ind_k_min]'], {}), '(I_copy, [I_ind_k_min])\n', (7448, 7471), True, 'import numpy as np\n'), ((9568, 9593), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (9591, 9593), False, 'import multiprocessing\n'), ((12757, 12877), 'logging.info', 'logging.info', (['"""NEED TO DO FILTERING STEP, BUT I HAVEN\'T CODED THE functionMARG to handle this yet so breaking"""'], {}), '(\n "NEED TO DO FILTERING STEP, BUT I HAVEN\'T CODED THE functionMARG to handle this yet so breaking"\n )\n', (12769, 12877), False, 'import logging\n'), ((1331, 1342), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (1339, 1342), True, 'import numpy as np\n'), ((6569, 6584), 'numpy.linalg.norm', 'LA.norm', (['r_k', '(1)'], {}), '(r_k, 1)\n', (6576, 6584), True, 'from numpy import linalg as LA\n'), ((6587, 6602), 'numpy.linalg.norm', 'LA.norm', (['r_k', '(2)'], {}), '(r_k, 2)\n', (6594, 6602), True, 'from numpy import linalg as LA\n'), ((7004, 7023), 'numpy.linalg.norm', 'LA.norm', (['r_k_min', '(2)'], {}), '(r_k_min, 2)\n', (7011, 7023), True, 'from numpy import linalg as LA\n'), ((7335, 7352), 'numpy.dot', 'np.dot', (['r_kk', 'psi'], {}), '(r_kk, psi)\n', (7341, 7352), True, 'import numpy as np\n'), ((9753, 9828), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'sample_elements', 'args': '(samples, samplesVal)'}), '(target=sample_elements, args=(samples, samplesVal))\n', (9776, 9828), False, 'import multiprocessing\n'), ((12141, 12170), 'numpy.delete', 'np.delete', (['f.I', '[I_ind_k_min]'], {}), '(f.I, [I_ind_k_min])\n', (12150, 
12170), True, 'import numpy as np\n'), ((4965, 4999), 'numpy.linalg.norm', 'LA.norm', (['R_copy[:, I_ind_k_min]', '(1)'], {}), '(R_copy[:, I_ind_k_min], 1)\n', (4972, 4999), True, 'from numpy import linalg as LA\n'), ((5002, 5036), 'numpy.linalg.norm', 'LA.norm', (['R_copy[:, I_ind_k_min]', '(2)'], {}), '(R_copy[:, I_ind_k_min], 2)\n', (5009, 5036), True, 'from numpy import linalg as LA\n'), ((7392, 7410), 'numpy.dot', 'np.dot', (['psi', 'alpha'], {}), '(psi, alpha)\n', (7398, 7410), True, 'import numpy as np\n'), ((11150, 11169), 'numpy.linalg.norm', 'LA.norm', (['r_k_min', '(1)'], {}), '(r_k_min, 1)\n', (11157, 11169), True, 'from numpy import linalg as LA\n'), ((11172, 11191), 'numpy.linalg.norm', 'LA.norm', (['r_k_min', '(2)'], {}), '(r_k_min, 2)\n', (11179, 11191), True, 'from numpy import linalg as LA\n'), ((11668, 11687), 'numpy.linalg.norm', 'LA.norm', (['r_k_min', '(2)'], {}), '(r_k_min, 2)\n', (11675, 11687), True, 'from numpy import linalg as LA\n'), ((12036, 12053), 'numpy.dot', 'np.dot', (['r_kk', 'psi'], {}), '(r_kk, psi)\n', (12042, 12053), True, 'import numpy as np\n'), ((12291, 12305), 'numpy.vstack', 'np.vstack', (['f.D'], {}), '(f.D)\n', (12300, 12305), True, 'import numpy as np\n'), ((12361, 12375), 'numpy.dot', 'np.dot', (['D', 'D.T'], {}), '(D, D.T)\n', (12367, 12375), True, 'import numpy as np\n'), ((12094, 12112), 'numpy.dot', 'np.dot', (['psi', 'alpha'], {}), '(psi, alpha)\n', (12100, 12112), True, 'import numpy as np\n'), ((12526, 12539), 'numpy.max', 'np.max', (['s_rec'], {}), '(s_rec)\n', (12532, 12539), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 12:23:43 2018
@author: antony
"""
import glob
import os
import sys

import matplotlib
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd

import libplot
class _MidpointNormalize(Normalize):
    """Colormap normalization that pins an arbitrary midpoint at 0.5.

    Maps values piecewise-linearly so that vmin -> 0, midpoint -> 0.5 and
    vmax -> 1, keeping a diverging colormap centered on ``midpoint``.
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Simple piecewise-linear interpolation; masked values and other
        # edge cases are deliberately ignored in this minimal version.
        anchors = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors, targets))
class ReplotGSEA(object):
    """Re-draws an existing GSEA result as a publication-quality figure."""

    def __init__(self, dir):
        # Directory containing the GSEA output files (ranked gene list etc.)
        self.__dir = dir

    def replot(self, gene_set, phenoPos, phenoNeg, ranking_file, hit_file, nes, pval, fdr):
        """Replot an existing GSEA plot to make it better for publications.

        Parameters:
            gene_set: name of the gene set (used for the title / output name).
            phenoPos, phenoNeg: labels for the two phenotypes.
            ranking_file: NOTE(review): this argument is ignored — the file is
                re-derived from the directory passed to the constructor;
                confirm whether callers rely on it.
            hit_file: tab-separated table of gene hits (running ES etc.).
            nes, pval, fdr: statistics shown on the enrichment-score panel.

        Side effects: writes 'gsea_plot_<gene_set>.pdf' to the working dir.
        """
        # BUGFIX: this was a module-level-style function missing `self`, so
        # `self.__dir` below raised NameError when called on an instance.
        libplot.setup()

        matplotlib.rcParams['font.size'] = 14
        matplotlib.rcParams['mathtext.default'] = 'regular'

        # BUGFIX: glob.glob takes a single pattern, not (dir, name); build
        # the pattern and use the first match.
        # NOTE(review): assumes exactly one 'ranked_gene_list*' file exists
        # in the directory — confirm against the GSEA output layout.
        ranking_file = glob.glob(os.path.join(self.__dir, 'ranked_gene_list*'))[0]

        print(ranking_file)

        # import the rankings
        rank_data = pd.read_table(ranking_file, sep="\t", header=0, index_col=0)
        hit_data = pd.read_table(hit_file, sep="\t", header=0, index_col=0)

        # dataFrame of ranked matrix scores
        x = np.arange(rank_data.shape[0])

        # the rankings of every gene
        # NOTE(review): assumes column 3 of the ranked list holds the metric
        # and columns 4/6 of the hit table hold rank-index / running ES —
        # confirm against the GSEA file format.
        rankings = rank_data.iloc[:, 3].values

        # boost values to saturate colors
        heat_map_rankings = rankings * 2

        hit_ind = hit_data.iloc[:, 4].values
        RES = hit_data.iloc[:, 6].values

        x2 = hit_ind.tolist()
        y2 = RES.tolist()

        # center color map at midpoint = 0
        norm = _MidpointNormalize(vmin=np.min(rankings), midpoint=0, vmax=np.max(rankings))

        # Anchor the running-ES curve at both ends of the ranked list.
        if x2[0] != 0:
            x2.insert(0, 0)
            y2.insert(0, 0)

        if x2[len(x2) - 1] != rank_data.shape[0]:
            x2.append(rank_data.shape[0])
            y2.append(0)

        phenoP_label = phenoPos + ' (positively correlated)'
        phenoN_label = phenoNeg + ' (negatively correlated)'
        zero_score_ind = np.abs(rankings).argmin()
        z_score_label = 'Zero cross at ' + str(zero_score_ind)
        nes_label = 'NES: '+ "{:.3f}".format(float(nes))
        pval_label = 'Pval: '+ "{:.3f}".format(float(pval))
        fdr_label = 'FDR: '+ "{:.3f}".format(float(fdr))

        # duplicate the rankings into two rows so imshow draws a colorbar strip
        im_matrix = np.tile(heat_map_rankings, (2,1))

        # GSEA Plots: four stacked panels sharing the x axis
        gs = plt.GridSpec(16,1)
        fig = plt.figure(figsize=(10, 7))

        # Ranked Metric Scores Plot (bottom panel)
        ax1 = fig.add_subplot(gs[9:])
        ax1.fill_between(x, y1=rankings, y2=0, color='#2c5aa0')
        ax1.set_ylabel("Ranked list metric", fontsize=14)

        ax1.text(.05, .9, phenoP_label, color='red', horizontalalignment='left', verticalalignment='top',
                 transform=ax1.transAxes)
        ax1.text(.95, .05, phenoN_label, color='Blue', horizontalalignment='right', verticalalignment='bottom',
                 transform=ax1.transAxes)

        # the x coords of this transformation are data, and the y coords are axes
        trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)
        ax1.vlines(zero_score_ind, 0, 1, linewidth=1, transform=trans1, linestyles='--', color='grey')
        ax1.text(zero_score_ind, 0.5, z_score_label,
                 horizontalalignment='center',
                 verticalalignment='center',
                 transform=trans1)
        ax1.set_xlabel("Rank in Ordered Dataset", fontsize=14)
        ax1.spines['top'].set_visible(False)
        ax1.spines['right'].set_visible(False)
        ax1.spines['left'].set_color('dimgray')
        ax1.spines['bottom'].set_color('dimgray')
        ax1.locator_params(axis='y', nbins=5)
        ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc) ))

        # gene hits (tick marks panel)
        ax2 = fig.add_subplot(gs[7:9], sharex=ax1)

        # the x coords of this transformation are data, and the y coords are axes
        trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
        ax2.vlines(hit_ind, 0, 1, linewidth=.5, transform=trans2)
        ax2.spines['top'].set_visible(False)
        ax2.spines['left'].set_visible(False)
        ax2.spines['bottom'].set_visible(False)
        ax2.spines['right'].set_visible(False)
        ax2.tick_params(axis='both', which='both', bottom='off', top='off',
                        labelbottom='off', right='off', left='off', labelleft='off')

        # colormap strip
        ax3 = fig.add_subplot(gs[8:10], sharex=ax1)
        ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=plt.cm.seismic, interpolation='none') # cm.coolwarm
        ax3.spines['top'].set_visible(False)
        ax3.spines['left'].set_visible(False)
        ax3.spines['bottom'].set_visible(False)
        ax3.spines['right'].set_visible(False)
        ax3.tick_params(axis='both', which='both', bottom='off', top='off',
                        labelbottom='off', right='off', left='off',labelleft='off')

        # Enrichment score plot (top panel)
        ax4 = fig.add_subplot(gs[:8], sharex=ax1)
        ax4.plot(x2, y2, linewidth=4, color ='#2ca05a')
        ax4.tick_params(axis='both', which='both', color='dimgray')
        ax4.spines['left'].set_color('dimgray')
        ax4.spines['bottom'].set_visible(False)

        ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)
        ax4.text(.1, .2, pval_label, transform=ax4.transAxes)
        ax4.text(.1, .3, nes_label, transform=ax4.transAxes)

        # the y coords of this transformation are data, and the x coords are axes
        trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
        ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
        ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
        ax4.set_xlim(min(x), max(x))
        ax4.spines['top'].set_visible(False)
        ax4.spines['right'].set_visible(False)
        ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')
        ax4.locator_params(axis='y', nbins=5)
        ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )

        # fig adjustment
        fig.suptitle(gene_set, fontsize=16)
        fig.subplots_adjust(hspace=0)

        # sanitize the gene-set name for use in a filename
        gene_set = gene_set.replace('/','_').replace(":","_")
        out = '{}_{}.pdf'.format('gsea_plot', gene_set)
        fig.tight_layout(pad=2)
        plt.savefig(out, dpi=600)
| [
"numpy.tile",
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.arange",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.figure",
"libplot.setup",
"pandas.read_table",
"numpy.interp",
"matplotlib.transforms.blended_transform_factory",
"matplotlib.colors.Normalize.__... | [((471, 513), 'matplotlib.colors.Normalize.__init__', 'Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (489, 513), False, 'from matplotlib.colors import Normalize\n'), ((1075, 1090), 'libplot.setup', 'libplot.setup', ([], {}), '()\n', (1088, 1090), False, 'import libplot\n'), ((1236, 1277), 'glob.glob', 'glob.glob', (['self.__dir', '"""ranked_gene_list"""'], {}), "(self.__dir, 'ranked_gene_list')\n", (1245, 1277), False, 'import glob\n'), ((1401, 1461), 'pandas.read_table', 'pd.read_table', (['ranking_file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(ranking_file, sep='\\t', header=0, index_col=0)\n", (1414, 1461), True, 'import pandas as pd\n'), ((1481, 1537), 'pandas.read_table', 'pd.read_table', (['hit_file'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(hit_file, sep='\\t', header=0, index_col=0)\n", (1494, 1537), True, 'import pandas as pd\n'), ((1638, 1667), 'numpy.arange', 'np.arange', (['rank_data.shape[0]'], {}), '(rank_data.shape[0])\n', (1647, 1667), True, 'import numpy as np\n'), ((2917, 2951), 'numpy.tile', 'np.tile', (['heat_map_rankings', '(2, 1)'], {}), '(heat_map_rankings, (2, 1))\n', (2924, 2951), True, 'import numpy as np\n'), ((3237, 3256), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(16)', '(1)'], {}), '(16, 1)\n', (3249, 3256), True, 'import matplotlib.pyplot as plt\n'), ((3314, 3341), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (3324, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3965, 4031), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax1.transData', 'ax1.transAxes'], {}), '(ax1.transData, ax1.transAxes)\n', (4001, 4031), True, 'import matplotlib.transforms as transforms\n'), ((5192, 5258), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax2.transData', 'ax2.transAxes'], 
{}), '(ax2.transData, ax2.transAxes)\n', (5228, 5258), True, 'import matplotlib.transforms as transforms\n'), ((6836, 6902), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax4.transAxes', 'ax4.transData'], {}), '(ax4.transAxes, ax4.transData)\n', (6872, 6902), True, 'import matplotlib.transforms as transforms\n'), ((7924, 7949), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out'], {'dpi': '(600)'}), '(out, dpi=600)\n', (7935, 7949), True, 'import matplotlib.pyplot as plt\n'), ((760, 782), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (769, 782), True, 'import numpy as np\n'), ((2157, 2173), 'numpy.min', 'np.min', (['rankings'], {}), '(rankings)\n', (2163, 2173), True, 'import numpy as np\n'), ((2192, 2208), 'numpy.max', 'np.max', (['rankings'], {}), '(rankings)\n', (2198, 2208), True, 'import numpy as np\n'), ((2634, 2650), 'numpy.abs', 'np.abs', (['rankings'], {}), '(rankings)\n', (2640, 2650), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Resizes a convolution kernel, by adding random fill around the border.
"""
import numpy as np
import os
# NOTE(review): presumably tells `util` to skip its TensorFlow import; it
# must be set before `import util` below — confirm against util's code.
os.environ['NO_TF'] = '1'
import argparse
import util
def main():
  """Command-line entry point: load a kernel, resize it, write the result.

  Growing embeds the old kernel in the center of a larger array filled
  with scaled random noise; shrinking crops the central region. The sum
  of the output can optionally be renormalized with -norm.
  """
  p = argparse.ArgumentParser(description=
      'Given two images, determine the convolution kernel so that '
      'a * k = b')
  p.add_argument('ka', help='input kernel')
  p.add_argument('kb', help='output kernel directory')
  p.add_argument('n', type=int, help='output kernel size')
  p.add_argument('-mul', type=float, default=.5,
      help='multiplier for random fill')
  p.add_argument('-norm', type=float, default=0,
      help='normalize sum to this amount (default: zero: no normalization)')
  args = p.parse_args()

  os.mkdir(args.kb)

  step, kernel = util.load_kernel(args.ka)
  print('input kernel size', kernel.shape)
  mean_abs_in = np.mean(np.abs(kernel))
  print('input kernel mean', mean_abs_in)

  size_in = kernel.shape[0]
  size_out = args.n

  # Random fill, rescaled so its mean magnitude is a fraction (-mul)
  # of the input kernel's mean magnitude.
  resized = np.random.normal(size=(size_out, size_out, 1, 1)).astype(np.float32)
  resized *= mean_abs_in * args.mul / np.mean(np.abs(resized))

  if size_out > size_in:
    off = (size_out - size_in) // 2
    print('grow: offset', off)
    resized[off:off + size_in, off:off + size_in, :, :] = kernel
  else:
    off = (size_in - size_out) // 2
    print('shrink: offset', off)
    # cropping discards the random fill entirely
    resized = kernel[off:off + size_out, off:off + size_out, :, :]

  if args.norm != 0:
    resized = resized / np.sum(resized) * args.norm

  print('output kernel mean', np.mean(np.abs(resized)))

  util.save_kernel(args.kb, step, resized)
  print('resized from', kernel.shape, 'to', resized.shape)
if __name__ == '__main__':
main()
# vim:set ts=2 sw=2 sts=2 et:
| [
"numpy.random.normal",
"numpy.abs",
"argparse.ArgumentParser",
"util.load_kernel",
"util.save_kernel",
"numpy.sum",
"os.mkdir"
] | [((204, 316), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Given two images, determine the convolution kernel so that a * k = b"""'}), "(description=\n 'Given two images, determine the convolution kernel so that a * k = b')\n", (227, 316), False, 'import argparse\n'), ((730, 747), 'os.mkdir', 'os.mkdir', (['args.kb'], {}), '(args.kb)\n', (738, 747), False, 'import os\n'), ((765, 790), 'util.load_kernel', 'util.load_kernel', (['args.ka'], {}), '(args.ka)\n', (781, 790), False, 'import util\n'), ((1408, 1445), 'util.save_kernel', 'util.save_kernel', (['args.kb', 'step', 'outk'], {}), '(args.kb, step, outk)\n', (1424, 1445), False, 'import util\n'), ((849, 863), 'numpy.abs', 'np.abs', (['kernel'], {}), '(kernel)\n', (855, 863), True, 'import numpy as np\n'), ((1019, 1031), 'numpy.abs', 'np.abs', (['outk'], {}), '(outk)\n', (1025, 1031), True, 'import numpy as np\n'), ((1355, 1367), 'numpy.abs', 'np.abs', (['outk'], {}), '(outk)\n', (1361, 1367), True, 'import numpy as np\n'), ((945, 982), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(nb, nb, 1, 1)'}), '(size=(nb, nb, 1, 1))\n', (961, 982), True, 'import numpy as np\n'), ((1312, 1324), 'numpy.sum', 'np.sum', (['outk'], {}), '(outk)\n', (1318, 1324), True, 'import numpy as np\n')] |
from .mcmcposteriorsamplergamma import fit
from scipy.stats import norm, gamma
import pandas as pd
import numpy as np
import pickle as pk
from ..shared_functions import *
class mcmcsamplergamma:
    """
    MCMC sampler for the gamma-mixture deconvolution model.

    The noise is modelled as a mixture of K gamma components and the
    deconvolved target as a mixture of Kc gamma components; the actual
    posterior sampling is delegated to the compiled ``fit`` routine.

    Layout of each row of ``self.samples`` (as used by the score_* methods):
    [0:K] noise weights, [K:2K] noise thetas (scale), [2K:3K] noise k's (shape),
    [3K:3K+Kc] target weights, [3K+Kc:3K+2Kc] target thetas,
    [3K+2Kc:3K+3Kc] target k's.
    """
    def __init__(self, K=1, Kc=1, alpha = 1, alphac = 1):
        """
        Constructor of the class
        Parameters
        -------------
        K: int, Number of components of the noise distribution
        Kc: int, Number of components of the convolved distribution
        **kwargs:
            alpha: float, parameter to determine the hyperprior of the noise weight components
            alphac: float, parameter to determine the hyperprior of the target weight components
        """
        self.K = K
        self.Kc = Kc
        self.alpha = alpha
        self.alphac = alphac
        # Guards save()/statistics until fit() has been run.
        self.fitted = False
        return
    def fit(self, dataNoise, dataConvolution, iterations = 1000, ignored_iterations = 1000, chains = 1,
            priors = None,
            precission = 0.99, method = "moments", bias = None,
            initial_conditions = [], show_progress = True, seed = 0):
        """
        Fit the model to the posterior distribution
        Parameters
        -------------
        dataNoise: list/npArray, 1D array witht he data of the noise
        dataConvolution: list/npArray, 1D array witht he data of the convolution
        iterations: int, number of samples to be drawn and stored for each chain during the sampling
        ignored_iterations: int, number of samples to be drawn and ignored for each chain during the sampling
        chains: int, number of independently initialised realisations of the markov chain
        priors: array, parameter of the prior gamma distribution acording to the definition of the wikipedia
        kconst: float, parameter k of the prior gamma distribution
        initialConditions: list, 1D array with all the parameters required to initialise manually all the components of all the chains the chains
        show_progress: bool, indicate if the method should show the progress in the generation of the new data
        seed: int, value to initialise the random generator and obtain reproducible results
        Returns
        ---------------
        Nothing
        """
        # NOTE(review): `initial_conditions = []` is a mutable default argument;
        # it is only passed through, but a None default would be safer.
        self.data = dataNoise
        self.datac = dataConvolution
        self.iterations = iterations
        self.ignored_iterations = ignored_iterations
        self.chains = chains
        # The bias shifts all observations to be strictly positive, as required
        # by the gamma components.  (`bias == None` should be `bias is None`.)
        if bias == None:
            m = np.min([dataNoise,dataConvolution])
            if m < 0:
                # Shift slightly below the smallest observation.
                self.bias = m - 0.01
            else:
                self.bias = 0
        elif bias < np.min([dataNoise,dataConvolution]):
            self.bias = bias
        else:
            # A bias above the data minimum would produce non-positive shifted
            # values; clamp it just below the minimum instead.
            self.bias = np.min([dataNoise,dataConvolution])*0.9999
        if priors==None:
            # Weakly-informative gamma hyperpriors scaled from data moments.
            m = np.mean(dataNoise-self.bias)
            v = np.var(dataNoise-self.bias)
            self.priortheta_theta = 100*v/m
            self.priork_theta = 100*v/m
            self.priortheta_k = 1.1
            self.priork_k = 1.1
            m = np.mean(dataConvolution-self.bias)
            v = np.var(dataConvolution-self.bias)
            self.priortheta_thetac = 100*v/m
            self.priork_thetac = 100*v/m
            self.priortheta_kc = 1.1
            self.priork_kc = 1.1
        # NOTE(review): if `priors` is not None the priortheta_*/priork_*
        # attributes are never assigned, and the fit() call below will raise
        # AttributeError -- TODO confirm intended handling of explicit priors.
        self.precission = precission
        self.method = method
        # Delegate the sampling to the compiled routine; data are bias-shifted.
        self.samples = np.array(fit(dataNoise-self.bias, dataConvolution-self.bias,
                                     self.ignored_iterations, self.iterations, self.chains,
                                     self.K, self.Kc,
                                     self.alpha, self.alphac,
                                     self.priortheta_k, self.priortheta_theta, self.priork_k, self.priork_theta,
                                     self.priortheta_kc, self.priortheta_thetac, self.priork_kc, self.priork_thetac,
                                     0,
                                     self.precission, self.method,
                                     initial_conditions, show_progress, seed))
        self.fitted = True
        return
    def save(self, name):
        """
        Pickle save the model.
        Parameters
        ----------------
        name: string, name in which to store the model
        Return:
            nothing
        """
        if self.fitted:
            pickling_on = open(name+".pickle","wb")
            # NOTE(review): "priortheta_thetac" appears twice in this literal
            # (the second entry wins) and "priork_kc" is never persisted;
            # load() does not read "priork_kc" either, so a round-tripped model
            # lacks that attribute.
            pk.dump({"K":self.K, "Kc":self.Kc, "alpha": self.alpha, "alphac": self.alphac, "iterations": self.iterations,
                     "ignored_iterations": self.ignored_iterations,
                     "priortheta_k": self.priortheta_k, "priortheta_theta": self.priortheta_theta, "priork_k": self.priork_k,
                     "priork_theta": self.priork_theta, "priortheta_kc": self.priortheta_kc, "priortheta_thetac": self.priortheta_thetac,
                     "priortheta_thetac": self.priortheta_thetac, "priork_thetac": self.priork_thetac,
                     "bias":self.bias, "chains":self.chains, "samples":self.samples}, pickling_on)
            pickling_on.close()
        else:
            print("The model has not been fitted so there is nothing to save.")
        return
    def load(self, name):
        """
        Pickle load the model.
        Parameters
        ----------------
        name: string, name from which to recover the model
        Return:
            nothing
        """
        pickle_off = open(name+".pickle","rb")
        aux = pk.load(pickle_off)
        pickle_off.close()
        self.K = aux["K"]
        self.Kc = aux ["Kc"]
        self.alpha = aux["alpha"]
        self.alphac = aux["alphac"]
        self.iterations = aux["iterations"]
        self.ignored_iterations = aux["ignored_iterations"]
        self.chains = aux["chains"]
        self.samples = aux["samples"]
        self.priortheta_k = aux["priortheta_k"]
        self.priortheta_theta = aux["priortheta_theta"]
        self.priork_k = aux["priork_k"]
        self.priork_theta = aux["priork_theta"]
        self.priortheta_kc = aux["priortheta_kc"]
        self.priortheta_thetac = aux["priortheta_thetac"]
        self.priortheta_thetac = aux["priortheta_thetac"]
        self.priork_thetac = aux["priork_thetac"]
        self.bias = aux["bias"]
        # A loaded model is considered fitted and can be sampled immediately.
        self.fitted = True
        return
    def sample_autofluorescence(self, size = 1, style = "full", pos = None):
        """
        Generate samples from the fitted posterior distribution according to the noise distribution
        Parameters
        -------------
        size: int, number of samples to be drawn
        style: str, "full" mixes over all posterior draws, "single" uses one draw
        pos: int, posterior draw index for style="single" (random if None)
        Returns
        -------------
        list: list, 1D array with *size* samples from the model
        """
        # Sampling happens in the bias-shifted space, so the bias is added back.
        if style=="full":
            return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size=size, bias=0))+self.bias
        elif style=="single":
            if pos == None:
                pos = np.random.choice(range(len(self.samples)))
                return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
            else:
                return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
        # return np.array(sample_autofluorescence_gamma(self.samples,self.K,self.Kc,size))
    def sample_deconvolution(self, size = 1, style = "full", pos = None):
        """
        Generate samples from the fitted posterior distribution according to the deconvolved distribution
        Parameters
        -------------
        size: int, number of samples to be drawn
        style: str, "full" mixes over all posterior draws, "single" uses one draw
        pos: int, posterior draw index for style="single" (random if None)
        Returns
        -------------
        list: list, 1D array with *size* samples from the model
        """
        if style=="full":
            return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size=size, bias=0))+self.bias
        elif style=="single":
            if pos == None:
                pos = np.random.choice(range(len(self.samples)))
                return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
            else:
                return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
        # return np.array(sample_deconvolution_gamma(self.samples,self.K,self.Kc,size))
    def sample_convolution(self, size = 1, style = "full", pos = None):
        """
        Generate samples from the fitted posterior distribution according to the convolved distribution
        Parameters
        -------------
        size: int, number of samples to be drawn
        style: str, "full" mixes over all posterior draws, "single" uses one draw
        pos: int, posterior draw index for style="single" (random if None)
        Returns
        -------------
        list: list, 1D array with *size* samples from the model
        """
        if style=="full":
            return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size=size, bias=0))+self.bias
        elif style=="single":
            if pos == None:
                pos = np.random.choice(range(len(self.samples)))
                return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
            else:
                return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size=size,pos=pos, bias=0))+self.bias
        # return np.array(sample_convolution_gamma(self.samples,self.K,self.Kc,size))
    def score_autofluorescence(self, x, percentiles = [5, 95], size = 100):
        """
        Evaluate the mean and percentiles of the the pdf at certain position acording to the convolved distribution
        Parameters
        -------------
        x: list/array, positions where to evaluate the distribution
        percentiles: list/array, percentiles to be evaluated
        size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
        Returns
        -------------
        list: list, 2D array with the mean and all the percentile evaluations at all points in x
        """
        yT = []
        for l in range(size):
            # NOTE(review): only the first `iterations` rows are sampled even
            # though `samples` may hold chains*iterations draws -- TODO confirm.
            i = np.random.choice(self.iterations)
            y = np.zeros(len(x))
            for k in range(self.K):
                # samples row layout: [k] weight, [K+k] theta, [2K+k] shape.
                thetastar = self.samples[i,self.K+k]
                kconststar = self.samples[i,2*self.K+k]
                y += self.samples[i,k]*gamma.pdf(x,a=kconststar,scale=thetastar)
            yT.append(y)
        return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
    def score_deconvolution(self, x, percentiles = [5, 95], size = 100):
        """
        Evaluate the mean and percentiles of the the pdf at certain position acording to the deconvolved distribution
        Parameters
        -------------
        x: list/array, positions where to evaluate the distribution
        percentiles: list/array, percentiles to be evaluated
        size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
        Returns
        -------------
        list: list, 2D array with the mean and all the percentile evaluations at all points in x
        """
        yT = []
        for l in range(size):
            i = np.random.choice(self.iterations)
            y = np.zeros(len(x))
            for j in range(self.Kc):
                # Target components start at column 3K: weight, theta, shape.
                thetastar = self.samples[i,3*self.K+self.Kc+j]
                kconststar = self.samples[i,3*self.K+2*self.Kc+j]
                y += self.samples[i,3*self.K+j]*gamma.pdf(x,a=kconststar,scale=thetastar)
            yT.append(y)
        return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
    def score_convolution(self, x, percentiles = [5, 95], size = 100):
        """
        Evaluate the mean and percentiles of the the pdf at certain position acording to the convolved distribution
        Parameters
        -------------
        x: list/array, positions where to evaluate the distribution
        percentiles: list/array, percentiles to be evaluated
        size: int, number of samples to draw from the posterior to make the statistics, bigger numbers give more stability
        Returns
        -------------
        list: list, 2D array with the mean and all the percentile evaluations at all points in x
        """
        yT = []
        for l in range(size):
            i = np.random.choice(self.iterations)
            y = np.zeros(len(x))
            for j in range(self.Kc):
                for k in range(self.K):
                    theta1 = self.samples[i,self.K+k]
                    theta2 = self.samples[i,3*self.K+self.Kc+j]
                    k1 = self.samples[i,2*self.K+k]
                    k2 = self.samples[i,3*self.K+2*self.Kc+j]
                    # The sum of two gammas is approximated by a single gamma
                    # via moment matching: mean mu and variance s below.
                    mu = theta1*k1+theta2*k2
                    s = theta1*theta1*k1+theta2*theta2*k2
                    thetastar = s/mu
                    kconststar = mu*mu/s
                    y += self.samples[i,k]*self.samples[i,3*self.K+j]*gamma.pdf(x,a=kconststar,scale=thetastar)
            yT.append(y)
        return np.mean(yT,axis=0),np.percentile(yT,percentiles,axis=0)
    def sampler_statistics(self, sort="weight"):
        """
        Show statistics of correct mixing of the mcmc sampler
        Args:
            sort: ["weight", "none", "means"], method for sorting the samples from the different chains
        Returns
        -------------
        DataFrame: DataFrame the mean, std, percentiles, mixing ratio(rhat) and effective number of samples for each parameter of the model
        """
        # NOTE(review): this assignment replaces the bound method
        # `sampler_statistics` with a DataFrame on the instance, so a second
        # call on the same object raises TypeError.
        self.sampler_statistics = pd.DataFrame(columns=["Mean","Std","5%","50%","95%","Rhat","Neff"])
        samples = self.samples.copy()
        if sort == "weight":
            # Sort noise components by weight; the same permutation is applied
            # to the matching theta and shape columns to keep triplets aligned.
            argsort = np.argsort(samples[:,0:self.K],axis=1)
            samples[:,0:self.K] = np.take_along_axis(samples[:,0:self.K],argsort,axis=1)
            samples[:,self.K:2*self.K] = np.take_along_axis(samples[:,self.K:2*self.K],argsort,axis=1)
            samples[:,2*self.K:3*self.K] = np.take_along_axis(samples[:,2*self.K:3*self.K],argsort,axis=1)
            argsort = np.argsort(samples[:,3*self.K:3*self.K+self.Kc],axis=1)
            samples[:,3*self.K:3*self.K+self.Kc] = np.take_along_axis(samples[:,3*self.K:3*self.K+self.Kc],argsort,axis=1)
            samples[:,(3*self.K+self.Kc):(3*self.K+2*self.Kc)] = np.take_along_axis(samples[:,(3*self.K+self.Kc):(3*self.K+2*self.Kc)],argsort,axis=1)
            samples[:,(3*self.K+2*self.Kc):(3*self.K+3*self.Kc)] = np.take_along_axis(samples[:,(3*self.K+2*self.Kc):(3*self.K+3*self.Kc)],argsort,axis=1)
        if sort == "mean":
            # Same idea, but components are ordered by their theta column.
            argsort = np.argsort(samples[:,self.K:2*self.K],axis=1)
            samples[:,0:self.K] = np.take_along_axis(samples[:,0:self.K],argsort,axis=1)
            samples[:,self.K:2*self.K] = np.take_along_axis(samples[:,self.K:2*self.K],argsort,axis=1)
            samples[:,2*self.K:3*self.K] = np.take_along_axis(samples[:,2*self.K:3*self.K],argsort,axis=1)
            argsort = np.argsort(samples[:,3*self.K+self.Kc:3*self.K+2*self.Kc],axis=1)
            samples[:,3*self.K:3*self.K+self.Kc] = np.take_along_axis(samples[:,3*self.K:3*self.K+self.Kc],argsort,axis=1)
            samples[:,(3*self.K+self.Kc):(3*self.K+2*self.Kc)] = np.take_along_axis(samples[:,(3*self.K+self.Kc):(3*self.K+2*self.Kc)],argsort,axis=1)
            samples[:,(3*self.K+2*self.Kc):(3*self.K+3*self.Kc)] = np.take_along_axis(samples[:,(3*self.K+2*self.Kc):(3*self.K+3*self.Kc)],argsort,axis=1)
        measures = np.zeros(7)
        for i in range(3*self.K+3*self.Kc):
            measures[0] = np.mean(samples[:,i])
            measures[1] = np.std(samples[:,i])
            measures[2:5] = np.percentile(samples[:,i],[5,50,95])
            # rstat/effnumber come from the shared_functions star import.
            measures[5] = rstat(samples[:,i],self.chains)
            measures[6] = effnumber(samples[:,i],self.chains)
            #Name the component
            if i < self.K:
                name = "weight_K"+str(1+i)
            elif i < 2*self.K:
                name = "mean_K"+str(1+i-self.K)
            elif i < 3*self.K:
                name = "std_K"+str(1+i-2*self.K)
            elif i < 3*self.K+self.Kc:
                name = "weight_Kc"+str(1+i-3*self.K)
            elif i < 3*self.K+2*self.Kc:
                name = "mean_Kc"+str(1+i-3*self.K-self.Kc)
            else:
                name = "std_Kc"+str(1+i-3*self.K-2*self.Kc)
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # pd.concat would be the modern equivalent.
            self.sampler_statistics = self.sampler_statistics.append(pd.Series(measures, ["Mean","Std","5%","50%","95%","Rhat","Neff"], name=name))
        return self.sampler_statistics
| [
"pandas.Series",
"numpy.mean",
"pickle.dump",
"numpy.random.choice",
"numpy.std",
"scipy.stats.gamma.pdf",
"pickle.load",
"numpy.argsort",
"numpy.zeros",
"numpy.min",
"pandas.DataFrame",
"numpy.percentile",
"numpy.take_along_axis",
"numpy.var"
] | [((5608, 5627), 'pickle.load', 'pk.load', (['pickle_off'], {}), '(pickle_off)\n', (5615, 5627), True, 'import pickle as pk\n'), ((13840, 13913), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Mean', 'Std', '5%', '50%', '95%', 'Rhat', 'Neff']"}), "(columns=['Mean', 'Std', '5%', '50%', '95%', 'Rhat', 'Neff'])\n", (13852, 13913), True, 'import pandas as pd\n'), ((15778, 15789), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (15786, 15789), True, 'import numpy as np\n'), ((2642, 2678), 'numpy.min', 'np.min', (['[dataNoise, dataConvolution]'], {}), '([dataNoise, dataConvolution])\n', (2648, 2678), True, 'import numpy as np\n'), ((2994, 3024), 'numpy.mean', 'np.mean', (['(dataNoise - self.bias)'], {}), '(dataNoise - self.bias)\n', (3001, 3024), True, 'import numpy as np\n'), ((3039, 3068), 'numpy.var', 'np.var', (['(dataNoise - self.bias)'], {}), '(dataNoise - self.bias)\n', (3045, 3068), True, 'import numpy as np\n'), ((3235, 3271), 'numpy.mean', 'np.mean', (['(dataConvolution - self.bias)'], {}), '(dataConvolution - self.bias)\n', (3242, 3271), True, 'import numpy as np\n'), ((3286, 3321), 'numpy.var', 'np.var', (['(dataConvolution - self.bias)'], {}), '(dataConvolution - self.bias)\n', (3292, 3321), True, 'import numpy as np\n'), ((4532, 5107), 'pickle.dump', 'pk.dump', (["{'K': self.K, 'Kc': self.Kc, 'alpha': self.alpha, 'alphac': self.alphac,\n 'iterations': self.iterations, 'ignored_iterations': self.\n ignored_iterations, 'priortheta_k': self.priortheta_k,\n 'priortheta_theta': self.priortheta_theta, 'priork_k': self.priork_k,\n 'priork_theta': self.priork_theta, 'priortheta_kc': self.priortheta_kc,\n 'priortheta_thetac': self.priortheta_thetac, 'priortheta_thetac': self.\n priortheta_thetac, 'priork_thetac': self.priork_thetac, 'bias': self.\n bias, 'chains': self.chains, 'samples': self.samples}", 'pickling_on'], {}), "({'K': self.K, 'Kc': self.Kc, 'alpha': self.alpha, 'alphac': self.\n alphac, 'iterations': self.iterations, 
'ignored_iterations': self.\n ignored_iterations, 'priortheta_k': self.priortheta_k,\n 'priortheta_theta': self.priortheta_theta, 'priork_k': self.priork_k,\n 'priork_theta': self.priork_theta, 'priortheta_kc': self.priortheta_kc,\n 'priortheta_thetac': self.priortheta_thetac, 'priortheta_thetac': self.\n priortheta_thetac, 'priork_thetac': self.priork_thetac, 'bias': self.\n bias, 'chains': self.chains, 'samples': self.samples}, pickling_on)\n", (4539, 5107), True, 'import pickle as pk\n'), ((10263, 10296), 'numpy.random.choice', 'np.random.choice', (['self.iterations'], {}), '(self.iterations)\n', (10279, 10296), True, 'import numpy as np\n'), ((10619, 10638), 'numpy.mean', 'np.mean', (['yT'], {'axis': '(0)'}), '(yT, axis=0)\n', (10626, 10638), True, 'import numpy as np\n'), ((10638, 10676), 'numpy.percentile', 'np.percentile', (['yT', 'percentiles'], {'axis': '(0)'}), '(yT, percentiles, axis=0)\n', (10651, 10676), True, 'import numpy as np\n'), ((11400, 11433), 'numpy.random.choice', 'np.random.choice', (['self.iterations'], {}), '(self.iterations)\n', (11416, 11433), True, 'import numpy as np\n'), ((11786, 11805), 'numpy.mean', 'np.mean', (['yT'], {'axis': '(0)'}), '(yT, axis=0)\n', (11793, 11805), True, 'import numpy as np\n'), ((11805, 11843), 'numpy.percentile', 'np.percentile', (['yT', 'percentiles'], {'axis': '(0)'}), '(yT, percentiles, axis=0)\n', (11818, 11843), True, 'import numpy as np\n'), ((12563, 12596), 'numpy.random.choice', 'np.random.choice', (['self.iterations'], {}), '(self.iterations)\n', (12579, 12596), True, 'import numpy as np\n'), ((13295, 13314), 'numpy.mean', 'np.mean', (['yT'], {'axis': '(0)'}), '(yT, axis=0)\n', (13302, 13314), True, 'import numpy as np\n'), ((13314, 13352), 'numpy.percentile', 'np.percentile', (['yT', 'percentiles'], {'axis': '(0)'}), '(yT, percentiles, axis=0)\n', (13327, 13352), True, 'import numpy as np\n'), ((13999, 14039), 'numpy.argsort', 'np.argsort', (['samples[:, 0:self.K]'], {'axis': '(1)'}), '(samples[:, 
0:self.K], axis=1)\n', (14009, 14039), True, 'import numpy as np\n'), ((14072, 14129), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 0:self.K]', 'argsort'], {'axis': '(1)'}), '(samples[:, 0:self.K], argsort, axis=1)\n', (14090, 14129), True, 'import numpy as np\n'), ((14168, 14234), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, self.K:2 * self.K]', 'argsort'], {'axis': '(1)'}), '(samples[:, self.K:2 * self.K], argsort, axis=1)\n', (14186, 14234), True, 'import numpy as np\n'), ((14273, 14343), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 2 * self.K:3 * self.K]', 'argsort'], {'axis': '(1)'}), '(samples[:, 2 * self.K:3 * self.K], argsort, axis=1)\n', (14291, 14343), True, 'import numpy as np\n'), ((14360, 14423), 'numpy.argsort', 'np.argsort', (['samples[:, 3 * self.K:3 * self.K + self.Kc]'], {'axis': '(1)'}), '(samples[:, 3 * self.K:3 * self.K + self.Kc], axis=1)\n', (14370, 14423), True, 'import numpy as np\n'), ((14467, 14552), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 3 * self.K:3 * self.K + self.Kc]', 'argsort'], {'axis': '(1)'}), '(samples[:, 3 * self.K:3 * self.K + self.Kc], argsort, axis=1\n )\n', (14485, 14552), True, 'import numpy as np\n'), ((14604, 14703), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 3 * self.K + self.Kc:3 * self.K + 2 * self.Kc]', 'argsort'], {'axis': '(1)'}), '(samples[:, 3 * self.K + self.Kc:3 * self.K + 2 * self.Kc\n ], argsort, axis=1)\n', (14622, 14703), True, 'import numpy as np\n'), ((14757, 14859), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 3 * self.K + 2 * self.Kc:3 * self.K + 3 * self.Kc]', 'argsort'], {'axis': '(1)'}), '(samples[:, 3 * self.K + 2 * self.Kc:3 * self.K + 3 *\n self.Kc], argsort, axis=1)\n', (14775, 14859), True, 'import numpy as np\n'), ((14895, 14944), 'numpy.argsort', 'np.argsort', (['samples[:, self.K:2 * self.K]'], {'axis': '(1)'}), '(samples[:, self.K:2 * self.K], axis=1)\n', (14905, 14944), True, 'import 
numpy as np\n'), ((14975, 15032), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 0:self.K]', 'argsort'], {'axis': '(1)'}), '(samples[:, 0:self.K], argsort, axis=1)\n', (14993, 15032), True, 'import numpy as np\n'), ((15071, 15137), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, self.K:2 * self.K]', 'argsort'], {'axis': '(1)'}), '(samples[:, self.K:2 * self.K], argsort, axis=1)\n', (15089, 15137), True, 'import numpy as np\n'), ((15176, 15246), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 2 * self.K:3 * self.K]', 'argsort'], {'axis': '(1)'}), '(samples[:, 2 * self.K:3 * self.K], argsort, axis=1)\n', (15194, 15246), True, 'import numpy as np\n'), ((15263, 15340), 'numpy.argsort', 'np.argsort', (['samples[:, 3 * self.K + self.Kc:3 * self.K + 2 * self.Kc]'], {'axis': '(1)'}), '(samples[:, 3 * self.K + self.Kc:3 * self.K + 2 * self.Kc], axis=1)\n', (15273, 15340), True, 'import numpy as np\n'), ((15380, 15465), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 3 * self.K:3 * self.K + self.Kc]', 'argsort'], {'axis': '(1)'}), '(samples[:, 3 * self.K:3 * self.K + self.Kc], argsort, axis=1\n )\n', (15398, 15465), True, 'import numpy as np\n'), ((15517, 15616), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 3 * self.K + self.Kc:3 * self.K + 2 * self.Kc]', 'argsort'], {'axis': '(1)'}), '(samples[:, 3 * self.K + self.Kc:3 * self.K + 2 * self.Kc\n ], argsort, axis=1)\n', (15535, 15616), True, 'import numpy as np\n'), ((15670, 15772), 'numpy.take_along_axis', 'np.take_along_axis', (['samples[:, 3 * self.K + 2 * self.Kc:3 * self.K + 3 * self.Kc]', 'argsort'], {'axis': '(1)'}), '(samples[:, 3 * self.K + 2 * self.Kc:3 * self.K + 3 *\n self.Kc], argsort, axis=1)\n', (15688, 15772), True, 'import numpy as np\n'), ((15860, 15882), 'numpy.mean', 'np.mean', (['samples[:, i]'], {}), '(samples[:, i])\n', (15867, 15882), True, 'import numpy as np\n'), ((15908, 15929), 'numpy.std', 'np.std', (['samples[:, i]'], {}), 
'(samples[:, i])\n', (15914, 15929), True, 'import numpy as np\n'), ((15957, 15998), 'numpy.percentile', 'np.percentile', (['samples[:, i]', '[5, 50, 95]'], {}), '(samples[:, i], [5, 50, 95])\n', (15970, 15998), True, 'import numpy as np\n'), ((2805, 2841), 'numpy.min', 'np.min', (['[dataNoise, dataConvolution]'], {}), '([dataNoise, dataConvolution])\n', (2811, 2841), True, 'import numpy as np\n'), ((16717, 16804), 'pandas.Series', 'pd.Series', (['measures', "['Mean', 'Std', '5%', '50%', '95%', 'Rhat', 'Neff']"], {'name': 'name'}), "(measures, ['Mean', 'Std', '5%', '50%', '95%', 'Rhat', 'Neff'],\n name=name)\n", (16726, 16804), True, 'import pandas as pd\n'), ((2909, 2945), 'numpy.min', 'np.min', (['[dataNoise, dataConvolution]'], {}), '([dataNoise, dataConvolution])\n', (2915, 2945), True, 'import numpy as np\n'), ((10535, 10578), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x'], {'a': 'kconststar', 'scale': 'thetastar'}), '(x, a=kconststar, scale=thetastar)\n', (10544, 10578), False, 'from scipy.stats import norm, gamma\n'), ((11702, 11745), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x'], {'a': 'kconststar', 'scale': 'thetastar'}), '(x, a=kconststar, scale=thetastar)\n', (11711, 11745), False, 'from scipy.stats import norm, gamma\n'), ((13211, 13254), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x'], {'a': 'kconststar', 'scale': 'thetastar'}), '(x, a=kconststar, scale=thetastar)\n', (13220, 13254), False, 'from scipy.stats import norm, gamma\n')] |
import numpy as np
import random
from tqdm import tqdm
import math
import time
# random.seed(1)
global connection_info
def generate_optimal(n):
    """Return a length-``n`` 0/1 state vector with a fixed set of nodes selected.

    The hard-coded ``optimal`` indices are a known good independent set for
    the benchmark graph used in this experiment.

    :param n: number of nodes in the graph.
    :return: numpy array of shape (n,), 1.0 at selected indices, 0.0 elsewhere.
    """
    optimal = [0, 3, 7, 8, 14, 18, 25, 26, 28, 30]
    state = np.zeros(n)
    # np.zeros already initialises everything to 0, so only the selected
    # indices need setting; indices >= n are ignored so small n still works.
    valid = [i for i in optimal if i < n]
    state[valid] = 1
    return state
def reinforcement_learning(alpha,beta,gamma,theta,graph,batch_size):
    """Probability-matrix reinforcement search for a large independent set.

    Maintains pairwise selection probabilities (pmat2/pmat3), alternates
    between probability-guided and uniformly random restarts, refines each
    candidate with local search until it stops improving, and reinforces the
    matrices with the accepted state.  Progress is printed; nothing is
    returned.

    :param alpha: step size for the probability-matrix updates.
    :param beta: passed through to update_function (currently unused there).
    :param gamma: NOTE(review) declared but never used in this function.
    :param theta: probability of a probability-guided (vs uniform) restart.
    :param graph: adjacency matrix (numpy, 0/1 entries).
    :param batch_size: number of random flip candidates per local-search pass.
    """
    # alpha, beta, gamma are hyperparameters
    # graph stores the graph information with numpy matrix
    # pmat1~3 stores probability info
    # Initialize the probability matrix
    n = graph.shape[0] # node number
    t = 0
    max_iteration = 1000
    temp_best_cost = 99999999
    temp_best_state = None
    pmat1 = np.zeros([1,n])
    pmat2 = np.zeros([n,n])
    pmat3 = np.zeros([n,n])
    # All off-diagonal pair probabilities start uninformative at 0.5.
    for i in range(n):
        pmat1[0,i] = 0.5
        for j in range(n):
            if (i != j):
                pmat2[i,j] = 0.5
                pmat3[i,j] = 0.5
    #global connection_info
    #connection_info = dict()
    # graph is a sparse matrix, so first represent it in a better way
    #for i in range(n):
    #    temp = set()
    #    for j in range(n):
    #        if graph[i,j] == 1:
    #            temp.add(j)
    #    connection_info[i] = temp
    # Generate the First State
    state = generate_state(pmat1,pmat2,pmat3,graph)
    for t in tqdm(range(max_iteration)):
        # Restart: probability-guided with chance theta, otherwise uniform.
        if (random.random()<theta):
            state = generate_state(pmat1,pmat2,pmat3,graph)
        else:
            state = generate_random_state(n, graph)
        old_state_1 = state.copy()
        old_state = state.copy()
        # Repeat local search until a fixed point (no element changes).
        state = local_search(graph,np.array(state),batch_size)
        while(sum(abs(np.array(old_state) - np.array(state))) != 0):
            old_state = state.copy()
            state = local_search(graph,np.array(state),batch_size)
        # Reinforce the pair matrices towards the refined state.
        [pmat2, pmat3] = update_function(pmat1,pmat2,pmat3,old_state_1,state,calculate_conflict(state, graph),t,alpha,beta)
        temp_cost = cost_function(state,graph)
        if(temp_cost < temp_best_cost):
            temp_best_cost = temp_cost
            temp_best_state = state
        if (t % 100 == 0):
            print(sum(generate_state(pmat1,pmat2,pmat3,graph)))
            print(sum(temp_best_state))
    # Final polish of the incumbent with a full-size local-search batch.
    temp_best_state = local_search(graph,temp_best_state,len(state))
    temp_best_cost = cost_function(temp_best_state,graph)
    print(np.sum(pmat1))
    print(np.sum(pmat2))
    print(np.sum(pmat3))
    print(pmat2[0,:])
    print(pmat3[0,:])
    print(temp_best_cost)
    print(sum(temp_best_state))
def detect_conf(state, newnode, graph, n):
    """Return True if ``newnode`` is adjacent to any node already selected.

    :param state: 0/1 indicator sequence of currently selected nodes.
    :param newnode: candidate node index.
    :param graph: adjacency matrix.
    :param n: number of nodes to scan.
    """
    return any(state[node] == 1 and graph[node, newnode] == 1 for node in range(n))
def local_search(graph, state, batch_size):
    """One randomized local-search pass over ``batch_size`` candidate flips.

    For each randomly chosen node that would not create a conflict, the node
    is tentatively flipped; the flip is kept only when it lowers the cost.

    :param graph: adjacency matrix.
    :param state: numpy 0/1 state vector (mutated during the search).
    :param batch_size: number of random candidate nodes to try.
    :return: the improved state (a copy carrying only the accepted flips).
    """
    n = state.shape[0]
    candidates = [random.randint(0, n - 1) for _ in range(batch_size)]
    best = state.copy()
    for node in candidates:
        if not detect_conf(state, node, graph, n):
            # Tentative flip; accept it only on strict cost improvement.
            state[node] = 1 - state[node]
            if cost_function(state, graph) < cost_function(best, graph):
                best[node] = 1 - best[node]
            else:
                state[node] = 1 - state[node]
    return best
def update_function(matrix1, matrix2, matrix3, old_state, state, old_conflicts, t, alpha, beta):
    """Reinforce the pairwise selection-probability matrices with ``state``.

    matrix2[i, j] tracks P(select j | i selected); matrix3[i, j] tracks
    P(select j | i not selected).  Pairs consistent with the new state are
    reinforced and inconsistent pairs penalised, both scaled by ``alpha``.

    ``matrix1``, ``old_state``, ``old_conflicts``, ``t`` and ``beta`` no
    longer influence the result; they are kept in the signature for
    call-site compatibility.  (The original body computed several values
    from them that were never used.)

    :param alpha: step size of the matrix updates.
    :return: [matrix2, matrix3], entries clipped to [0, 1].
    """
    state_01 = state.copy()
    state_10 = 1 - state_01
    # NOTE: an all-ones (or all-zeros) state makes a normalising sum zero and
    # divides by zero, exactly as the original implementation did.
    matrix2 -= alpha * np.outer(state_01, state_10) / sum(state_10)
    matrix2 += alpha * np.outer(state_01, state_01) / sum(state_01)
    matrix3 += alpha * np.outer(state_10, state_01) / sum(state_01)
    matrix3 -= alpha * np.outer(state_10, state_10) / sum(state_10)
    # Keep the entries valid probabilities.
    matrix2 = np.clip(matrix2, 0, 1)
    matrix3 = np.clip(matrix3, 0, 1)
    return [matrix2, matrix3]
def update_function_old(matrix1, matrix2, matrix3, old_state, state, old_conflicts, t, alpha, beta):
    """Legacy update rule blending all three probability matrices.

    Per-node conflict counts (``old_conflicts``) weight the reinforcement:
    nodes with fewer conflicts receive a stronger update.  Both step sizes
    ``alpha`` and ``beta`` are annealed by 1/log(t).

    :return: [matrix1, matrix2, matrix3] after the update.
    """
    # Shift t so log(t) > 1 already at the first iteration.
    t = t + 2.7
    damping = 1 / math.log(t)
    state_01 = state.copy()
    sign = state_01 * 2 - 1                 # +1 for selected nodes, -1 otherwise
    weight = 1 / (old_conflicts.copy() + 1)
    sign_rows = np.tile(sign, (np.shape(sign)[0], 1))
    # Pairwise reinforcement, signed by whether the target node is selected.
    matrix2 += (alpha * damping) * np.multiply(np.outer(state_01, weight), sign_rows)
    matrix3 += (alpha * damping) * np.multiply(np.outer(1 - state_01, weight), sign_rows)
    matrix2 = np.maximum(0, matrix2)
    matrix3 = np.maximum(0, matrix3)
    # Pull matrix1 towards the current state, weighted by the conflict scores.
    pull = weight * (beta * damping)
    matrix1 = np.multiply(matrix1, 1 - pull)
    matrix1 += np.multiply(state_01, pull)
    return [matrix1, matrix2, matrix3]
def calculate_conflict(state, graph):
    """Placeholder conflict counter for a candidate independent set.

    Builds the selected-node set and a zeroed per-node conflict array, but the
    actual counting is disabled, so the function always returns None (its
    caller ignores the result).

    :param state: 0/1 indicator sequence of selected nodes.
    :param graph: adjacency matrix (only its node count is used).
    """
    n = graph.shape[0]
    conflict_info = np.zeros([1, n])
    chosen = {i for i, v in enumerate(state) if v == 1}
    k = len(chosen)
    # Conflict counting is intentionally disabled; conflict_info would be
    # filled here by scanning each chosen node's neighbours.
    return None
def cost_function(state, graph):
    """Reward of a candidate solution: 1 / (number of selected nodes + 1).

    Growing the independent set lowers this value, so the search minimises it.
    The conflict penalty term is disabled (commented out in the original
    design), hence ``graph`` does not affect the result.
    """
    n = graph.shape[0]   # kept for interface parity with the conflict-aware variant
    selected = sum(1 for v in state if v == 1)
    # +1 keeps the reward finite for the empty selection.
    return 1 / (selected + 1)
def flipcoin(p):
    """Bernoulli trial: return True with probability ``p``."""
    return random.random() < p
def generate_random_state(n, graph):
    """Build a random conflict-free 0/1 state, visiting nodes in random order.

    Each node is visited exactly once; a node that would conflict with the
    current selection stays 0, otherwise it is switched on with probability
    one half.
    """
    state = [0] * n
    remaining = list(range(n))
    for _ in range(n):
        node = remaining.pop(random.randint(0, len(remaining) - 1))
        # Only conflict-free nodes may be switched on.
        if not detect_conf(state, node, graph, n) and flipcoin(0.5):
            state[node] = 1
    return state
def generate_random_state_old(n):
    """Return a uniformly random 0/1 numpy vector of length ``n`` (legacy)."""
    return np.array([random.randint(0, 1) for _ in range(n)])
def generate_state(pmat1,pmat2,pmat3, graph):
    """Sample a conflict-free state guided by the pairwise probability matrices.

    The first node is chosen uniformly and switched on with probability 1/2.
    Every further node (in random order) is switched on with a probability
    averaged from pmat2/pmat3 entries of the already-decided nodes, and is
    forced to 0 if it conflicts with the current selection.

    Entries of the returned list are 0 or 1 (the -1 placeholders are all
    overwritten by the end of the loop).
    """
    #generate state by the three probability matrix
    allstate=[]
    choice = []
    n=len(pmat1[0])
    # -1 marks "not yet decided".
    for i in range(n):
        allstate.append(-1)
    pma1_list=pmat1[0].tolist()   # NOTE(review): computed but never used
    nodelist = [i for i in range(n)]
    newnode = nodelist[random.randint(0, len(nodelist) - 1)]
    if flipcoin(0.5) == True:
        allstate[newnode] = 1
    else:
        allstate[newnode] = 0
    #first choose highest prob node
    chosen =[]
    nodelist=[i for i in range(n)]
    nodelist.remove(newnode)
    #choose the other n-1 nodes
    for i in range(n-1):
        prob=0.0
        newnode=nodelist[random.randint(0, len(nodelist)-1)]
        nodelist.remove(newnode)
        # Conflicting nodes can never be selected.
        if detect_conf(allstate, newnode, graph, n) == True:
            allstate[newnode] = 0
            continue
        # newnode=random.randint(0, n-1)
        # while allstate[newnode]!=-1:
        #     newnode=random.randint(0, n-1)
        #determine node by chosen nodes' prob in matrix 2,3
        # Average pmat2 (node j selected) / pmat3 (node j unselected) entries
        # over the i+1 nodes already decided.
        for j in range(n):
            if (allstate[j]==1):
                prob+=pmat2[j,newnode]
            if (allstate[j]==0):
                prob+=pmat3[j,newnode]
        prob/=(i+1)
        if flipcoin(prob) == True:
            chosen.append(newnode)
            allstate[newnode]=1
        else:
            allstate[newnode]=0
    return allstate
| [
"numpy.shape",
"numpy.multiply",
"numpy.minimum",
"math.log",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"random.random",
"numpy.sum",
"numpy.maximum",
"random.randint"
] | [((203, 214), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (211, 214), True, 'import numpy as np\n'), ((671, 687), 'numpy.zeros', 'np.zeros', (['[1, n]'], {}), '([1, n])\n', (679, 687), True, 'import numpy as np\n'), ((696, 712), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (704, 712), True, 'import numpy as np\n'), ((721, 737), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (729, 737), True, 'import numpy as np\n'), ((3265, 3290), 'numpy.minimum', 'np.minimum', (['(0)', 'difference'], {}), '(0, difference)\n', (3275, 3290), True, 'import numpy as np\n'), ((3802, 3824), 'numpy.maximum', 'np.maximum', (['(0)', 'matrix1'], {}), '(0, matrix1)\n', (3812, 3824), True, 'import numpy as np\n'), ((3836, 3858), 'numpy.minimum', 'np.minimum', (['(1)', 'matrix1'], {}), '(1, matrix1)\n', (3846, 3858), True, 'import numpy as np\n'), ((3870, 3892), 'numpy.maximum', 'np.maximum', (['(0)', 'matrix2'], {}), '(0, matrix2)\n', (3880, 3892), True, 'import numpy as np\n'), ((3904, 3926), 'numpy.minimum', 'np.minimum', (['(1)', 'matrix2'], {}), '(1, matrix2)\n', (3914, 3926), True, 'import numpy as np\n'), ((3938, 3960), 'numpy.maximum', 'np.maximum', (['(0)', 'matrix3'], {}), '(0, matrix3)\n', (3948, 3960), True, 'import numpy as np\n'), ((3972, 3994), 'numpy.minimum', 'np.minimum', (['(1)', 'matrix3'], {}), '(1, matrix3)\n', (3982, 3994), True, 'import numpy as np\n'), ((4591, 4620), 'numpy.outer', 'np.outer', (['state_01', 'conflicts'], {}), '(state_01, conflicts)\n', (4599, 4620), True, 'import numpy as np\n'), ((4639, 4672), 'numpy.outer', 'np.outer', (['(1 - state_01)', 'conflicts'], {}), '(1 - state_01, conflicts)\n', (4647, 4672), True, 'import numpy as np\n'), ((4693, 4739), 'numpy.multiply', 'np.multiply', (['update_matrix2', 'expanded_state_11'], {}), '(update_matrix2, expanded_state_11)\n', (4704, 4739), True, 'import numpy as np\n'), ((4758, 4804), 'numpy.multiply', 'np.multiply', (['update_matrix3', 'expanded_state_11'], {}), 
'(update_matrix3, expanded_state_11)\n', (4769, 4804), True, 'import numpy as np\n'), ((4920, 4942), 'numpy.maximum', 'np.maximum', (['(0)', 'matrix2'], {}), '(0, matrix2)\n', (4930, 4942), True, 'import numpy as np\n'), ((4954, 4976), 'numpy.maximum', 'np.maximum', (['(0)', 'matrix3'], {}), '(0, matrix3)\n', (4964, 4976), True, 'import numpy as np\n'), ((5075, 5110), 'numpy.multiply', 'np.multiply', (['matrix1', '(1 - conflicts)'], {}), '(matrix1, 1 - conflicts)\n', (5086, 5110), True, 'import numpy as np\n'), ((5123, 5155), 'numpy.multiply', 'np.multiply', (['state_01', 'conflicts'], {}), '(state_01, conflicts)\n', (5134, 5155), True, 'import numpy as np\n'), ((5364, 5380), 'numpy.zeros', 'np.zeros', (['[1, n]'], {}), '([1, n])\n', (5372, 5380), True, 'import numpy as np\n'), ((6240, 6255), 'random.random', 'random.random', ([], {}), '()\n', (6253, 6255), False, 'import random\n'), ((6738, 6753), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (6746, 6753), True, 'import numpy as np\n'), ((2102, 2115), 'numpy.sum', 'np.sum', (['pmat1'], {}), '(pmat1)\n', (2108, 2115), True, 'import numpy as np\n'), ((2124, 2137), 'numpy.sum', 'np.sum', (['pmat2'], {}), '(pmat2)\n', (2130, 2137), True, 'import numpy as np\n'), ((2146, 2159), 'numpy.sum', 'np.sum', (['pmat3'], {}), '(pmat3)\n', (2152, 2159), True, 'import numpy as np\n'), ((4993, 5004), 'math.log', 'math.log', (['t'], {}), '(t)\n', (5001, 5004), False, 'import math\n'), ((1226, 1241), 'random.random', 'random.random', ([], {}), '()\n', (1239, 1241), False, 'import random\n'), ((1438, 1453), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1446, 1453), True, 'import numpy as np\n'), ((2584, 2608), 'random.randint', 'random.randint', (['(0)', '(n - 1)'], {}), '(0, n - 1)\n', (2598, 2608), False, 'import random\n'), ((3521, 3549), 'numpy.outer', 'np.outer', (['state_01', 'state_10'], {}), '(state_01, state_10)\n', (3529, 3549), True, 'import numpy as np\n'), ((3587, 3615), 'numpy.outer', 'np.outer', 
(['state_01', 'state_01'], {}), '(state_01, state_01)\n', (3595, 3615), True, 'import numpy as np\n'), ((3679, 3707), 'numpy.outer', 'np.outer', (['state_10', 'state_01'], {}), '(state_10, state_01)\n', (3687, 3707), True, 'import numpy as np\n'), ((3745, 3773), 'numpy.outer', 'np.outer', (['state_10', 'state_10'], {}), '(state_10, state_10)\n', (3753, 3773), True, 'import numpy as np\n'), ((4826, 4837), 'math.log', 'math.log', (['t'], {}), '(t)\n', (4834, 4837), False, 'import math\n'), ((4878, 4889), 'math.log', 'math.log', (['t'], {}), '(t)\n', (4886, 4889), False, 'import math\n'), ((6709, 6729), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (6723, 6729), False, 'import random\n'), ((1587, 1602), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1595, 1602), True, 'import numpy as np\n'), ((4545, 4563), 'numpy.shape', 'np.shape', (['state_11'], {}), '(state_11)\n', (4553, 4563), True, 'import numpy as np\n'), ((1482, 1501), 'numpy.array', 'np.array', (['old_state'], {}), '(old_state)\n', (1490, 1501), True, 'import numpy as np\n'), ((1504, 1519), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1512, 1519), True, 'import numpy as np\n')] |
import gpsd
import time
import numpy as np
# WGS-84 reference-ellipsoid parameters used by the ECEF/ENU conversions below
A = 6378137.0 # WGS-84 Earth semimajor axis (m)
B = 6356752.314245 # Derived Earth semiminor axis (m)
F = (A - B)/A # Ellipsoid Flatness
F_INV = 1.0/F # Inverse Flattening
A_SQ = A**2 # semimajor axis squared
B_SQ = B**2 # semiminor axis squared
E_SQ = F * (2 - F) # Square of Eccentricity
class GNSSReceiver():
    """
    Thin wrapper around the `gpsd` client that reports positions in a local
    Cartesian East-North-Up frame anchored at a fixed reference point, and
    optionally logs a once-per-second position trace to a CSV file.
    """
    def __init__(self, gpsReferencePoint: dict, experimentName: str = ""):
        """
        :param gpsReferencePoint: dict with "lat", "lon", "alt" entries (WGS84) giving the ENU origin
        :param experimentName: when non-empty, positions are appended to '<experimentName>_trace.csv'
        """
        self.gpsReferencePoint = (gpsReferencePoint["lat"], gpsReferencePoint["lon"], gpsReferencePoint["alt"])
        self.fn = f'{experimentName}_trace.csv' if experimentName != "" else ""
        gpsd.connect()
        self.found_initial_fix = False
        print("Waiting for GNSS fix")
        while not self.found_initial_fix:
            # gpsd mode > 2 corresponds to at least a 3D fix
            if gpsd.get_current().mode > 2:
                self.found_initial_fix = True
            else:
                time.sleep(1)  # Try again every second
        self.t = np.floor(time.time())  # timestamp (s) of the last trace record
    def getCurrentPosition_Sat(self) -> tuple:
        '''
        Get current Position in Satellite coordinates (WGS84)
        :return: (lat, lon, alt)
        '''
        raw = gpsd.get_current()
        return raw.lat, raw.lon, raw.alt
    def getCurrentPosition(self) -> np.array:
        '''
        Get current position in Cartesian coordinates
        :return: (x, y, z) East-North-Up offset from the reference point (m)
        '''
        p = np.array(ecefToEnu(*geodeticToEcef(*self.getCurrentPosition_Sat()), *self.gpsReferencePoint))
        self.fileWriteManager(p)
        return p
    def fileWriteManager(self, p: np.array) -> None:
        '''
        Writes position to trace file if specified
        :param p: Position in local tangent plane
        :return:
        '''
        t = np.floor(time.time())
        if t - self.t >= 1.0:  # Record every second
            if self.fn != "":
                # context manager guarantees the handle is closed even if
                # the write fails (original used open/write/close manually)
                with open(self.fn, "a") as f:
                    f.write(f'{t},{p[0]},{p[1]},{p[2]}\n')
            self.t = t
# Conversion from GPS (WGS84) to Local Tangent Plane
# Credits: https://gist.github.com/govert/1b373696c9a27ff4c72a
def degreesToRadians(ang: float) -> float:
    """Convert an angle from degrees to radians."""
    scale = np.pi / 180.0
    return scale * ang
def radiansToDegrees(ang: float) -> float:
    """Convert an angle from radians to degrees."""
    scale = 180.0 / np.pi
    return scale * ang
def geodeticToEcef(lat: float, lon: float, h: float) -> np.array:
    """
    Convert WGS-84 geodetic coordinates to Earth-Centered Earth-Fixed (ECEF).

    :param lat: geodetic latitude (degrees)
    :param lon: longitude (degrees)
    :param h: height above the ellipsoid (m)
    :return: np.array([x, y, z]) in meters
    """
    _lambda = degreesToRadians(lat)  # latitude (rad)
    _phi = degreesToRadians(lon)     # longitude (rad)
    sin_lambda = np.sin(_lambda)
    cos_lambda = np.cos(_lambda)
    cos_phi = np.cos(_phi)
    sin_phi = np.sin(_phi)
    # prime vertical radius of curvature; reuses sin_lambda instead of
    # recomputing np.sin(_lambda) twice as the original did
    N = A / np.sqrt(1 - E_SQ * sin_lambda ** 2)
    x = (h + N) * cos_lambda * cos_phi
    y = (h + N) * cos_lambda * sin_phi
    z = (h + (1 - E_SQ) * N) * sin_lambda
    return np.array([x, y, z])
def ecefToEnu(x: float, y: float, z: float, lat0: float, lon0: float, h0: float):
    """
    Convert an ECEF position to local East-North-Up coordinates relative to
    the geodetic reference point (lat0, lon0, h0).

    :param x: ECEF x (m)
    :param y: ECEF y (m)
    :param z: ECEF z (m)
    :param lat0: reference latitude (degrees)
    :param lon0: reference longitude (degrees)
    :param h0: reference altitude (m)
    :return: np.array([east, north, up]) in meters
    """
    _lambda = degreesToRadians(lat0)  # reference latitude (rad)
    _phi = degreesToRadians(lon0)     # reference longitude (rad)
    sin_lambda = np.sin(_lambda)
    cos_lambda = np.cos(_lambda)
    cos_phi = np.cos(_phi)
    sin_phi = np.sin(_phi)
    # prime vertical radius of curvature; reuses sin_lambda instead of
    # recomputing np.sin(_lambda) twice as the original did
    N = A / np.sqrt(1 - E_SQ * sin_lambda ** 2)
    # ECEF coordinates of the reference point
    x0 = (h0 + N) * cos_lambda * cos_phi
    y0 = (h0 + N) * cos_lambda * sin_phi
    z0 = (h0 + (1 - E_SQ) * N) * sin_lambda
    xd = x - x0
    yd = y - y0
    zd = z - z0
    # This is the matrix multiplication (rotation into the tangent plane)
    xEast = -sin_phi * xd + cos_phi * yd
    yNorth = -cos_phi * sin_lambda * xd - sin_lambda * sin_phi * yd + cos_lambda * zd
    zUp = cos_lambda * cos_phi * xd + cos_lambda * sin_phi * yd + sin_lambda * zd
    return np.array([xEast, yNorth, zUp])
if __name__ == '__main__':
    # ENU reference point for the live demo (Cologne, Germany)
    reference_point = {
        "lat": 50.941220,
        "lon": 6.957029,
        "alt": 40.0
    }
    receiver = GNSSReceiver(reference_point)
    while True:
        time.sleep(1)
        print(receiver.getCurrentPosition())
| [
"numpy.sqrt",
"gpsd.connect",
"time.sleep",
"gpsd.get_current",
"numpy.array",
"numpy.cos",
"numpy.sin",
"time.time"
] | [((2318, 2333), 'numpy.sin', 'np.sin', (['_lambda'], {}), '(_lambda)\n', (2324, 2333), True, 'import numpy as np\n'), ((2391, 2406), 'numpy.sin', 'np.sin', (['_lambda'], {}), '(_lambda)\n', (2397, 2406), True, 'import numpy as np\n'), ((2424, 2439), 'numpy.cos', 'np.cos', (['_lambda'], {}), '(_lambda)\n', (2430, 2439), True, 'import numpy as np\n'), ((2454, 2466), 'numpy.cos', 'np.cos', (['_phi'], {}), '(_phi)\n', (2460, 2466), True, 'import numpy as np\n'), ((2481, 2493), 'numpy.sin', 'np.sin', (['_phi'], {}), '(_phi)\n', (2487, 2493), True, 'import numpy as np\n'), ((2627, 2646), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (2635, 2646), True, 'import numpy as np\n'), ((2809, 2824), 'numpy.sin', 'np.sin', (['_lambda'], {}), '(_lambda)\n', (2815, 2824), True, 'import numpy as np\n'), ((2882, 2897), 'numpy.sin', 'np.sin', (['_lambda'], {}), '(_lambda)\n', (2888, 2897), True, 'import numpy as np\n'), ((2915, 2930), 'numpy.cos', 'np.cos', (['_lambda'], {}), '(_lambda)\n', (2921, 2930), True, 'import numpy as np\n'), ((2945, 2957), 'numpy.cos', 'np.cos', (['_phi'], {}), '(_phi)\n', (2951, 2957), True, 'import numpy as np\n'), ((2972, 2984), 'numpy.sin', 'np.sin', (['_phi'], {}), '(_phi)\n', (2978, 2984), True, 'import numpy as np\n'), ((3423, 3453), 'numpy.array', 'np.array', (['[xEast, yNorth, zUp]'], {}), '([xEast, yNorth, zUp])\n', (3431, 3453), True, 'import numpy as np\n'), ((584, 598), 'gpsd.connect', 'gpsd.connect', ([], {}), '()\n', (596, 598), False, 'import gpsd\n'), ((1085, 1103), 'gpsd.get_current', 'gpsd.get_current', ([], {}), '()\n', (1101, 1103), False, 'import gpsd\n'), ((2346, 2372), 'numpy.sqrt', 'np.sqrt', (['(1 - E_SQ * s ** 2)'], {}), '(1 - E_SQ * s ** 2)\n', (2353, 2372), True, 'import numpy as np\n'), ((2837, 2863), 'numpy.sqrt', 'np.sqrt', (['(1 - E_SQ * s ** 2)'], {}), '(1 - E_SQ * s ** 2)\n', (2844, 2863), True, 'import numpy as np\n'), ((3651, 3664), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3661, 3664), 
False, 'import time\n'), ((907, 918), 'time.time', 'time.time', ([], {}), '()\n', (916, 918), False, 'import time\n'), ((1670, 1681), 'time.time', 'time.time', ([], {}), '()\n', (1679, 1681), False, 'import time\n'), ((842, 855), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (852, 855), False, 'import time\n'), ((733, 751), 'gpsd.get_current', 'gpsd.get_current', ([], {}), '()\n', (749, 751), False, 'import gpsd\n')] |
import numpy as np, itertools
from McUtils.Numputils import SparseArray
import McUtils.Numputils as nput
from McUtils.Data import UnitsData
from McUtils.Scaffolding import NullLogger, Checkpointer
from ..BasisReps import BasisStateSpace, BasisMultiStateSpace, SelectionRuleStateSpace
from .Common import PerturbationTheoryException, _safe_dot
# explicit public API of this module
__all__ = [
    "PerturbationTheoryCorrections"
]
class PerturbationTheoryCorrections:
    """
    Represents a set of corrections from perturbation theory.
    Can be used to correct other operators in the basis of the original calculation.
    """
    def __init__(self,
                 states,
                 coupled_states,
                 total_basis,
                 energy_corrs,
                 wfn_corrections,
                 all_energy_corrections=None,
                 degenerate_states=None,
                 degenerate_transformation=None,
                 degenerate_energies=None,
                 degenerate_hamiltonians=None,
                 logger=None
                 ):
        """
        :param states: the states the corrections were computed for
        :type states: BasisStateSpace
        :param coupled_states: the states that couple to the corrected states
        :type coupled_states: BasisMultiStateSpace
        :param total_basis: the full basis in which the corrections are expressed
        :type total_basis: BasisMultiStateSpace
        :param energy_corrs: energy corrections; indexed by state then order
        :type energy_corrs: np.ndarray
        :param wfn_corrections: wavefunction corrections, one matrix per order
        :type wfn_corrections: Iterable[SparseArray]
        :param all_energy_corrections: individual contributions to the energy corrections, when tracked
        :param degenerate_states: the groups of degenerate states, if any
        :type degenerate_states: None | np.ndarray
        :param degenerate_transformation: transformation into the degenerate basis
        :type degenerate_transformation: None | np.ndarray
        :param degenerate_energies: energies after the degenerate transformation
        :type degenerate_energies: None | np.ndarray
        :param degenerate_hamiltonians: effective Hamiltonians for the degenerate groups
        :param logger: logger used for run printouts (may be None)
        """
        self.states = states
        self.coupled_states = coupled_states
        self.total_basis = total_basis
        self.energy_corrs = energy_corrs
        self.all_energy_corrs = all_energy_corrections
        self.wfn_corrections = wfn_corrections
        self.degenerate_states = degenerate_states
        self.degenerate_transf = degenerate_transformation
        self.degenerate_energies = degenerate_energies
        self.degenerate_hamiltonians = degenerate_hamiltonians
        self.logger = logger
@classmethod
def from_dicts(cls,
states,
corrections,
**opts
):
"""
:param states: a dict with the states described by the corrections, the set of states coupled, and the size of the overall basis
:type states: dict
:param corrections: the corrections generated, including the corrections for the energies, wavefunctions, and a transformation from degenerate PT
:type corrections: dict
"""
state_space = states['states']
coupled_states = states['coupled_states']
total_basis = states['total_states']
energy_corrs = corrections['energies']
all_energy_corrs = corrections['energy_corrections'] if 'energy_corrections' in corrections else None
wfn_corrections = corrections['wavefunctions']
if 'degenerate_states' in states:
degenerate_states = states['degenerate_states']
else:
degenerate_states = None
if 'degenerate_transformation' in corrections:
degenerate_transf = corrections['degenerate_transformation']
else:
degenerate_transf = None
if 'degenerate_energies' in corrections:
degenerate_energies = corrections['degenerate_energies']
else:
degenerate_energies = None
return cls(
state_space,
coupled_states,
total_basis,
energy_corrs,
wfn_corrections,
all_energy_corrections=all_energy_corrs,
degenerate_states=degenerate_states,
degenerate_transformation=degenerate_transf,
degenerate_energies=degenerate_energies,
**opts
)
@property
def degenerate(self):
"""
:return:
:rtype:
"""
return self.degenerate_transf is not None
@property
def energies(self):
"""
:return:
:rtype:
"""
if self.degenerate:
return self.degenerate_energies
else:
return np.sum(self.energy_corrs, axis=1)
@property
def order(self):
"""
:return:
:rtype:
"""
return len(self.energy_corrs[0])
    def take_subspace(self, space):
        """
        Takes only those elements that are in space

        :param space: subset of states to restrict the corrections to
        :type space: BasisStateSpace
        :return: a new corrections object restricted to `space`
        :rtype: PerturbationTheoryCorrections
        """
        new_states = self.states.find(space)
        # print("? =", new_states)
        # NOTE(review): `all_energy_corrs` and `degenerate_hamiltonians` are
        # not propagated to the subspace object — confirm whether intentional
        return type(self)(
            self.states.take_subspace(new_states),
            self.coupled_states.take_states(space),
            self.total_basis,
            self.energy_corrs[new_states],
            [w[new_states, :] for w in self.wfn_corrections],
            # not sure what to do with all this...
            degenerate_states=self.degenerate_states,
            degenerate_transformation=self.degenerate_transf,
            degenerate_energies=self.degenerate_energies,
            logger=self.logger
        )
    @classmethod
    def create_coupling_matrix(cls, corrs,
                               states:BasisStateSpace, flat_total_space:BasisStateSpace,
                               nstates, order,
                               filters=None,
                               non_zero_cutoff=1.0e-14,
                               logger=None
                               ):
        """
        Converts raw wavefunction-correction data into per-order sparse
        coupling matrices plus, per initial state, the spaces of basis states
        it couples to.

        :param corrs: correction data, either (order, nstates, N) or (nstates, order, N) indexed
        :param states: the initial states the corrections belong to
        :type states: BasisStateSpace
        :param flat_total_space: the flattened total basis
        :type flat_total_space: BasisStateSpace
        :param nstates: number of corrected states
        :param order: highest correction order
        :param filters: optional dict of n-quanta selection-rule filters, keyed so that sum(key) gives the order
        :param non_zero_cutoff: magnitude below which couplings are dropped
        :param logger: optional logger for diagnostics
        :return: (corr_mats, corr_inds) — one SparseArray per order and one BasisMultiStateSpace per state
        :rtype: tuple
        """
        # now we recompute reduced state spaces for use in results processing
        # and we also convert the correction vectors to sparse representations
        tci = flat_total_space.indices
        N = len(tci)
        # layout detection: (order, nstates, ...) vs (nstates, order, ...)
        is_transp = len(corrs) == order and len(corrs[0]) == nstates
        is_sparse = isinstance(corrs[0], SparseArray) if is_transp else isinstance(corrs, SparseArray)
        # nstates = len(all_corrs)
        corr_mats = [None] * (order + 1)
        corr_inds = [[] for i in range(nstates)]
        for o in range(order + 1):
            if filters is not None:
                # we use this to check that we're only
                # allowing transitions that our filters support
                nquanta_rules = [
                    v for k,v in filters.items() if sum(k) == o
                ]
            else:
                nquanta_rules = None
            non_zeros = []
            if is_sparse:
                if is_transp:
                    # pull the non-negligible elements straight out of the sparse block data
                    sp_vals, sp_inds = corrs[o].block_data
                    nonzi = np.where(np.abs(sp_vals) > non_zero_cutoff)[0]
                    sp_vals = sp_vals[nonzi,]
                    sp_inds = tuple(s[nonzi] for s in sp_inds)
                    corr_mats[o] = SparseArray.from_data(
                        (
                            sp_vals,
                            sp_inds
                        ),
                        shape=(nstates, N),
                        cache_block_data=False
                    )
                    # group the column indices by row (initial state)
                    ind_keys, ind_vals = nput.group_by(sp_inds[1], sp_inds[0])[0]
                    for i,v in zip(ind_keys, ind_vals):
                        corr_inds[i].append(v)
                else:
                    raise NotImplementedError("constructing final coupling matrix from (order, nstates, N) `SparseArray` not supported")
            else:
                initial_quanta = np.sum(states.excitations, axis=1)
                for i in range(nstates):
                    if is_transp:
                        nonzi = np.where(np.abs(corrs[o, i]) > non_zero_cutoff)[0]
                        vals = corrs[o, i][nonzi,]
                    else:
                        nonzi = np.where(np.abs(corrs[i, o]) > non_zero_cutoff)[0]
                        vals = corrs[i, o][nonzi,]
                    if len(nonzi) > 0:
                        # we attempt to filter out things that can't touch based on our filter rules
                        target_quanta = np.sum(flat_total_space.take_subspace(nonzi).excitations, axis=1)
                        if nquanta_rules is not None and len(nquanta_rules) > 0:
                            # from .Solver import PerturbationTheoryStateSpaceFilter
                            mask = None
                            for f in nquanta_rules:
                                # f:PerturbationTheoryStateSpaceFilter
                                for (filter_space, filter_rules) in f.prefilters:
                                    is_in = states.take_subspace([i]).intersection(filter_space)
                                    if len(is_in) > 0:
                                        # keep only couplings whose quanta change matches a rule
                                        q_diffs = target_quanta - initial_quanta[i]
                                        poss_diffs = np.unique([sum(x) for x in filter_rules])
                                        if mask is None:
                                            mask = np.isin(q_diffs, poss_diffs)
                                        else:
                                            mask = np.logical_or(mask, np.isin(q_diffs, poss_diffs))
                            if mask is not None:
                                nonzi = nonzi[mask]
                                vals = vals[mask]
                    else:
                        if logger is not None:
                            logger.log_print("No corrections for state {s} at order {o}",
                                             s=states.excitations[i],
                                             o=o
                                             )
                    # and then we add the appropriate basis indices to the list of basis data
                    non_zeros.append(
                        (
                            vals,
                            np.column_stack([
                                np.full(len(nonzi), i),
                                nonzi
                            ])
                        )
                    )
                    corr_inds[i].append(tci[nonzi,])
                # now we build the full mat rep for this level of correction
                vals = np.concatenate([x[0] for x in non_zeros])
                inds = np.concatenate([x[1] for x in non_zeros], axis=0).T
                corr_mats[o] = SparseArray.from_data(
                    (
                        vals,
                        inds
                    ),
                    shape=(nstates, N),
                    cache_block_data=False
                )
        # now we build state reps from corr_inds
        for i, dat in enumerate(corr_inds): #TODO: this might break with pruning...I can't really be sure at this point
            spaces = []
            for substates in dat:
                # deduplicate while preserving first-occurrence order
                _, upos = np.unique(substates, return_index=True)
                usubs = substates[np.sort(upos)]
                spaces.append(flat_total_space.take_states(usubs))
            corr_inds[i] = BasisMultiStateSpace(np.array(spaces, dtype=object))
        return corr_mats, corr_inds
def prune(self, threshold=.1, in_place=False):
"""
Returns corrections with couplings less than the given cutoff set to zero
:param threshold:
:type threshold:
:return:
:rtype:
"""
if not in_place:
import copy
new = copy.copy(self)
new.prune(threshold=threshold, in_place=False)
# might need to work harder here...
return new
for o in range(1, self.order):
sp_vals, sp_inds = self.wfn_corrections[o].block_data
mask = abs(sp_vals) > threshold
sp_vals = sp_vals[mask]
sp_inds = tuple(s[mask] for s in sp_inds)
self.wfn_corrections[o].block_vals = sp_vals
self.wfn_corrections[o].block_inds = sp_inds
return self
def _take_subham(self, rep, inds):
"""
Builds a subsampled version of a representation Hamiltonian
to allow equations to be efficiently solved in subspaces.
:param rep: representation matrix from which to pull the subspace
:type rep: SparseArray
:param inds: indices for the subspace
:type inds: np.ndarray
:return:
:rtype:
"""
ind_pairs = np.array(list(itertools.product(inds, inds))).T
return np.reshape(rep[ind_pairs], (len(inds), len(inds)))
    def get_transformed_Hamiltonians(self, hams, deg_group=None):
        """
        Computes the matrix elements of the Hamiltonian expansion in the basis
        of corrected wavefunctions, optionally restricted to a degenerate group.

        :param hams: Hamiltonian expansion matrices over the full basis
        :param deg_group: optional degenerate group of states to restrict to
        :type deg_group: None | BasisStateSpace
        :return: transformed Hamiltonian matrices, one per order
        :rtype: list
        """
        if deg_group is not None:
            # restrict both the corrections and the Hamiltonians to the group
            subcorrs = self.take_subspace(deg_group)
            inds = self.total_basis.find(deg_group)
            subhams = [SparseArray.from_data(self._take_subham(H, inds)) for H in hams]
            # H_nd =[
            #     x.asarray() if isinstance(x, SparseArray) else x
            #     for x in subcorrs.operator_representation(subhams, subspace=deg_group)
            # ]
            H_nd = [
                x.asarray() if isinstance(x, SparseArray) else x
                for x in subcorrs.operator_representation(subhams, subspace=deg_group, logger_symbol="H")
            ]
        else:
            subhams = hams
            # logged in wavenumbers for readability
            H_nd = [
                x.asarray() if isinstance(x, SparseArray) else x
                for x in self.operator_representation(subhams, logger_symbol="H", logger_conversion=UnitsData.convert("Hartrees", "Wavenumbers"))
            ]
        return H_nd
    def get_degenerate_rotation(self, deg_group, hams):
        """
        Diagonalizes the effective Hamiltonian over a degenerate group and
        returns the per-order Hamiltonian blocks, the sorted degenerate
        energies, and the (column-sorted) rotation into the degenerate basis.

        :param deg_group: the group of degenerate states
        :type deg_group: BasisStateSpace
        :param hams: the Hamiltonian expansion matrices
        :return: (H_nd_corrs, deg_engs, deg_transf)
        :rtype: tuple
        """
        logger = self.logger
        # raise Exception(corrs.states.excitations, deg_group)
        with logger.block(tag="states"):
            logger.log_print(
                str(
                    self.states.take_states(deg_group).excitations
                ).splitlines()
            )
        subdegs = self.take_subspace(deg_group)
        # from McUtils.Scaffolding import JSONSerializer
        # import os
        # with open(os.path.expanduser("~/Desktop/wat6.json"), "w+") as woof:
        #     JSONSerializer().serialize(woof, subdegs)
        # H_nd = self.get_transformed_Hamiltonians(corrs, deg_group)
        # for h in H_nd[1:]:
        #     np.fill_diagonal(h, 0.)
        H_nd_corrs = subdegs.get_transformed_Hamiltonians(hams, deg_group=None)
        # import McUtils.Plots as plt
        # plt.TensorPlot(np.array(H_nd)).show()
        # total effective Hamiltonian = sum over correction orders
        H_nd = np.sum(H_nd_corrs, axis=0)
        if np.sum(H_nd) == 0:
            raise Exception(subdegs.wfn_corrections)
        # raise Exception(deg_group.excitations,
        #                 self.states.take_states(deg_group).excitations,
        #                 # self.coupled_states.take_states(deg_group).excitations
        #                 )
        # overlaps = np.sum(subdegs.get_overlap_matrices(), axis=0)
        with logger.block(tag="non-degenerate Hamiltonian"):
            logger.log_print(
                str(
                    np.round(H_nd * UnitsData.convert("Hartrees", "Wavenumbers")).astype(int)
                ).splitlines()
            )
        deg_engs, deg_transf = np.linalg.eigh(H_nd)
        # warn when no single zero-order state dominates an eigenvector
        ov_thresh = .5
        for i in range(len(deg_transf)):
            max_ov = np.max(deg_transf[:, i] ** 2)
            if max_ov < ov_thresh:  # there must be a single mode that has more than 50% of the initial state character?
                logger.log_print(
                    " state {i} is more than 50% mixed",
                    i=i
                )
                # raise PerturbationTheoryException("mode {} is has no contribution of greater than {}".format(
                #     i, ov_thresh
                # ))
        # we pick the terms with the max contribution from each input state
        # and zero out the contributions so that two states can't map
        # to the same input state
        sort_transf = np.abs(deg_transf.copy())
        sorting = [-1] * len(deg_transf)
        for i in range(len(deg_transf)):
            o = np.argmax(sort_transf[i, :])
            sorting[i] = o
            sort_transf[:, o] = 0.  # np.zeros(len(sort_transf))
        with logger.block(tag='contributions'):
            logger.log_print(
                str(np.round(100 * (deg_transf ** 2)).astype(int)).splitlines()
            )
        logger.log_print('sorting: {s}', s=sorting)
        # sorting = np.argsort(sorting)
        #
        # # if len(sorting) != len(np.unique(sorting)):
        # #     raise PerturbationTheoryException("After diagonalizing can't distinguish modes...")
        deg_engs = deg_engs[sorting,]
        self.logger.log_print("degenerate energies {e}",
                              e=np.round(deg_engs * UnitsData.convert("Hartrees", "Wavenumbers")))
        deg_transf = deg_transf[:, sorting]
        return H_nd_corrs, deg_engs, deg_transf
    def get_degenerate_transformation(self, group, hams, gaussian_resonance_handling=False):
        """
        Computes the degenerate rotation for one degeneracy group, skipping
        groups that reduce to a single state (or, with Gaussian-style
        resonance handling, groups containing states above 2 quanta).

        :param group: a degeneracy group of states
        :type group: BasisStateSpace
        :param hams: the Hamiltonian expansion matrices
        :param gaussian_resonance_handling: whether to skip rotations for >2-quanta states
        :return: (deg_inds, H_nd, deg_rot, deg_engs)
        :rtype: tuple
        """
        # this will be built from a series of block-diagonal matrices
        # so we store the relevant values and indices to compose the SparseArray
        # we apply the degenerate PT on a group-by-group basis
        # by transforming the H reps into the non-degenerate basis
        # print(">", group.excitations)
        deg_inds = self.states.find(group, missing_val=-1)
        mask = deg_inds > -1
        deg_inds = deg_inds[mask]
        if not mask.all():
            # warn about (and drop) spec states outside the corrected space
            bad = group.take_subspace(np.where(np.logical_not(mask))[0])
            self.logger.log_print(
                "WARNING: got degeneracy spec including states {} that are not in space of corrected states".format(
                    bad.excitations
                ))
            group = group.take_subspace(np.where(mask)[0])
        # print(">..", deg_inds, self.states.find(group, missing_val=-1))
        if len(deg_inds) == 1 or (
                gaussian_resonance_handling and np.max(np.sum(group.excitations, axis=1)) > 2):
            H_nd = deg_engs = deg_rot = None
        elif len(deg_inds) > 1:
            H_nd, deg_engs, deg_rot = self.get_degenerate_rotation(group, hams)
        else:
            H_nd = deg_engs = deg_rot = None
            # raise NotImplementedError("Not sure what to do when no states in degeneracy spec are in total space")
        return deg_inds, H_nd, deg_rot, deg_engs
    @staticmethod
    def default_state_filter(state, couplings, energy_cutoff=None, energies=None, basis=None, target_modes=None):
        """
        Excludes modes that differ in only one position, prioritizing states with fewer numbers of quanta
        (potentially add restrictions to high frequency modes...?)

        :param state: the single initial state being filtered against
        :param couplings: the candidate coupled states
        :param energy_cutoff: when given, filter by energy difference instead of excitation pattern
        :param energies: state energies, indexed by basis position (used with energy_cutoff)
        :param basis: basis used to locate `state`/`couplings` (used with energy_cutoff)
        :param target_modes: subset of modes to compare excitations over (defaults to all)
        :return: the filtered couplings, or None when everything was excluded
        """
        if target_modes is None:
            target_modes = np.arange(len(state.excitations[0]))
        if energy_cutoff is not None:
            state_ind = basis.find(state)
            coupling_inds = basis.find(couplings)
            diff_mask = np.abs(energies[coupling_inds] - energies[state_ind]) > energy_cutoff
        else:
            exc_1 = state.excitations[0, target_modes]
            exc_2 = couplings.excitations[:, target_modes]
            diffs = exc_2 - exc_1[np.newaxis, :]
            diff_sums = np.sum(diffs != 0, axis=1)
            diff_mask = np.logical_or(
                np.sum(couplings.excitations, axis=1) == 0,  # drop ground state
                diff_sums == 1  # find where changes are only in one position
            )
        # now drop these modes
        # print(couplings.excitations)
        if diff_mask.any():
            if diff_mask.all():
                couplings = None
            else:
                # print(">>>", state.excitations[0])
                # print(couplings.excitations)
                couplings = couplings.take_subspace(np.where(np.logical_not(diff_mask))[0])
                # print("===")
                # print(couplings.excitations)
        # print(couplings.excitations)
        return couplings
    def find_strong_couplings(self, threshold=.1, state_filter=None):
        """
        Finds positions in the expansion matrices where the couplings are too large

        :param threshold: minimum absolute coupling to flag
        :type threshold: float
        :param state_filter: callable filtering the flagged coupled states (defaults to `default_state_filter`)
        :return: dict mapping state index -> per-order list of strongly coupled spaces
        :rtype: dict
        """
        if state_filter is None:
            state_filter = self.default_state_filter
        order = self.order
        strong_couplings = {}
        for o in range(1, order):
            # locate the above-threshold elements of this order's corrections
            sp_vals, sp_inds = self.wfn_corrections[o].block_data
            nonzi = np.where(np.abs(sp_vals) > threshold)[0]
            sp_inds = tuple(s[nonzi] for s in sp_inds)
            # group the coupled columns by the initial state they belong to
            ind_keys, ind_vals = nput.group_by(sp_inds[1], self.states.indices[sp_inds[0]])[0]
            # print(self.states.indices[sp_inds[0]])
            # print(tci[sp_inds[1]])
            for i, v in zip(ind_keys, ind_vals):
                # deduplicate preserving first-occurrence order
                _, upos = np.unique(v, return_index=True)
                usubs = v[np.sort(upos)]
                states = state_filter(self.states.take_states([i]), self.total_basis.take_subspace(usubs))
                if states is not None and len(states) > 0:
                    if i not in strong_couplings:
                        strong_couplings[i] = [None for _ in range(order)]
                    strong_couplings[i][o] = states
        return strong_couplings
    def format_strong_couplings_report(self, couplings=None, threshold=.1, int_fmt="{:>3.0f}", padding="{:<8}", join=True, use_excitations=True):
        """
        Formats a text report of the strong couplings found by `find_strong_couplings`.

        :param couplings: precomputed couplings dict (computed from `threshold` when None)
        :param threshold: threshold used when computing couplings
        :param int_fmt: per-quantum number format
        :param padding: left-column label format template
        :param join: whether to join the lines into one string
        :param use_excitations: whether to report excitation vectors rather than raw indices
        :return: the report lines, joined when `join`
        """
        if couplings is None:
            couplings = self.find_strong_couplings(threshold=threshold)
        list_fmt = " ".join(int_fmt for _ in range(self.total_basis.ndim))
        coupling_statements = []
        for i,v in sorted(couplings.items(), key=lambda k:k[0]):
            if use_excitations:
                i = self.states.take_states([i]).excitations[0]
                coupling_statements.append(padding.format("state:") + list_fmt.format(*i))
                # print(v)
                for n,l in enumerate(v):
                    if l is not None and len(l) > 0:
                        coupling_statements.extend(padding.format("  order {}".format(n) if j == 0 else "")+list_fmt.format(*e) for j,e in enumerate(l.excitations))
            else:
                coupling_statements.append(list_fmt.format(i))
                # NOTE(review): `padding` is a format template ("{:<8}"), so this
                # concatenation emits the literal template text instead of a
                # padded field — probably meant `padding.format("")`; confirm
                coupling_statements.append(padding + str(v.indices))
        return coupling_statements if not join else "\n".join(coupling_statements)
def collapse_strong_couplings(self, sc:dict):
"""
:param sc:
:type sc:
:return:
:rtype:
"""
new = {}
for k,v in sc.items():
s = None
for s2 in v:
if s2 is not None:
if s is None:
s = s2
else:
s = s.concatenate(s2)
new[k] = s
return new
# def apply_martin_test(self, hams):
# zoos =
# (np.abs(H1_block) ** 4) / (diffs ** 3)
    @staticmethod
    def _fmt_operator_rep(full_ops, operator_symbol, conversion, real_fmt="{:>.8e}", padding_fmt='{:>16}'):
        # Formats the per-(a, b, c) operator sub-representations into aligned
        # text columns for logging; returns a list of lines (tag line first)
        tag_line = None
        rep_lines = None
        op_dim = None
        for (a, b, c), subrep in full_ops:
            if isinstance(subrep, SparseArray):
                subrep = subrep.asarray()
            elif isinstance(subrep, (int, float, np.integer, np.floating)):
                if subrep == 0:
                    # a scalar zero stands in for an all-zero block; we need a
                    # previously-seen block to know its shape
                    if op_dim is None:
                        raise ValueError("was lazy and haven't filled operator dim yet...")
                    subrep = np.zeros(op_dim)
                else:
                    raise ValueError("don't know what to do with representation '{}'".format(subrep))
            if op_dim is None:
                op_dim = subrep.shape
            if conversion is not None:
                subrep = subrep * conversion
            subrep_lines = [
                " ".join(padding_fmt.format(real_fmt.format(e)) for e in line)
                for line in subrep
            ]
            line_len = len(subrep_lines[0])
            # append this block's columns to the right of the running lines
            if rep_lines is None:
                rep_lines = subrep_lines
            else:
                rep_lines = [x + " " + y for x,y in zip(rep_lines, subrep_lines)]
            tag_fmt = "{:<" + str(line_len) + "}"
            base_tag=tag_fmt.format("<{a}|{A}({c})|{b}>".format(A=operator_symbol, a=a, c=c, b=b))
            if tag_line is None:
                tag_line = base_tag
            else:
                tag_line += " " + base_tag
        # we want to return a line list so the logger can add any prefixes it needs
        rep_lines.insert(0, tag_line)
        return rep_lines
    def operator_representation(self, operator_expansion, order=None, subspace=None, contract=True,
                                logger_symbol="A",
                                logger_conversion=None
                                ):
        """
        Generates the representation of the operator in the basis of stored states

        :param operator_expansion: the expansion of the operator
        :type operator_expansion: Iterable[float] | Iterable[np.ndarray]
        :param order: the order of correction to go up to
        :type order: Iterable[float] | Iterable[np.ndarray]
        :param subspace: the subspace of terms in which the operator expansion is defined
        :type subspace: None | BasisStateSpace
        :param contract: whether to sum the (a, b, c) contributions at each order
        :param logger_symbol: symbol used when logging the operator blocks
        :param logger_conversion: unit conversion applied when logging
        :return: the set of representation matrices for this operator
        :rtype: Iterable[np.ndarray]
        """
        mordor = self.order - 1
        if order is None:
            order = mordor
        if order > mordor:
            raise PerturbationTheoryException("{}: can't correct up to order {} when zero-order states were only corrected up to order {}".format(
                type(self).__name__,
                order,
                mordor
            ))
        order = order + 1 # so that we actually do get up to the request order after accounting for the zeros...
        if len(operator_expansion) < order:
            # pad with scalar zeros so every order has an expansion term
            operator_expansion = list(operator_expansion) + [0]*(order - len(operator_expansion))
        # we stopped supporting indexing based on the total set of inds...
        if subspace is None:
            wfn_corrs = self.wfn_corrections[:order]
        else:
            # need to make the subspace good with the subspace in which the corrections are defined...
            subspace_sel = self.total_basis.find(subspace, check=True)
            wfn_corrs = []
            for k in range(order):
                wfn_corrs.append(self.wfn_corrections[k][:, subspace_sel])
        # generalizes the dot product so that we can use 0 as a special value...
        dot = _safe_dot
        logger = self.logger
        logger = None if logger is None or isinstance(logger, NullLogger) else logger
        # does the dirty work of acutally applying the rep...
        reps = [[] for _ in range(order)]
        full_ops = []
        for k in range(order):
            tags = []
            op = []
            # apply each thing up to requested order...
            for a in range(k+1): # if k == 2: a=0, a=1, a=2
                for b in range(k-a+1): # if k==2, a==0: b=0, b=1, b=2; a==1: b=0, b=1
                    c = k - (a + b) # a + b + c == k
                    rop = operator_expansion[c]
                    if isinstance(rop, (int, float, np.integer, np.floating)): # constant reps...
                        if rop != 0: # cheap easy check
                            subrep = rop * dot(wfn_corrs[a], wfn_corrs[b].T)
                            op.append(subrep)
                        else:
                            subrep = 0
                            op.append(0)
                    else:
                        subrep = dot(dot(wfn_corrs[a], rop), wfn_corrs[b].T)
                        op.append(subrep)
                    full_ops.append([
                        (a, b, c),
                        subrep
                    ])
            if contract:
                op = sum(op)
            reps[k] = op
        if logger is not None:
            logger.log_print(full_ops, logger_symbol, logger_conversion, message_prepper=self._fmt_operator_rep)
        return reps
def get_overlap_matrices(self):
"""
Returns the overlap matrices for the set of corrections
at each order of correction
:return:
:rtype:
"""
wat = []
for k in range(2 + 1):
ov = None
for i in range(k + 1):
c1 = self.wfn_corrections[i].asarray()
c2 = self.wfn_corrections[k - i].asarray()
if ov is None:
ov = np.dot(c1, c2.T)
else:
ov += np.dot(c1, c2.T)
wat.append(ov)
return wat
# def checkpoint_save(self, checkpoint:Checkpointer):
# """
# Writes correction arrays to checkpoint
#
# :param checkpoint:
# :type checkpoint:
# :return:
# :rtype:
# """
# import gc, sys
#
# # self.states = states
# # self.coupled_states = coupled_states
# # self.total_basis = total_basis
# # self.energy_corrs = energy_corrs
# # self.all_energy_corrs = all_energy_corrections
# # self.wfn_corrections = wfn_corrections
# # self.degenerate_states = degenerate_states
# # self.degenerate_transf = degenerate_transformation
# # self.degenerate_energies = degenerate_energies
# # self.logger = logger
# checkpoint["wfn_corrections"] = self.wfn_corrections
# print(">>>>", sys.getrefcount(self.wfn_corrections))
# self.wfn_corrections = None
# gc.collect()
#
# def checkpoint_reload(self, checkpoint:Checkpointer):
# self.wfn_corrections = checkpoint['wfn_corrections']
#
# def disk_backed(self):
# return self.chk_backer(self)
#
# class chk_backer:
# def __init__(self, parent):
# self.parent = parent
# self.chk = None
# def load_chk(self):
# import tempfile as tf
# target = tf.NamedTemporaryFile(suffix=".hdf5").name
# self.chk = Checkpointer.from_file(target)
# def unload_chk(self):
# import os
# os.remove(self.chk.checkpoint_file)
# def __enter__(self):
# self.load_chk()
# self.chk.__enter__()
# self.parent.checkpoint_save(self.chk)
# def __exit__(self, exc_type, exc_val, exc_tb):
# self.parent.checkpoint_reload(self.chk)
# self.chk.__exit__(exc_type, exc_val, exc_tb)
# self.unload_chk()
    def savez(self, file):
        """
        Legacy `np.savez` serialization; disabled.

        :param file: target file
        """
        raise NotImplementedError("old and wrong now")
        # NOTE: everything below is unreachable because of the raise above;
        # kept for reference until the serialization interface is settled
        keys = dict(
            states=self.states,
            coupled_states=self.coupled_states,
            total_states=self.total_basis,
            energies=self.energy_corrs,
            wavefunctions=self.wfn_corrections
        )
        if self.degenerate_states is not None:
            keys['degenerate_states'] = self.degenerate_states
        if self.degenerate_transf is not None:
            keys['degenerate_transformation'] = self.degenerate_transf
        if self.degenerate_energies is not None:
            keys['degenerate_energies'] = self.degenerate_energies
        np.savez(file, **keys)
    @classmethod
    def loadz(cls, file):
        """
        Legacy loader for archives written by `savez`.

        :param file: source file
        """
        keys = np.load(file)
        # NOTE(review): `from_dicts` accepts only two positional arguments, so
        # the trailing `keys['hamiltonians']` argument raises a TypeError if
        # this legacy loader is ever invoked — confirm before resurrecting it
        return cls.from_dicts(
            {
                "states":keys['states'],
                "coupled_states":keys['coupled_states'],
                "total_states":keys['total_states'],
                "degenerate_states":keys['degenerate_states'] if 'degenerate_states' in keys else None
            },
            {
                "energies":keys['energies'],
                "wavefunctions":keys['wavefunctions'],
                "degenerate_transformation": keys['degenerate_transformation'] if 'degenerate_transformation' in keys else None,
                "degenerate_energies": keys['degenerate_energies'] if 'degenerate_energies' in keys else None
            },
            keys['hamiltonians']
        )
def to_state(self, serializer=None):
keys = dict(
states=self.states,
coupled_states=self.coupled_states,
total_states=self.total_basis,
energies=self.energy_corrs,
wavefunctions=self.wfn_corrections,
degenerate_states=self.degenerate_states,
degenerate_transformations=self.degenerate_transf,
degenerate_energies=self.degenerate_energies
)
return keys
@classmethod
def from_state(cls, data, serializer=None):
return cls.from_dicts(
{
"states": serializer.deserialize(data['states']),
"coupled_states": serializer.deserialize(data['coupled_states']),
"total_states": serializer.deserialize(data['coupled_states']),
"degenerate_states": serializer.deserialize(data['degenerate_states']),
},
{
"energies": data['energies'],
"wavefunctions": data['wavefunctions'],
"degenerate_transformation": data['degenerate_transformations'],
"degenerate_energies": data['degenerate_energies']
},
data['hamiltonians'] # we probably want to ditch this for memory reasons...
) | [
"numpy.logical_not",
"numpy.isin",
"numpy.array",
"McUtils.Data.UnitsData.convert",
"copy.copy",
"numpy.savez",
"numpy.where",
"numpy.sort",
"itertools.product",
"numpy.max",
"McUtils.Numputils.group_by",
"numpy.dot",
"numpy.concatenate",
"numpy.linalg.eigh",
"numpy.round",
"numpy.abs"... | [((15206, 15232), 'numpy.sum', 'np.sum', (['H_nd_corrs'], {'axis': '(0)'}), '(H_nd_corrs, axis=0)\n', (15212, 15232), True, 'import numpy as np, itertools\n'), ((15918, 15938), 'numpy.linalg.eigh', 'np.linalg.eigh', (['H_nd'], {}), '(H_nd)\n', (15932, 15938), True, 'import numpy as np, itertools\n'), ((32389, 32411), 'numpy.savez', 'np.savez', (['file'], {}), '(file, **keys)\n', (32397, 32411), True, 'import numpy as np, itertools\n'), ((32470, 32483), 'numpy.load', 'np.load', (['file'], {}), '(file)\n', (32477, 32483), True, 'import numpy as np, itertools\n'), ((4356, 4389), 'numpy.sum', 'np.sum', (['self.energy_corrs'], {'axis': '(1)'}), '(self.energy_corrs, axis=1)\n', (4362, 4389), True, 'import numpy as np, itertools\n'), ((11902, 11917), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (11911, 11917), False, 'import copy\n'), ((15244, 15256), 'numpy.sum', 'np.sum', (['H_nd'], {}), '(H_nd)\n', (15250, 15256), True, 'import numpy as np, itertools\n'), ((16025, 16054), 'numpy.max', 'np.max', (['(deg_transf[:, i] ** 2)'], {}), '(deg_transf[:, i] ** 2)\n', (16031, 16054), True, 'import numpy as np, itertools\n'), ((16807, 16835), 'numpy.argmax', 'np.argmax', (['sort_transf[i, :]'], {}), '(sort_transf[i, :])\n', (16816, 16835), True, 'import numpy as np, itertools\n'), ((20133, 20159), 'numpy.sum', 'np.sum', (['(diffs != 0)'], {'axis': '(1)'}), '(diffs != 0, axis=1)\n', (20139, 20159), True, 'import numpy as np, itertools\n'), ((8008, 8042), 'numpy.sum', 'np.sum', (['states.excitations'], {'axis': '(1)'}), '(states.excitations, axis=1)\n', (8014, 8042), True, 'import numpy as np, itertools\n'), ((10689, 10730), 'numpy.concatenate', 'np.concatenate', (['[x[0] for x in non_zeros]'], {}), '([x[0] for x in non_zeros])\n', (10703, 10730), True, 'import numpy as np, itertools\n'), ((10837, 10916), 'McUtils.Numputils.SparseArray.from_data', 'SparseArray.from_data', (['(vals, inds)'], {'shape': '(nstates, N)', 'cache_block_data': '(False)'}), '((vals, 
inds), shape=(nstates, N), cache_block_data=False)\n', (10858, 10916), False, 'from McUtils.Numputils import SparseArray\n'), ((11319, 11358), 'numpy.unique', 'np.unique', (['substates'], {'return_index': '(True)'}), '(substates, return_index=True)\n', (11328, 11358), True, 'import numpy as np, itertools\n'), ((11523, 11553), 'numpy.array', 'np.array', (['spaces'], {'dtype': 'object'}), '(spaces, dtype=object)\n', (11531, 11553), True, 'import numpy as np, itertools\n'), ((18532, 18546), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (18540, 18546), True, 'import numpy as np, itertools\n'), ((19862, 19915), 'numpy.abs', 'np.abs', (['(energies[coupling_inds] - energies[state_ind])'], {}), '(energies[coupling_inds] - energies[state_ind])\n', (19868, 19915), True, 'import numpy as np, itertools\n'), ((21543, 21601), 'McUtils.Numputils.group_by', 'nput.group_by', (['sp_inds[1]', 'self.states.indices[sp_inds[0]]'], {}), '(sp_inds[1], self.states.indices[sp_inds[0]])\n', (21556, 21601), True, 'import McUtils.Numputils as nput\n'), ((21770, 21801), 'numpy.unique', 'np.unique', (['v'], {'return_index': '(True)'}), '(v, return_index=True)\n', (21779, 21801), True, 'import numpy as np, itertools\n'), ((7351, 7440), 'McUtils.Numputils.SparseArray.from_data', 'SparseArray.from_data', (['(sp_vals, sp_inds)'], {'shape': '(nstates, N)', 'cache_block_data': '(False)'}), '((sp_vals, sp_inds), shape=(nstates, N),\n cache_block_data=False)\n', (7372, 7440), False, 'from McUtils.Numputils import SparseArray\n'), ((10754, 10803), 'numpy.concatenate', 'np.concatenate', (['[x[1] for x in non_zeros]'], {'axis': '(0)'}), '([x[1] for x in non_zeros], axis=0)\n', (10768, 10803), True, 'import numpy as np, itertools\n'), ((11393, 11406), 'numpy.sort', 'np.sort', (['upos'], {}), '(upos)\n', (11400, 11406), True, 'import numpy as np, itertools\n'), ((12870, 12899), 'itertools.product', 'itertools.product', (['inds', 'inds'], {}), '(inds, inds)\n', (12887, 12899), False, 'import numpy as 
np, itertools\n'), ((20215, 20252), 'numpy.sum', 'np.sum', (['couplings.excitations'], {'axis': '(1)'}), '(couplings.excitations, axis=1)\n', (20221, 20252), True, 'import numpy as np, itertools\n'), ((21828, 21841), 'numpy.sort', 'np.sort', (['upos'], {}), '(upos)\n', (21835, 21841), True, 'import numpy as np, itertools\n'), ((29660, 29676), 'numpy.dot', 'np.dot', (['c1', 'c2.T'], {}), '(c1, c2.T)\n', (29666, 29676), True, 'import numpy as np, itertools\n'), ((29725, 29741), 'numpy.dot', 'np.dot', (['c1', 'c2.T'], {}), '(c1, c2.T)\n', (29731, 29741), True, 'import numpy as np, itertools\n'), ((7654, 7691), 'McUtils.Numputils.group_by', 'nput.group_by', (['sp_inds[1]', 'sp_inds[0]'], {}), '(sp_inds[1], sp_inds[0])\n', (7667, 7691), True, 'import McUtils.Numputils as nput\n'), ((17510, 17554), 'McUtils.Data.UnitsData.convert', 'UnitsData.convert', (['"""Hartrees"""', '"""Wavenumbers"""'], {}), "('Hartrees', 'Wavenumbers')\n", (17527, 17554), False, 'from McUtils.Data import UnitsData\n'), ((18263, 18283), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (18277, 18283), True, 'import numpy as np, itertools\n'), ((18716, 18749), 'numpy.sum', 'np.sum', (['group.excitations'], {'axis': '(1)'}), '(group.excitations, axis=1)\n', (18722, 18749), True, 'import numpy as np, itertools\n'), ((21423, 21438), 'numpy.abs', 'np.abs', (['sp_vals'], {}), '(sp_vals)\n', (21429, 21438), True, 'import numpy as np, itertools\n'), ((24511, 24527), 'numpy.zeros', 'np.zeros', (['op_dim'], {}), '(op_dim)\n', (24519, 24527), True, 'import numpy as np, itertools\n'), ((14043, 14087), 'McUtils.Data.UnitsData.convert', 'UnitsData.convert', (['"""Hartrees"""', '"""Wavenumbers"""'], {}), "('Hartrees', 'Wavenumbers')\n", (14060, 14087), False, 'from McUtils.Data import UnitsData\n'), ((20712, 20737), 'numpy.logical_not', 'np.logical_not', (['diff_mask'], {}), '(diff_mask)\n', (20726, 20737), True, 'import numpy as np, itertools\n'), ((7169, 7184), 'numpy.abs', 'np.abs', 
(['sp_vals'], {}), '(sp_vals)\n', (7175, 7184), True, 'import numpy as np, itertools\n'), ((8159, 8178), 'numpy.abs', 'np.abs', (['corrs[o, i]'], {}), '(corrs[o, i])\n', (8165, 8178), True, 'import numpy as np, itertools\n'), ((8319, 8338), 'numpy.abs', 'np.abs', (['corrs[i, o]'], {}), '(corrs[i, o])\n', (8325, 8338), True, 'import numpy as np, itertools\n'), ((17027, 17058), 'numpy.round', 'np.round', (['(100 * deg_transf ** 2)'], {}), '(100 * deg_transf ** 2)\n', (17035, 17058), True, 'import numpy as np, itertools\n'), ((9509, 9537), 'numpy.isin', 'np.isin', (['q_diffs', 'poss_diffs'], {}), '(q_diffs, poss_diffs)\n', (9516, 9537), True, 'import numpy as np, itertools\n'), ((15783, 15827), 'McUtils.Data.UnitsData.convert', 'UnitsData.convert', (['"""Hartrees"""', '"""Wavenumbers"""'], {}), "('Hartrees', 'Wavenumbers')\n", (15800, 15827), False, 'from McUtils.Data import UnitsData\n'), ((9655, 9683), 'numpy.isin', 'np.isin', (['q_diffs', 'poss_diffs'], {}), '(q_diffs, poss_diffs)\n', (9662, 9683), True, 'import numpy as np, itertools\n')] |
""" This module contains functions for plotting fits.
"""
import matplotlib.pyplot as plt
import numpy
def plot_fit(nfig, x_fit, y_fit, x, y, sigma_x, sigma_y,
             cp_band_low, cp_band_1, xlabel="Pressure (GPa)",
             ylabel=r"Volume ($\mathbf{\AA^3}$)", output_file=None):
    """ Plots a fitted curve, the measured data points with error bars, and
    the confidence bands around the fit.

    Parameters
    ----------
    nfig : int
        Figure number forwarded to ``plt.figure``.
    x_fit, y_fit : array_like
        Coordinates of the fitted curve.
    x, y : array_like
        Measured data points.
    sigma_x, sigma_y : array_like
        Error bars on the measured data.
    cp_band_low, cp_band_1 : array_like
        Lower and upper confidence-band x-coordinates; both are plotted
        against ``y_fit`` as dashed red lines.
    xlabel : str, optional
    ylabel : str, optional
    output_file : str or None, optional
        When given, the figure is saved to this path at 1800 dpi.
    """
    plt.figure(nfig)
    plt.plot(x_fit, y_fit, "b")
    plt.errorbar(x, y, fmt="o", xerr=sigma_x, yerr=sigma_y,
                 alpha=1.0, capsize=5, fillstyle="none")
    # Confidence bands are drawn against the fitted y values.
    plt.plot(cp_band_low, y_fit, linestyle="--", linewidth=0.75, color="r")
    plt.plot(cp_band_1, y_fit, linestyle="--", linewidth=0.75, color="r")
    plt.xlim(numpy.min(x_fit), numpy.max(x_fit))
    plt.xlabel(xlabel, fontweight="bold")
    plt.ylabel(ylabel, fontweight="bold")
    plt.tick_params(direction="in", bottom=1, top=1, left=1, right=1)
    plt.tight_layout()
    if output_file:
        plt.savefig(output_file, dpi=1800, bbox_inches="tight")
    # Always close so repeated calls do not accumulate open figures.
    plt.close()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.errorbar"
] | [((633, 649), 'matplotlib.pyplot.figure', 'plt.figure', (['nfig'], {}), '(nfig)\n', (643, 649), True, 'import matplotlib.pyplot as plt\n'), ((655, 682), 'matplotlib.pyplot.plot', 'plt.plot', (['x_fit', 'y_fit', '"""b"""'], {}), "(x_fit, y_fit, 'b')\n", (663, 682), True, 'import matplotlib.pyplot as plt\n'), ((687, 787), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'y'], {'fmt': '"""o"""', 'xerr': 'sigma_x', 'yerr': 'sigma_y', 'alpha': '(1.0)', 'capsize': '(5)', 'fillstyle': '"""none"""'}), "(x, y, fmt='o', xerr=sigma_x, yerr=sigma_y, alpha=1.0, capsize=\n 5, fillstyle='none')\n", (699, 787), True, 'import matplotlib.pyplot as plt\n'), ((807, 878), 'matplotlib.pyplot.plot', 'plt.plot', (['cp_band_low', 'y_fit'], {'linestyle': '"""--"""', 'linewidth': '(0.75)', 'color': '"""r"""'}), "(cp_band_low, y_fit, linestyle='--', linewidth=0.75, color='r')\n", (815, 878), True, 'import matplotlib.pyplot as plt\n'), ((883, 952), 'matplotlib.pyplot.plot', 'plt.plot', (['cp_band_1', 'y_fit'], {'linestyle': '"""--"""', 'linewidth': '(0.75)', 'color': '"""r"""'}), "(cp_band_1, y_fit, linestyle='--', linewidth=0.75, color='r')\n", (891, 952), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1044), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'fontweight': '"""bold"""'}), "(xlabel, fontweight='bold')\n", (1017, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1049, 1086), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontweight': '"""bold"""'}), "(ylabel, fontweight='bold')\n", (1059, 1086), True, 'import matplotlib.pyplot as plt\n'), ((1092, 1157), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'direction': '"""in"""', 'bottom': '(1)', 'top': '(1)', 'left': '(1)', 'right': '(1)'}), "(direction='in', bottom=1, top=1, left=1, right=1)\n", (1107, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1163, 1181), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1179, 1181), True, 'import matplotlib.pyplot as 
plt\n'), ((967, 983), 'numpy.min', 'numpy.min', (['x_fit'], {}), '(x_fit)\n', (976, 983), False, 'import numpy\n'), ((985, 1001), 'numpy.max', 'numpy.max', (['x_fit'], {}), '(x_fit)\n', (994, 1001), False, 'import numpy\n'), ((1211, 1266), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {'dpi': '(1800)', 'bbox_inches': '"""tight"""'}), "(output_file, dpi=1800, bbox_inches='tight')\n", (1222, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1286), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1284, 1286), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env
'''
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
The code in this file was developed at Harvard University (2018) and
modified at ChemOS Inc. (2019) as stated in the NOTICE file.
'''
__author__ = '<NAME>'
#=========================================================================
import numpy as np
#=========================================================================
def sigmoid(x):
    """Map *x* through the logistic function 1 / (1 + e^-x).

    Works element-wise on numpy arrays as well as on scalars.
    """
    denominator = 1. + np.exp(-x)
    return 1. / denominator
#=========================================================================
class NumpyGraph(object):
    """Pure-numpy sampling graph for the Bayesian neural network (BNN).

    Propagates posterior weight/bias samples through a small feed-forward
    network and maps the network outputs onto per-parameter kernel
    distributions (location, precision, scale).
    """

    def __init__(self, config, model_details):
        self.config = config
        self.model_details = model_details
        # Expose every model detail as a private attribute, e.g.
        # model_details['num_layers'] becomes self._num_layers.
        for key, value in self.model_details.items():
            setattr(self, '_%s' % str(key), value)
        self.feature_size = len(self.config.kernel_names)
        self.bnn_output_size = len(self.config.kernel_names)
        self.target_size = len(self.config.kernel_names)

    def declare_training_data(self, features):
        """Register the observed feature matrix used by compute_kernels."""
        self.num_obs = len(features)
        self.features = features

    def compute_kernels(self, posteriors):
        """Run posterior samples through the BNN and build kernel parameters.

        Parameters
        ----------
        posteriors : dict
            Maps 'weight_<i>' / 'bias_<i>' to arrays of sampled layer
            parameters; the first axis indexes the posterior draws.

        Returns
        -------
        dict
            'param_<i>' -> {'loc', 'sqrt_prec', 'scale'} arrays.
        """
        # Precision rescaling: every observation row carries the squared
        # kernel ranges, so precisions are expressed per kernel dimension.
        tau_rescaling = np.zeros((self.num_obs, self.bnn_output_size))
        kernel_ranges = self.config.kernel_ranges
        for obs_index in range(self.num_obs):
            tau_rescaling[obs_index] += kernel_ranges
        tau_rescaling = tau_rescaling**2

        # Forward pass through the BNN: tanh on hidden layers, identity on
        # the output layer.
        activations = [np.tanh, np.tanh, lambda x: x]
        post_layer_outputs = [np.array([self.features for _ in range(self._num_draws)])]
        for layer_index in range(self._num_layers):
            weight = posteriors['weight_%d' % layer_index]
            bias = posteriors['bias_%d' % layer_index]
            activation = activations[layer_index]
            outputs = []
            for sample_index in range(len(weight)):
                single_weight = weight[sample_index]
                single_bias = bias[sample_index]
                output = activation(np.matmul(post_layer_outputs[-1][sample_index], single_weight) + single_bias)
                outputs.append(output)
            post_layer_outputs.append(np.array(outputs))
        post_bnn_output = post_layer_outputs[-1]

        # note: np.random.gamma is parametrized with k and theta, while
        # ed.models.Gamma is parametrized with alpha and beta
        post_tau_normed = np.random.gamma(self.num_obs**2 + np.zeros(post_bnn_output.shape), np.ones(post_bnn_output.shape))
        post_tau = post_tau_normed / tau_rescaling
        post_sqrt_tau = np.sqrt(post_tau)
        post_scale = 1. / post_sqrt_tau

        # Map slices of the BNN output onto the kernel parameter dictionary;
        # each kernel consumes kernel_size consecutive output dimensions.
        post_kernels = {}
        target_element_index = 0
        kernel_element_index = 0
        while kernel_element_index < len(self.config.kernel_names):
            kernel_size = self.config.kernel_sizes[kernel_element_index]
            kernel_begin, kernel_end = kernel_element_index, kernel_element_index + kernel_size
            post_relevant = post_bnn_output[:, :, kernel_begin:kernel_end]
            lowers = self.config.kernel_lowers[kernel_begin:kernel_end]
            uppers = self.config.kernel_uppers[kernel_begin:kernel_end]
            # Squash onto a slightly widened support: 1.2*sigmoid - 0.1 spans
            # (-0.1, 1.1), i.e. a 10% overshoot on both sides of the range.
            post_support = (uppers - lowers) * (1.2 * sigmoid(post_relevant) - 0.1) + lowers
            post_kernels['param_%d' % target_element_index] = {
                'loc': post_support,
                'sqrt_prec': post_sqrt_tau[:, :, kernel_begin:kernel_end],
                'scale': post_scale[:, :, kernel_begin:kernel_end]}
            target_element_index += 1
            kernel_element_index += kernel_size
        return post_kernels
| [
"numpy.sqrt",
"numpy.ones",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.matmul"
] | [((1800, 1846), 'numpy.zeros', 'np.zeros', (['(self.num_obs, self.bnn_output_size)'], {}), '((self.num_obs, self.bnn_output_size))\n', (1808, 1846), True, 'import numpy as np\n'), ((3209, 3226), 'numpy.sqrt', 'np.sqrt', (['post_tau'], {}), '(post_tau)\n', (3216, 3226), True, 'import numpy as np\n'), ((1150, 1160), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1156, 1160), True, 'import numpy as np\n'), ((2788, 2805), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (2796, 2805), True, 'import numpy as np\n'), ((3105, 3135), 'numpy.ones', 'np.ones', (['post_bnn_output.shape'], {}), '(post_bnn_output.shape)\n', (3112, 3135), True, 'import numpy as np\n'), ((3072, 3103), 'numpy.zeros', 'np.zeros', (['post_bnn_output.shape'], {}), '(post_bnn_output.shape)\n', (3080, 3103), True, 'import numpy as np\n'), ((2657, 2719), 'numpy.matmul', 'np.matmul', (['post_layer_outputs[-1][sample_index]', 'single_weight'], {}), '(post_layer_outputs[-1][sample_index], single_weight)\n', (2666, 2719), True, 'import numpy as np\n')] |
# Copyright 2018-2019 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
def create_lineplot(data, columns, title, ylim, filename):
    """Draw a per-language accuracy line plot for the three classifiers.

    Parameters
    ----------
    data : pandas.DataFrame
        Accuracy values indexed by language.
    columns : list of str
        Column names to plot, ordered Lingua, Tika, Optimaize to match the
        legend labels and palette below.
    title : str
        Plot title.
    ylim : list
        [lower, upper] y-axis limits.
    filename : str
        File name under 'images/plots/' to save the figure to.
    """
    filtered_data = data.loc[:, columns]
    plt.figure(figsize=(45, 16))
    plt.title(title, fontsize=45, fontweight='bold')
    plt.xticks(rotation=90, fontsize=35)
    plt.yticks(fontsize=35, ticks=np.arange(0, 101, 10))
    plt.grid(color='#A6A6A6')
    ax = sns.lineplot(data=filtered_data, linewidth=5, palette=['green', 'orange', 'red'])
    ax.set_ylim(ylim)
    ax.legend(labels=['Lingua 0.5.0', 'Tika 1.21', 'Optimaize 0.6'], fontsize=28, loc='lower left')
    ax.set_xlabel('Language', fontsize=38, fontweight='bold')
    ax.set_ylabel('Accuracy (%)', fontsize=38, fontweight='bold')
    plt.tight_layout()
    plt.savefig('images/plots/' + filename, dpi=72)
    # Close the figure so the 12 plots produced by this script do not
    # accumulate open figures (matplotlib warns and leaks memory otherwise).
    plt.close()
def create_boxplot(data, columns, title, ylim, filename):
    """Draw an accuracy box plot comparing the three classifiers.

    Parameters
    ----------
    data : pandas.DataFrame
        Accuracy values indexed by language.
    columns : list of str
        Column names to plot, ordered Optimaize, Tika, Lingua to match the
        x-tick labels and palette below.
    title : str
        Plot title.
    ylim : list
        [lower, upper] y-axis limits.
    filename : str
        File name under 'images/plots/' to save the figure to.
    """
    filtered_data = data.loc[:, columns]
    plt.figure(figsize=(32,12))
    plt.title(title, fontsize=45, fontweight='bold')
    plt.xticks(fontsize=35)
    plt.yticks(fontsize=35)
    plt.grid(color='#A6A6A6')
    ax = sns.boxplot(data=filtered_data, linewidth=5, palette=['red', 'orange', 'green'])
    ax.set_ylim(ylim)
    ax.set_xlabel('Classifier', fontsize=38, fontweight='bold')
    ax.set_ylabel('Accuracy (%)', fontsize=38, fontweight='bold')
    ax.set_xticklabels(['Optimaize 0.6', 'Tika 1.21', 'Lingua 0.5.0'])
    plt.tight_layout()
    plt.savefig('images/plots/' + filename, dpi=72)
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
def create_barplot(data, columns, title, ylim, filename):
    """Draw a mean-accuracy bar plot (with standard-deviation error bars)
    comparing the three classifiers.

    Parameters
    ----------
    data : pandas.DataFrame
        Accuracy values indexed by language.
    columns : list of str
        Column names to plot, ordered Optimaize, Tika, Lingua to match the
        x-tick labels and palette below.
    title : str
        Plot title.
    ylim : list
        [lower, upper] y-axis limits.
    filename : str
        File name under 'images/plots/' to save the figure to.
    """
    filtered_data = data.loc[:, columns]
    plt.figure(figsize=(32,12))
    plt.title(title, fontsize=45, fontweight='bold')
    plt.xticks(fontsize=35)
    plt.yticks(fontsize=35)
    plt.grid(color='#A6A6A6')
    ax = sns.barplot(
        data=filtered_data,
        palette=['red', 'orange', 'green'],
        errwidth=7.0,
        ci='sd',
        capsize=.1
    )
    ax.set_ylim(ylim)
    ax.set_xlabel('Classifier', fontsize=38, fontweight='bold')
    ax.set_ylabel('Mean Accuracy (%)', fontsize=38, fontweight='bold')
    ax.set_xticklabels(['Optimaize 0.6', 'Tika 1.21', 'Lingua 0.5.0'])
    plt.tight_layout()
    plt.savefig('images/plots/' + filename, dpi=72)
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
sns.set()
sns.set_style('whitegrid')

# Load the aggregated accuracy report and order its columns alphabetically.
accuracy_values_data_frame = pd.read_csv(
    filepath_or_buffer='accuracy-reports/aggregated-accuracy-values.csv',
    delim_whitespace=True
).set_index('language')

accuracy_values_data_frame = accuracy_values_data_frame.reindex(
    sorted(accuracy_values_data_frame.columns),
    axis=1
)

# Plot kinds: (plot function, file-name prefix, classifier column order).
# Line plots use Lingua/Tika/Optimaize order; box and bar plots the reverse.
_PLOTTERS = (
    (create_lineplot, 'lineplot', ['lingua', 'tika', 'optimaize']),
    (create_boxplot, 'boxplot', ['optimaize', 'tika', 'lingua']),
    (create_barplot, 'barplot', ['optimaize', 'tika', 'lingua']),
)

# Report sections: (column prefix, title, file suffix,
#                   (line ylim, box ylim, bar ylim)).
_SECTIONS = (
    ('single-words', 'Single Word Detection', 'singlewords',
     ([0, 100], [0, 100], [0, 100])),
    ('word-pairs', 'Word Pair Detection', 'wordpairs',
     ([0, 100], [0, 100], [0, 120])),
    ('sentences', 'Sentence Detection', 'sentences',
     ([10, 100], [75, 100], [0, 120])),
    ('average', 'Average Detection', 'average',
     ([0, 100], [0, 100], [0, 105])),
)

for prefix, section_title, suffix, section_ylims in _SECTIONS:
    for (plotter, plot_kind, classifier_order), plot_ylim in zip(_PLOTTERS, section_ylims):
        plotter(
            data=accuracy_values_data_frame,
            columns=['%s-%s' % (prefix, classifier) for classifier in classifier_order],
            title=section_title,
            ylim=plot_ylim,
            filename='%s-%s.png' % (plot_kind, suffix)
        )

print("All plots created successfully")
| [
"seaborn.set",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.use",
"seaborn.set_style",
"seaborn.lineplot",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplo... | [((598, 621), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (612, 621), False, 'import matplotlib\n'), ((2852, 2861), 'seaborn.set', 'sns.set', ([], {}), '()\n', (2859, 2861), True, 'import seaborn as sns\n'), ((2862, 2888), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (2875, 2888), True, 'import seaborn as sns\n'), ((823, 851), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(45, 16)'}), '(figsize=(45, 16))\n', (833, 851), True, 'import matplotlib.pyplot as plt\n'), ((856, 904), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(45)', 'fontweight': '"""bold"""'}), "(title, fontsize=45, fontweight='bold')\n", (865, 904), True, 'import matplotlib.pyplot as plt\n'), ((909, 945), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)', 'fontsize': '(35)'}), '(rotation=90, fontsize=35)\n', (919, 945), True, 'import matplotlib.pyplot as plt\n'), ((1007, 1032), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""#A6A6A6"""'}), "(color='#A6A6A6')\n", (1015, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1128), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'filtered_data', 'linewidth': '(5)', 'palette': "['green', 'orange', 'red']"}), "(data=filtered_data, linewidth=5, palette=['green', 'orange',\n 'red'])\n", (1055, 1128), True, 'import seaborn as sns\n'), ((1380, 1398), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1396, 1398), True, 'import matplotlib.pyplot as plt\n'), ((1403, 1450), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/plots/' + filename)"], {'dpi': '(72)'}), "('images/plots/' + filename, dpi=72)\n", (1414, 1450), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1585), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(32, 12)'}), '(figsize=(32, 12))\n', (1567, 1585), True, 'import matplotlib.pyplot as plt\n'), ((1589, 1637), 'matplotlib.pyplot.title', 'plt.title', 
(['title'], {'fontsize': '(45)', 'fontweight': '"""bold"""'}), "(title, fontsize=45, fontweight='bold')\n", (1598, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1665), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(35)'}), '(fontsize=35)\n', (1652, 1665), True, 'import matplotlib.pyplot as plt\n'), ((1670, 1693), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(35)'}), '(fontsize=35)\n', (1680, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1723), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""#A6A6A6"""'}), "(color='#A6A6A6')\n", (1706, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1819), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'filtered_data', 'linewidth': '(5)', 'palette': "['red', 'orange', 'green']"}), "(data=filtered_data, linewidth=5, palette=['red', 'orange', 'green']\n )\n", (1745, 1819), True, 'import seaborn as sns\n'), ((2043, 2061), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2059, 2061), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2113), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/plots/' + filename)"], {'dpi': '(72)'}), "('images/plots/' + filename, dpi=72)\n", (2077, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2220, 2248), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(32, 12)'}), '(figsize=(32, 12))\n', (2230, 2248), True, 'import matplotlib.pyplot as plt\n'), ((2252, 2300), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(45)', 'fontweight': '"""bold"""'}), "(title, fontsize=45, fontweight='bold')\n", (2261, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2305, 2328), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(35)'}), '(fontsize=35)\n', (2315, 2328), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2356), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(35)'}), '(fontsize=35)\n', (2343, 2356), True, 'import matplotlib.pyplot as plt\n'), 
((2361, 2386), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""#A6A6A6"""'}), "(color='#A6A6A6')\n", (2369, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2397, 2504), 'seaborn.barplot', 'sns.barplot', ([], {'data': 'filtered_data', 'palette': "['red', 'orange', 'green']", 'errwidth': '(7.0)', 'ci': '"""sd"""', 'capsize': '(0.1)'}), "(data=filtered_data, palette=['red', 'orange', 'green'],\n errwidth=7.0, ci='sd', capsize=0.1)\n", (2408, 2504), True, 'import seaborn as sns\n'), ((2779, 2797), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2795, 2797), True, 'import matplotlib.pyplot as plt\n'), ((2802, 2849), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('images/plots/' + filename)"], {'dpi': '(72)'}), "('images/plots/' + filename, dpi=72)\n", (2813, 2849), True, 'import matplotlib.pyplot as plt\n'), ((2919, 3028), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': '"""accuracy-reports/aggregated-accuracy-values.csv"""', 'delim_whitespace': '(True)'}), "(filepath_or_buffer=\n 'accuracy-reports/aggregated-accuracy-values.csv', delim_whitespace=True)\n", (2930, 3028), True, 'import pandas as pd\n'), ((980, 1001), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(10)'], {}), '(0, 101, 10)\n', (989, 1001), True, 'import numpy as np\n')] |
import h5py
import numpy
import os
## This file was written by <NAME> (<EMAIL>) for GIZMO ##
def load_from_snapshot(value,ptype,sdir,snum,particle_mask=numpy.zeros(0),axis_mask=numpy.zeros(0),
        units_to_physical=True,four_char=False,snapshot_name='snapshot',snapdir_name='snapdir',extension='.hdf5',name_addition=''):
    '''
    The routine 'load_from_snapshot' is designed to load quantities directly from GIZMO
    snapshots in a robust manner, independent of the detailed information actually saved
    in the snapshot. It is able to do this because of how HDF5 works, so it --only--
    works for HDF5-format snapshots [For binary-format, you need to know -exactly- the
    datasets saved and their order in the file, which means you cannot do this, and should
    use the 'readsnap.py' routine instead.]
    The routine automatically handles multi-part snapshot files for you (concatenating).
    This should work with both python2.x and python3.x

    Syntax:
      loaded_value = load_from_snapshot(value,ptype,sdir,snum,....)
      For example, to load the coordinates of gas (type=0) elements in the file
      snapshot_001.hdf5 (snum=1) located in the active directory ('.'), just call
      xyz_coordinates = load_from_snapshot('Coordinates',0,'.',1)
      More details and examples are given in the GIZMO user guide.

    Arguments:
      value: the value to extract from the HDF5 file. this is a string with the same name
             as in the HDF5 file. if you arent sure what those values might be, setting
             value to 'keys' will return a list of all the HDF5 keys for the chosen
             particle type, or 'header_keys' will return all the keys in the header.
             (example: 'Time' returns the simulation time in code units (single scalar).
              'Coordinates' will return the [x,y,z] coordinates in an [N,3]
              matrix for the N resolution elements of the chosen type)
      ptype: element type (int) = 0[gas],1,2,3,4,5[meaning depends on simulation, see
             user guide for details]. if your chosen 'value' is in the file header,
             this will be ignored
      sdir: parent directory (string) of the snapshot file or immediate snapshot
            sub-directory if it is a multi-part file
      snum: number (int) of the snapshot. e.g. snapshot_001.hdf5 is '1'
            Note for multi-part files, this is just the number of the 'set', i.e.
            if you have snapshot_001.N.hdf5, set this to '1', not 'N' or '1.N'

    Optional:
      particle_mask: if set to a mask (boolean array), of length N where N is the number
                     of elements of the desired ptype, will return only those elements
      axis_mask: if set to a mask (boolean array), return only the chosen -axis-. this
                 is useful for some quantities like metallicity fields, with [N,X] dimensions
                 where X is large (lets you choose to read just one of the "X")
      units_to_physical: default 'True': code will auto-magically try to detect if the
                         simulation is cosmological by comparing time and redshift information in the
                         snapshot, and if so, convert units to physical. if you want default snapshot units
                         set this to 'False'
      four_char: default numbering is that snapshots with numbers below 1000 have
                 three-digit numbers. if they were numbered with four digits (e.g. snapshot_0001),
                 set this to 'True' (default False)
      snapshot_name: default 'snapshot': the code will automatically try a number of
                     common snapshot and snapshot-directory prefixes. but it can't guess all of them,
                     especially if you use an unusual naming convention, e.g. naming your snapshots
                     'xyzBearsBeetsBattleStarGalactica_001.hdf5'. In that case set this to the
                     snapshot name prefix (e.g. 'xyzBearsBeetsBattleStarGalactica')
      snapdir_name: default 'snapdir': like 'snapshot_name', set this if you use a
                    non-standard prefix for snapshot subdirectories (directories holding multi-part
                    snapshots pieces)
      extension: default 'hdf5': again like 'snapshot' set if you use a non-standard
                 extension (it checks multiply options like 'h5' and 'hdf5' and 'bin'). but
                 remember the file must actually be hdf5 format!
    '''
    # attempt to verify if a file with this name and directory path actually exists
    fname,fname_base,fname_ext = check_if_filename_exists(sdir,snum,\
        snapshot_name=snapshot_name,snapdir_name=snapdir_name,extension=extension,four_char=four_char,name_addition=name_addition)

    # if no valid file found, give up
    if(fname=='NULL'):
        print('Could not find a valid file with this path/name/extension - please check these settings')
        return 0
    # check if file has the correct extension
    if(fname_ext!=extension):
        print('File has the wrong extension, you specified ',extension,' but found ',fname_ext,' - please specify this if it is what you actually want')
        return 0

    # try to open the file
    try:
        file = h5py.File(fname,'r') # Open hdf5 snapshot file
    except:
        print('Unexpected error: could not read hdf5 file ',fname,' . Please check the format, name, and path information is correct')
        return 0

    # try to parse the header
    try:
        header_toparse = file["Header"].attrs # Load header dictionary (to parse below)
    except:
        print('Was able to open the file but not the header, please check this is a valid GIZMO hdf5 file')
        file.close()
        return 0

    # check if desired value is contained in header -- if so just return it and exit
    if(value=='header_keys')|(value=='Header_Keys')|(value=='HEADER_KEYS')|(value=='headerkeys')|(value=='HeaderKeys')|(value=='HEADERKEYS')|((value=='keys' and not (ptype==0 or ptype==1 or ptype==2 or ptype==3 or ptype==4 or ptype==5))):
        q = header_toparse.keys()
        print('Returning list of keys from header, includes: ',q)
        file.close()
        return q
    if(value in header_toparse):
        q = header_toparse[value] # value contained in header, no need to go further
        file.close()
        return q

    # ok desired quantity is not in the header, so we need to go into the particle data
    # check that a valid particle type is specified
    if not (ptype==0 or ptype==1 or ptype==2 or ptype==3 or ptype==4 or ptype==5):
        print('Particle type needs to be an integer = 0,1,2,3,4,5. Returning 0')
        file.close()
        return 0
    # check that the header contains the expected data needed to parse the file
    if not ('NumFilesPerSnapshot' in header_toparse and 'NumPart_Total' in header_toparse
        and 'Time' in header_toparse and 'Redshift' in header_toparse
        and 'HubbleParam' in header_toparse and 'NumPart_ThisFile' in header_toparse):
        print('Header appears to be missing critical information. Please check that this is a valid GIZMO hdf5 file')
        file.close()
        return 0
    # parse data needed for checking sub-files
    numfiles = header_toparse["NumFilesPerSnapshot"]
    npartTotal = header_toparse["NumPart_Total"]
    if(npartTotal[ptype]<1):
        print('No particles of designated type exist in this snapshot, returning 0')
        file.close()
        return 0
    # parse data needed for converting units [if necessary]
    if(units_to_physical):
        time = header_toparse["Time"]
        z = header_toparse["Redshift"]
        hubble = header_toparse["HubbleParam"]
        cosmological = False
        ascale = 1.0;
        # attempt to guess if this is a cosmological simulation from the agreement or lack thereof between time and redshift. note at t=1,z=0, even if non-cosmological, this won't do any harm
        if(numpy.abs(time*(1.+z)-1.) < 1.e-6):
            cosmological=True; ascale=time;

    # close the initial header we are parsing
    file.close()

    # now loop over all snapshot segments to identify and extract the relevant particle data
    check_counter = 0
    # fallback container: returned empty if no sub-file actually holds data of this type
    q = numpy.zeros(0)
    for i_file in range(numfiles):
        # augment snapshot sub-set number
        if (numfiles>1): fname = fname_base+'.'+str(i_file)+fname_ext
        # check for existence and non-zero size of file. note: os.stat raises
        # FileNotFoundError on a missing file, so guard with os.path.exists first
        # (otherwise the 'missing file' diagnostic below could never be reached)
        if(os.path.exists(fname) and os.stat(fname).st_size>0):
            # exists, now try to read it
            try:
                file = h5py.File(fname,'r') # Open hdf5 snapshot file
            except:
                print('Unexpected error: could not read hdf5 file ',fname,' . Please check the format, name, and path information is correct, and that this file is not corrupted')
                return 0
            # read in, now attempt to parse. first check for needed information on particle number
            npart = file["Header"].attrs["NumPart_ThisFile"]
            if(npart[ptype] >= 1):
                # return particle key data, if requested
                if((value=='keys')|(value=='Keys')|(value=='KEYS')):
                    q = list(file['PartType'+str(ptype)].keys())
                    print('Returning list of valid keys for this particle type: ',q)
                    file.close()
                    return q
                # check if requested data actually exists as a valid keyword in the file
                if not (value in file['PartType'+str(ptype)].keys()):
                    print('The value ',value,' given does not appear to exist in the file ',fname," . Please check that you have specified a valid keyword. You can run this routine with the value 'keys' to return a list of valid value keys. Returning 0")
                    file.close()
                    return 0
                # now actually read the data
                axis_mask = numpy.array(axis_mask)
                if(axis_mask.size > 0):
                    q_t = numpy.array(file['PartType'+str(ptype)+'/'+value+'/']).take(axis_mask,axis=1)
                else:
                    q_t = numpy.array(file['PartType'+str(ptype)+'/'+value+'/'])
                # check data has non-zero size
                if(q_t.size > 0):
                    # if this is the first time we are actually reading it, parse it and determine the shape of the vector, to build the data container
                    if(check_counter == 0):
                        qshape=numpy.array(q_t.shape); qshape[0]=0; q=numpy.zeros(qshape); check_counter+=1;
                    # add the data to our appropriately-shaped container, now
                    try:
                        q = numpy.concatenate([q,q_t],axis=0)
                    except:
                        print('Could not concatenate data for ',value,' in file ',fname,' . The format appears to be inconsistent across your snapshots or with the usual GIZMO conventions. Please check this is a valid GIZMO snapshot file.')
                        file.close()
                        return 0
            file.close()
        else:
            print('Expected file ',fname,' appears to be missing. Check if your snapshot has the complete data set here')

    # convert units if requested by the user. note this only does a few obvious units: there are many possible values here which cannot be anticipated!
    if(units_to_physical):
        hinv=1./hubble; rconv=ascale*hinv;
        if((value=='Coordinates')|(value=='SmoothingLength')): q*=rconv; # comoving length
        if(value=='Velocities'): q *= numpy.sqrt(ascale); # special comoving velocity units
        if((value=='Density')|(value=='Pressure')): q *= hinv/(rconv*rconv*rconv); # density = mass/comoving length^3
        if((value=='StellarFormationTime')&(cosmological==False)): q*=hinv; # time has h^-1 in non-cosmological runs
        if((value=='Masses')|('BH_Mass' in value)|(value=='CosmicRayEnergy')|(value=='PhotonEnergy')): q*=hinv; # mass x [no-h] units

    # return final value, if we have not already
    particle_mask=numpy.array(particle_mask)
    if(particle_mask.size > 0): q=q.take(particle_mask,axis=0)
    return q
def check_if_filename_exists(sdir,snum,snapshot_name='snapshot',snapdir_name='snapdir',extension='.hdf5',four_char=False,name_addition=''):
    """Locate a GIZMO snapshot file for snapshot number `snum` under `sdir`.

    For each candidate extension (the given one, then '.h5', '.bin', ''),
    several common naming conventions are probed in a fixed order: a single
    file, a multi-part file ('.0' piece), the shorter 'snap_' prefix, a
    'snap_<dirname>_' prefix, and finally multi-part files inside a
    'snapdir_NNN' sub-directory (with both 'snapshot_' and 'snap_' names).

    Returns:
        (fname_found, fname_base_found, fname_ext): full path of the first
        existing, non-empty match, its base name (without part number and
        extension) and the extension used; all three are the string 'NULL'
        when nothing suitable exists.
    """
    # Zero-padded snapshot tag: 3 digits by default, 4 with four_char,
    # un-padded once snum >= 1000.
    num_tag = '00' + str(snum)
    if snum >= 10:
        num_tag = '0' + str(snum)
    if snum >= 100:
        num_tag = str(snum)
    if four_char == True:
        num_tag = '0' + num_tag
    if snum >= 1000:
        num_tag = str(snum)
    # Directory-name component used by the 'snap_<dirname>_NNN' convention.
    parts = sdir.split("/")
    snapdir_specific = parts[len(parts) - 1]
    if len(snapdir_specific) <= 1:
        snapdir_specific = parts[len(parts) - 2]
    fname_found = fname_base_found = fname_ext = 'NULL'
    for extension_touse in [extension, '.h5', '.bin', '']:
        # (base name, multi-part suffix) pairs, probed in the historical order.
        candidates = [
            (sdir + '/' + snapshot_name + '_' + num_tag + name_addition, ''),
            (sdir + '/' + snapshot_name + '_' + num_tag + name_addition, '.0'),
            (sdir + '/snap_' + num_tag, ''),
            (sdir + '/snap_' + num_tag, '.0'),
            (sdir + '/snap_' + snapdir_specific + '_' + num_tag, ''),
            (sdir + '/snap_' + snapdir_specific + '_' + num_tag, '.0'),
            (sdir + '/' + snapdir_name + '_' + num_tag + '/' + snapshot_name + '_' + num_tag, '.0'),
            (sdir + '/' + snapdir_name + '_' + num_tag + '/snap_' + num_tag, '.0'),
        ]
        fname_found = fname_base_found = fname_ext = 'NULL'
        hit = None
        for base, part_suffix in candidates:
            candidate = base + part_suffix + extension_touse
            if os.path.exists(candidate):
                hit = (candidate, base)
                break
        if hit is None:
            continue  # nothing under this extension; try the next one
        if os.stat(hit[0]).st_size <= 0:
            continue  # a file exists but is empty: not usable
        fname_found, fname_base_found = hit
        fname_ext = extension_touse
        break  # filename does exist!
    return fname_found, fname_base_found, fname_ext
| [
"numpy.abs",
"os.path.exists",
"numpy.sqrt",
"h5py.File",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"os.stat"
] | [((154, 168), 'numpy.zeros', 'numpy.zeros', (['(0)'], {}), '(0)\n', (165, 168), False, 'import numpy\n'), ((179, 193), 'numpy.zeros', 'numpy.zeros', (['(0)'], {}), '(0)\n', (190, 193), False, 'import numpy\n'), ((12032, 12058), 'numpy.array', 'numpy.array', (['particle_mask'], {}), '(particle_mask)\n', (12043, 12058), False, 'import numpy\n'), ((5189, 5210), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (5198, 5210), False, 'import h5py\n'), ((7908, 7941), 'numpy.abs', 'numpy.abs', (['(time * (1.0 + z) - 1.0)'], {}), '(time * (1.0 + z) - 1.0)\n', (7917, 7941), False, 'import numpy\n'), ((11541, 11559), 'numpy.sqrt', 'numpy.sqrt', (['ascale'], {}), '(ascale)\n', (11551, 11559), False, 'import numpy\n'), ((14754, 14775), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (14768, 14775), False, 'import os\n'), ((14884, 14905), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (14898, 14905), False, 'import os\n'), ((15073, 15094), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (15087, 15094), False, 'import os\n'), ((15251, 15272), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (15265, 15272), False, 'import os\n'), ((15470, 15491), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (15484, 15491), False, 'import os\n'), ((15648, 15669), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (15662, 15669), False, 'import os\n'), ((15909, 15930), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (15923, 15930), False, 'import os\n'), ((16159, 16180), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (16173, 16180), False, 'import os\n'), ((8370, 8384), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (8377, 8384), False, 'import os\n'), ((8479, 8500), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (8488, 8500), False, 'import h5py\n'), ((9834, 9856), 'numpy.array', 'numpy.array', 
(['axis_mask'], {}), '(axis_mask)\n', (9845, 9856), False, 'import numpy\n'), ((16390, 16404), 'os.stat', 'os.stat', (['fname'], {}), '(fname)\n', (16397, 16404), False, 'import os\n'), ((10434, 10456), 'numpy.array', 'numpy.array', (['q_t.shape'], {}), '(q_t.shape)\n', (10445, 10456), False, 'import numpy\n'), ((10473, 10492), 'numpy.zeros', 'numpy.zeros', (['qshape'], {}), '(qshape)\n', (10484, 10492), False, 'import numpy\n'), ((10643, 10678), 'numpy.concatenate', 'numpy.concatenate', (['[q, q_t]'], {'axis': '(0)'}), '([q, q_t], axis=0)\n', (10660, 10678), False, 'import numpy\n')] |
import matplotlib
matplotlib.use('Agg')
import torch
from torch.utils.data import DataLoader
from data_loader import ecg_dataset_simple, ecg_dataset_complex
from data_loader import ecg_dataset_complex_PCA
from data_loader import ecg_dataset_very_simple
from nn_model import simple_net, complex_net, end_to_end_model
from nn_model import partial_end_to_end_model, end_to_end_fc_model
from nn_model import end_to_end_fc_model_no_bn
from nn_model import MLCNN
from nets import get_senet_small
from nn_model import Graph_ConvNet_LeNet5
from nn_trainer import simple_trainer, end_to_end_trainer
from nn_trainer import ml_cnn_trainer
from nn_trainer import ml_cnn_consensus_trainer
# import torch.nn.functional as F
import numpy as np
import pdb
# import mat
# import pathlib
from pathlib import Path
from cfg import process_config
from nn_model import loss_with_consensus
from ptbds import data
# Pick CUDA tensor types when a GPU is present, CPU types otherwise, and
# seed the RNG of whichever device will be used so runs are reproducible.
if torch.cuda.is_available():
    print('cuda available')
    dtypeFloat, dtypeLong = torch.cuda.FloatTensor, torch.cuda.LongTensor
    torch.cuda.manual_seed(1)
else:
    print('cuda not available')
    dtypeFloat, dtypeLong = torch.FloatTensor, torch.LongTensor
    torch.manual_seed(1)
def get_pca(dim=149, split=75):
    """Build two orthonormal subspaces from a random symmetric matrix.

    A random `dim x dim` matrix is symmetrized and decomposed with SVD; the
    columns of the left singular-vector matrix (an orthogonal basis) are
    split into an "odd" block and an "even" block.

    Args:
        dim: size of the square symmetric matrix (default 149, the value
            previously hard-coded).
        split: column index at which the basis is split (default 75).

    Returns:
        (odd_subspace, even_subspace): arrays of shape (dim, split) and
        (dim, dim - split) whose columns are orthonormal.
    """
    sym = np.random.random((dim, dim))
    sym = sym + sym.T  # symmetrize so the singular vectors form an orthogonal basis
    u, _, _ = np.linalg.svd(sym)
    return u[:, :split], u[:, split:]
if __name__ == "__main__":
    # Entry point: train the small SE-Net ECG classifier with the
    # consensus trainer, using data loaders supplied by the ptbds package.
    config = process_config('config_mlcnn.json')
    # ptb_tdir = Path(config.data_dir)
    # ptb_tdir_str = str(ptb_tdir / 'data') + '/'
    # ptb_dat_dir = ptb_tdir / 'data'
    # patient_list_file = str(config.patient_file)
    # control_list_file = str(config.control_file)
    # positive_list_file = str(config.positive_file)
    # with open(patient_list_file, 'r') as f:
    #     patient_list = f.read().splitlines()
    # with open(control_list_file, 'r') as f:
    #     control_list = f.read().splitlines()
    # with open(positive_list_file, 'r') as f:
    #     positive_list = f.read().splitlines()
    # May need to do proper beat segmentation
    # Din = config.Din
    # batch_size = config.train['batch_size']
    # num_tr_points = config.num_tr_points
    # frac_tr_points = config.frac_tr_points
    # num_tr_points = int(frac_tr_points * len(patient_list))
    # channels = config.channels
    # tot_contr_post = len(control_list) + len(positive_list)
    # contr_tr_pts = int(frac_tr_points * len(control_list))
    # post_tr_pts = int(frac_tr_points * len(positive_list))
    # contr_tr_pts = int(num_tr_points*len(control_list)/tot_contr_post)
    # post_tr_pts = int(num_tr_points*len(positive_list)/tot_contr_post)
    # remain_tr_pts = num_tr_points - contr_tr_pts - post_tr_pts
    # remainder_list = list(set(patient_list) ^ set(control_list) ^ set(positive_list))
    # pdb.set_trace()
    # train_list = (control_list[:contr_tr_pts] + positive_list[:post_tr_pts])
    # train_list = (control_list[:contr_tr_pts] + positive_list[:post_tr_pts] +
    #               remainder_list[:remain_tr_pts])
    # test_list = (control_list[contr_tr_pts:] + positive_list[post_tr_pts:])
    # test_list = (control_list[contr_tr_pts:] + positive_list[post_tr_pts:] +
    #              remainder_list[remain_tr_pts:])
    # contr_list = (control_list[:contr_tr_pts])
    # post_list = (positive_list[:post_tr_pts])
    # num_inp_channels = len(channels)
    # odd_subspace, even_subspace = get_pca()
    # pdb.set_trace()
    # pdb.set_trace()
    # NOTE(review): hard-coded to GPU index 1 — confirm the target machine
    # has at least two GPUs; loss_ce.cuda() below also requires CUDA.
    with torch.cuda.device(1):
        # x = 'patient095/s0377lre'
        # if x in train_list:
        #     train_list.remove('patient095/s0377lre')
        # elif x in test_list:
        #     test_list.remove('patient095/s0377lre')
        # # ecg_train_loader = DataLoader(ecg_dataset_complex(ptb_tdir_str, train_list,
        #                                                   control_list,
        #                                                   positive_list,
        #                                                   Din,
        #                                                   num_consensus=config['cons'],
        #                                                   channels=channels),
        #                               batch_size=batch_size, shuffle=True, num_workers=2)
        # ecg_test_loader = DataLoader(ecg_dataset_complex(ptb_tdir_str, test_list,
        #                                                  control_list,
        #                                                  positive_list,
        #                                                  Din,
        #                                                  num_consensus=config['cons'],
        #                                                  channels=channels),
        #                              batch_size=batch_size, shuffle=False, num_workers=2)
        # Pre-built train/validation loaders from the ptbds data module.
        ecg_train_loader = data.trn_dl
        ecg_test_loader = data.val_dl
        # tot = tot_contr_post
        # c1 = len(positive_list) / tot
        # c0 = len(control_list) / tot
        # loss_ce = torch.nn.CrossEntropyLoss().type(
        #     dtypeFloat))
        # NLLLoss: assumes the network emits log-probabilities (e.g. a
        # log-softmax output layer) — TODO confirm against get_senet_small().
        loss_ce = torch.nn.NLLLoss()
        loss_ce.cuda()
        # loss_fn = loss_with_consensus(loss_ce, config)
        # loss_fn.cuda()
        # e2e_nn = end_to_end_fc_model_no_bn(cnet_parameters, gnet_parameters)
        # ml_cnn_nn = MLCNN(config)
        ml_cnn_nn = get_senet_small()
        # e2e_trainer = end_to_end_trainer(e2e_nn, ecg_train_loader,
        #                                  ecg_test_loader, loss_fn, tovis=False)
        ml_trainer = ml_cnn_trainer(config, ecg_train_loader, ecg_test_loader,
                                    ml_cnn_nn, loss_ce, optimizer='adam')
        # pdb.set_trace()
        ml_trainer.train_model(num_epoch=30)
        # ml_trainer.test_model()
        # get all the last layer predn from the CNN
        # Put the weights onto the graph
        # graph structure to learn on is very small
        # Basically equivalent to making N different
        # GCNs and working with them.
        # Take the graph and extrapolate it backwards
        # Use LeNet like structure here as well
| [
"torch.manual_seed",
"torch.cuda.device",
"matplotlib.use",
"numpy.random.random",
"nets.get_senet_small",
"torch.cuda.is_available",
"torch.nn.NLLLoss",
"numpy.linalg.svd",
"nn_trainer.ml_cnn_trainer",
"torch.cuda.manual_seed",
"cfg.process_config"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((895, 920), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (918, 920), False, 'import torch\n'), ((1032, 1057), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (1054, 1057), False, 'import torch\n'), ((1168, 1188), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (1185, 1188), False, 'import torch\n'), ((1215, 1243), 'numpy.random.random', 'np.random.random', (['(149, 149)'], {}), '((149, 149))\n', (1231, 1243), True, 'import numpy as np\n'), ((1280, 1297), 'numpy.linalg.svd', 'np.linalg.svd', (['A1'], {}), '(A1)\n', (1293, 1297), True, 'import numpy as np\n'), ((1440, 1475), 'cfg.process_config', 'process_config', (['"""config_mlcnn.json"""'], {}), "('config_mlcnn.json')\n", (1454, 1475), False, 'from cfg import process_config\n'), ((3498, 3518), 'torch.cuda.device', 'torch.cuda.device', (['(1)'], {}), '(1)\n', (3515, 3518), False, 'import torch\n'), ((5133, 5151), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (5149, 5151), False, 'import torch\n'), ((5392, 5409), 'nets.get_senet_small', 'get_senet_small', ([], {}), '()\n', (5407, 5409), False, 'from nets import get_senet_small\n'), ((5549, 5648), 'nn_trainer.ml_cnn_trainer', 'ml_cnn_trainer', (['config', 'ecg_train_loader', 'ecg_test_loader', 'ml_cnn_nn', 'loss_ce'], {'optimizer': '"""adam"""'}), "(config, ecg_train_loader, ecg_test_loader, ml_cnn_nn,\n loss_ce, optimizer='adam')\n", (5563, 5648), False, 'from nn_trainer import ml_cnn_trainer\n')] |
'''
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
'''
import os
import time
import datetime
import argparse
import rasterio
import rasterio.mask
import numpy as np
import glob
from skimage.measure import find_contours
from skimage.draw import polygon
# CLI: --model_predictions is the directory written by the inference step
# (containing 'predictions', 'predictions3' and 'tiles' sub-dirs, per main());
# --output_dir receives the post-processed rasters. Parsed once at import.
parser = argparse.ArgumentParser(description='Solar Installations mapping post-processing script')
parser.add_argument('--model_predictions', type=str, help='Path to a where \
predictions from model 1 are stored')
parser.add_argument('--output_dir', type=str, help='Path to a directory where \
outputs will be saved. This directory will be created if it does \
not exist.')
args = parser.parse_args()
def main():
    """Combine per-tile predictions from two models into cleaned masks.

    For every *.tif under <model_predictions>/predictions, intersects it
    with the same-named raster under <model_predictions>/predictions3,
    zeroes pixels flagged as water / cloud / snow by simple band rules on
    the matching image tile, removes small connected polygons, and writes
    a colormapped <name>_postprocessed.tif into args.output_dir.
    """
    print("Starting postprocessing script at %s" % (str(datetime.datetime.now())))
    # -------------------
    # Load files
    # -------------------
    assert os.path.exists(args.model_predictions)
    # Ensure output directory exists
    if os.path.exists(args.output_dir):
        if len(os.listdir(args.output_dir)) > 0:
            print("WARNING: The output directory is not empty, but we are ignoring that and writing data.")
    else:
        os.makedirs(args.output_dir, exist_ok=True)
    fns = []
    fns.extend(glob.glob(os.path.join(args.model_predictions + '/predictions', '*.tif')))
    print("Running on %d files" % (len(fns)))
    # -------------------
    # Load files and post-process them
    # -------------------
    for fn_idx, fn in enumerate(fns):
        # Second model's prediction for the same tile, matched by basename.
        model2_fn = os.path.join(args.model_predictions + '/predictions3/',
                                 os.path.basename(fn))
        fn_parts = fn.split("/")
        new_fn = fn_parts[-1][:-4] + "_postprocessed.tif"
        # Read data model 1
        # NOTE(review): fn already carries the args.model_predictions prefix
        # from glob, so this join double-prefixes the path unless
        # args.model_predictions is absolute — confirm.
        with rasterio.open(os.path.join(args.model_predictions, fn)) as f:
            data1 = f.read()
            data1 = np.squeeze(data1)
        # Read data model 2
        with rasterio.open(model2_fn) as f2:
            data2 = f2.read()
            data2 = np.squeeze(data2)
            profile = f2.profile
            height, width = data2.shape
        #Read image tile
        tile_fn = os.path.join(args.model_predictions + '/tiles/', str(os.path.basename(fn)).replace('_predictions.tif', '.tif'))
        with rasterio.open(tile_fn) as f:
            tile = f.read()
            img = np.moveaxis(tile, 0, 2)  # bands-first -> bands-last
        output = np.zeros((height, width), dtype=np.uint8)
        # Post process predictions: keep only pixels both models agree on.
        output[(data1 == 1) & (data2 == 1)] = 1
        print("got here")
        # Water index
        NDWI = (img[:, :, 2] - img[:, :, 7]) / (img[:, :, 2] + img[:, :, 7] + 0.0001)
        print(np.max(NDWI))
        print(np.mean(NDWI))
        # Remove solar panels on water
        # NOTE(review): NDWI as computed lies in [-1, 1], so the > 30
        # threshold can never fire — confirm whether a scaled index (e.g.
        # percent) was intended.
        output[NDWI > 30] = 0
        # remove predictions over clouds or snow
        output[img[:, :, 0] > 1300] = 0
        contours = find_contours(output, 0.5)
        for n, contour in enumerate(contours):
            # Construct the rotatedbox. If its aspect ratio is too small, we ignore it
            # NOTE(review): the test below is on the bounding-box *area*
            # (w*h > 49), not the aspect ratio — comment and code disagree.
            ll, ur = np.min(contour, 0), np.max(contour, 0)
            wh = ur - ll
            if (wh[0] * wh[1] > 49):
                continue
            else:
                # Zero out small polygons
                rr, cc = polygon(contour[:, 0], contour[:, 1], output.shape)
                output[rr, cc] = 0
        # output[output==1]=0
        # Save post-processed predictions
        new_profile = profile.copy()
        new_profile["count"] = 1
        new_profile["dtype"] = "uint8"
        new_profile["compress"] = "lzw"
        with rasterio.open(os.path.join(args.output_dir, new_fn), "w", **new_profile) as f:
            f.write(output, 1)
            # Colormap: class 0 transparent blue-ish, class 1 opaque yellow.
            f.write_colormap(1, {
                0: (24, 154, 211, 0),
                1: (255, 211, 0, 255),
            })
if __name__ == "__main__":
main() | [
"os.path.exists",
"numpy.mean",
"os.listdir",
"os.makedirs",
"argparse.ArgumentParser",
"rasterio.open",
"os.path.join",
"numpy.squeeze",
"numpy.max",
"datetime.datetime.now",
"numpy.zeros",
"os.path.basename",
"numpy.min",
"numpy.moveaxis",
"skimage.measure.find_contours",
"skimage.dr... | [((306, 400), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Solar Installations mapping post-processing script"""'}), "(description=\n 'Solar Installations mapping post-processing script')\n", (329, 400), False, 'import argparse\n'), ((959, 997), 'os.path.exists', 'os.path.exists', (['args.model_predictions'], {}), '(args.model_predictions)\n', (973, 997), False, 'import os\n'), ((1043, 1074), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1057, 1074), False, 'import os\n'), ((1252, 1295), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (1263, 1295), False, 'import os\n'), ((2487, 2528), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (2495, 2528), True, 'import numpy as np\n'), ((2993, 3019), 'skimage.measure.find_contours', 'find_contours', (['output', '(0.5)'], {}), '(output, 0.5)\n', (3006, 3019), False, 'from skimage.measure import find_contours\n'), ((1335, 1397), 'os.path.join', 'os.path.join', (["(args.model_predictions + '/predictions')", '"""*.tif"""'], {}), "(args.model_predictions + '/predictions', '*.tif')\n", (1347, 1397), False, 'import os\n'), ((1684, 1704), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (1700, 1704), False, 'import os\n'), ((1951, 1968), 'numpy.squeeze', 'np.squeeze', (['data1'], {}), '(data1)\n', (1961, 1968), True, 'import numpy as np\n'), ((2011, 2035), 'rasterio.open', 'rasterio.open', (['model2_fn'], {}), '(model2_fn)\n', (2024, 2035), False, 'import rasterio\n'), ((2093, 2110), 'numpy.squeeze', 'np.squeeze', (['data2'], {}), '(data2)\n', (2103, 2110), True, 'import numpy as np\n'), ((2370, 2392), 'rasterio.open', 'rasterio.open', (['tile_fn'], {}), '(tile_fn)\n', (2383, 2392), False, 'import rasterio\n'), ((2445, 2468), 'numpy.moveaxis', 'np.moveaxis', (['tile', '(0)', '(2)'], {}), '(tile, 0, 
2)\n', (2456, 2468), True, 'import numpy as np\n'), ((2763, 2775), 'numpy.max', 'np.max', (['NDWI'], {}), '(NDWI)\n', (2769, 2775), True, 'import numpy as np\n'), ((2791, 2804), 'numpy.mean', 'np.mean', (['NDWI'], {}), '(NDWI)\n', (2798, 2804), True, 'import numpy as np\n'), ((851, 874), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (872, 874), False, 'import datetime\n'), ((1091, 1118), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (1101, 1118), False, 'import os\n'), ((1854, 1894), 'os.path.join', 'os.path.join', (['args.model_predictions', 'fn'], {}), '(args.model_predictions, fn)\n', (1866, 1894), False, 'import os\n'), ((3176, 3194), 'numpy.min', 'np.min', (['contour', '(0)'], {}), '(contour, 0)\n', (3182, 3194), True, 'import numpy as np\n'), ((3196, 3214), 'numpy.max', 'np.max', (['contour', '(0)'], {}), '(contour, 0)\n', (3202, 3214), True, 'import numpy as np\n'), ((3387, 3438), 'skimage.draw.polygon', 'polygon', (['contour[:, 0]', 'contour[:, 1]', 'output.shape'], {}), '(contour[:, 0], contour[:, 1], output.shape)\n', (3394, 3438), False, 'from skimage.draw import polygon\n'), ((3725, 3762), 'os.path.join', 'os.path.join', (['args.output_dir', 'new_fn'], {}), '(args.output_dir, new_fn)\n', (3737, 3762), False, 'import os\n'), ((2298, 2318), 'os.path.basename', 'os.path.basename', (['fn'], {}), '(fn)\n', (2314, 2318), False, 'import os\n')] |
import numpy as np
import pandas as pd
class Agent:
    """Tabular Q-learning agent on a (time, x, y) grid with 8 movement actions.

    The Q-table is indexed as q[des_x, des_y, t, x, y, action], i.e. a
    separate value table is kept for every destination cell. State/action
    pairs that would step off the grid edge are pre-set to the most negative
    float32 so the greedy policy never selects them.
    """

    def __init__(self,
                 env,
                 data_path=None,
                 gamma=0.9,
                 learning_rate=0.1,
                 epsilon=.1):
        """Initialize hyper-parameters and the Q-table.

        Args:
            env: environment exposing time_count, action_space (number of
                actions), x_count and y_count.
            data_path: optional path of a previously saved Q-table (numpy
                format); when given, it is loaded instead of built.
            gamma: reward discount factor (default 0.9).
            learning_rate: Q-update step size (default 0.1).
            epsilon: exploration probability of the epsilon-greedy policy.
        """
        self.gamma = gamma
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.time_count = env.time_count
        self.action_n = env.action_space
        self.x_count = env.x_count
        self.y_count = env.y_count
        if data_path is None:
            self.qshape = (self.x_count, self.y_count, self.time_count,
                           self.x_count, self.y_count, self.action_n)
            self.q = np.zeros(self.qshape)
            # Boundary initialisation, vectorised: the original quadruple
            # loop over (des_x, des_y, t, x|y) assigned the same constant
            # cell by cell; broadcasting writes each edge in one pass.
            blocked = np.finfo(np.float32).min
            self.q[:, :, :, :, 0, [0, 1, 7]] = blocked                 # y == 0 edge
            self.q[:, :, :, :, self.y_count - 1, [3, 4, 5]] = blocked  # y == max edge
            self.q[:, :, :, 0, :, [5, 6, 7]] = blocked                 # x == 0 edge
            self.q[:, :, :, self.x_count - 1, :, [1, 2, 3]] = blocked  # x == max edge
        else:
            self.q = np.load(data_path)

    def _obs2txy(self, obs):
        """Unpack an observation dict into (hour-of-day, x, y)."""
        t = int(obs['time']) % 24  # NOTE(review): assumes time_count == 24 — confirm
        x = obs['position'][0]
        y = obs['position'][1]
        return t, x, y

    def decide(self, obs, des_x, des_y):
        """Epsilon-greedy action selection for destination (des_x, des_y)."""
        t, x, y = self._obs2txy(obs)
        rand = np.random.uniform(0, 1)
        if rand > self.epsilon:
            # Shuffle before idxmax so ties are broken randomly instead of
            # always picking the smallest action index among the maxima.
            state_action = pd.Series(data=np.array(self.q[des_x, des_y, t, x,
                                                            y, :]))
            state_action = state_action.reindex(
                np.random.permutation(state_action.index))
            action = state_action.idxmax()
        else:
            action = np.random.randint(self.action_n)
        return action

    def learnQ(self, obs, action, reward, next_obs, done, des_x,
               des_y):
        """One Q-learning update for the transition (obs, action) -> next_obs."""
        t, x, y = self._obs2txy(obs)
        nt, nx, ny = self._obs2txy(next_obs)
        # TD target: bootstrap from the best next-state value unless done.
        u = reward + self.gamma * self.q[des_x, des_y, nt, nx,
                                         ny].max() * (1. - done)
        td_error = u - self.q[des_x, des_y, t, x, y, action]
        self.q[des_x, des_y, t, x, y,
               action] += self.learning_rate * td_error  # update the Q-table
| [
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.random.uniform",
"numpy.finfo",
"numpy.load",
"numpy.random.permutation"
] | [((2890, 2913), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2907, 2913), True, 'import numpy as np\n'), ((699, 720), 'numpy.zeros', 'np.zeros', (['self.qshape'], {}), '(self.qshape)\n', (707, 720), True, 'import numpy as np\n'), ((2600, 2618), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (2607, 2618), True, 'import numpy as np\n'), ((3318, 3350), 'numpy.random.randint', 'np.random.randint', (['self.action_n'], {}), '(self.action_n)\n', (3335, 3350), True, 'import numpy as np\n'), ((3197, 3238), 'numpy.random.permutation', 'np.random.permutation', (['state_action.index'], {}), '(state_action.index)\n', (3218, 3238), True, 'import numpy as np\n'), ((3030, 3072), 'numpy.array', 'np.array', (['self.q[des_x, des_y, t, x, y, :]'], {}), '(self.q[des_x, des_y, t, x, y, :])\n', (3038, 3072), True, 'import numpy as np\n'), ((1042, 1062), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1050, 1062), True, 'import numpy as np\n'), ((1165, 1185), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1173, 1185), True, 'import numpy as np\n'), ((1288, 1308), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1296, 1308), True, 'import numpy as np\n'), ((1426, 1446), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1434, 1446), True, 'import numpy as np\n'), ((1564, 1584), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1572, 1584), True, 'import numpy as np\n'), ((1702, 1722), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1710, 1722), True, 'import numpy as np\n'), ((1880, 1900), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1888, 1900), True, 'import numpy as np\n'), ((2003, 2023), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2011, 2023), True, 'import numpy as np\n'), ((2126, 2146), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2134, 2146), 
True, 'import numpy as np\n'), ((2264, 2284), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2272, 2284), True, 'import numpy as np\n'), ((2402, 2422), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2410, 2422), True, 'import numpy as np\n'), ((2540, 2560), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2548, 2560), True, 'import numpy as np\n')] |
# coding=UTF-8
import numpy as np
import tensorflow.keras as keras
import tensorflow as tf
import os
import cv2
from tqdm import tqdm
import random
import filetype
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.callbacks import TensorBoard
import time
# DATADIR = "/media/alfonso/COMPARTIDA/devel/Tensorflow/imagenesLokro/imagenes_clas/"
DATADIR = '/Volumes/COMPARTIDA/devel/Tensorflow/imagenesLokro/imagenes_clas/'  # root dir with one sub-folder per class
# CATEGORIAS = ['dormido', 'despierto', 'otro', 'barriba', 'ausente']
CATEGORIAS = ['dormido', 'despierto', 'otro']  # class names = sub-folder names; list index = numeric label
IMG_SIZE = 70  # images are resized to IMG_SIZE x IMG_SIZE before training
NAME = "bz16-adam-c5-{}".format(int(time.time()))  # unique TensorBoard run name
training_data = []  # filled by create_training_data(): [image_array, class_index] pairs
def create_training_data():
    """Populate the global `training_data` list with [image, label] pairs.

    Scans one sub-directory of DATADIR per entry in CATEGORIAS, keeps only
    real JPEG files (sniffed with `filetype`, not by extension), loads each
    in grayscale, resizes it to IMG_SIZE x IMG_SIZE, and appends
    [image_array, class_index] to `training_data`.
    """
    for class_num, category in enumerate(CATEGORIAS):  # label = position in CATEGORIAS
        path = os.path.join(DATADIR, category)
        for img in tqdm(os.listdir(path)):
            img_path = os.path.join(path, img)
            tipo_archivo = filetype.guess(img_path)
            if tipo_archivo is not None and tipo_archivo.mime == 'image/jpeg':
                try:
                    img_array = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)  # grayscale -> 1 channel
                    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))  # normalize data size
                    training_data.append([new_array, class_num])
                except Exception as e:
                    # Previously errors were swallowed silently; report and skip instead.
                    print('Skipping unreadable image %s: %s' % (img_path, e))
# Build the dataset, then train a fully-connected classifier on the
# flattened grayscale images.
create_training_data()
print("Tamaño del conjunto de entrenamiento: " + str(len(training_data)))
# Shuffle so classes are interleaved before the train/validation split.
random.shuffle(training_data)
X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)
# Grayscale images: reshape to (N, IMG_SIZE, IMG_SIZE, 1) and scale to [0, 1].
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
X = X / 255.0
model = Sequential()
model.add(Flatten())
model.add(Dense(648))
model.add(Activation('relu'))
model.add(Dense(648))
model.add(Activation('relu'))
# Output layer: one unit per class in CATEGORIAS, softmax probabilities.
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
model.fit(X, np.array(y), batch_size=8,
          epochs=20,
          validation_split=0.3,
          callbacks=[tensorboard])
# model.save("modeloLokro3m.h5")
| [
"os.listdir",
"random.shuffle",
"cv2.resize",
"os.path.join",
"numpy.array",
"tensorflow.keras.layers.Dense",
"time.time",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Sequential"
] | [((1677, 1706), 'random.shuffle', 'random.shuffle', (['training_data'], {}), '(training_data)\n', (1691, 1706), False, 'import random\n'), ((1880, 1892), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1890, 1892), False, 'from tensorflow.keras.models import Sequential\n'), ((1904, 1913), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1911, 1913), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1926, 1936), 'tensorflow.keras.layers.Dense', 'Dense', (['(648)'], {}), '(648)\n', (1931, 1936), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1948, 1966), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1958, 1966), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1979, 1989), 'tensorflow.keras.layers.Dense', 'Dense', (['(648)'], {}), '(648)\n', (1984, 1989), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2001, 2019), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2011, 2019), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2032, 2040), 'tensorflow.keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (2037, 2040), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2052, 2073), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2062, 2073), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2271, 2282), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2279, 2282), True, 'import numpy as np\n'), ((681, 692), 'time.time', 'time.time', ([], {}), '()\n', (690, 692), False, 'import time\n'), ((812, 843), 'os.path.join', 'os.path.join', (['DATADIR', 'category'], {}), '(DATADIR, category)\n', (824, 843), False, 'import os\n'), ((1809, 1820), 
'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1817, 1820), True, 'import numpy as np\n'), ((1009, 1025), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1019, 1025), False, 'import os\n'), ((1070, 1093), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1082, 1093), False, 'import os\n'), ((1337, 1380), 'cv2.resize', 'cv2.resize', (['img_array', '(IMG_SIZE, IMG_SIZE)'], {}), '(img_array, (IMG_SIZE, IMG_SIZE))\n', (1347, 1380), False, 'import cv2\n'), ((1238, 1261), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (1250, 1261), False, 'import os\n')] |
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
import numpy as np
from platypus import Problem, Subset, NSGAII
from solver.solver import Solver
class MultiObjectiveOptimizationSolver(Solver):
    """NSGA-II based solver over the fuzzy membership universes.

    Builds one Platypus problem for the `u` memberships and one for the `w`
    memberships (skipping optimization entirely when only a single
    component is active) and reduces each Pareto front to a scalar.
    """

    def __init__(self, iterations=1000):
        """
        Args:
            iterations: NSGA-II evaluation budget for each problem.
        """
        super().__init__()
        self.iterations = iterations
        self.us = []  # membership objects for the u-objectives (set in build_problems)
        self.ws = []  # membership objects for the w-objectives

    def solve(self):
        """Return the optimized (u, w) pair.

        When only one component is active, build_problems() already returns
        the defuzzified scalars and they are passed straight through;
        otherwise the two problems are solved in parallel worker processes.
        """
        print(self.used_components)
        u, w = self.build_problems()
        # np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24; accept Python and NumPy floating scalars.
        if isinstance(u, (float, np.floating)):
            return u, w
        with Pool(cpu_count()) as p:
            u, w = p.map(self._solve, [(u, self.iterations, True), (w, self.iterations, False)])
        return u, w

    @staticmethod
    def _solve(args):
        """Run NSGA-II on one problem and collapse the feasible front.

        Args:
            args: (problem, iterations, use_max) tuple; use_max selects
                max (u-problem) vs min (w-problem) over the first objective.
        """
        problem, iterations, use_max = args
        algorithm = NSGAII(problem)
        algorithm.run(iterations)
        feasible_solutions = [s.objectives[0] for s in algorithm.result if s.feasible]
        return max(feasible_solutions) if use_max else min(feasible_solutions)

    def u_function(self, x):
        """Objective vector: membership of the candidate x in each active u-set."""
        x = x[0][0]  # Subset variable: one-element subset -> the chosen value
        u1, u2, u3 = [self.us[i].find_mfx(x) if self.used_components[i] else None for i in
                      range(len(self.used_components))]
        result = np.asarray([u1, u2, u3])
        result = result[self.used_components].reshape(-1)
        result = result.tolist()
        return result

    def w_function(self, x):
        """Objective vector: membership of the candidate x in each active w-set."""
        x = x[0][0]
        w1, w2, w3 = [self.ws[i].find_mfx(x) if self.used_components[i] else None for i in
                      range(len(self.used_components))]
        result = np.asarray([w1, w2, w3])
        result = result[self.used_components].reshape(-1)
        result = result.tolist()
        return result

    def build_problems(self):
        """Build the u/w problems, or defuzzified scalars if only one
        component is active."""
        self.us = [self.u1, self.u2, self.u3]
        self.ws = [self.w1, self.w2, self.w3]
        temp = sum(self.used_components)
        if temp == 1:
            # Single active component: no optimization needed.
            return self.defuzz_not_none()
        u_problem, w_problem = Problem(1, temp), Problem(1, temp)
        u_universe, w_universe = self.universe_not_none()
        u_problem.types[:] = Subset(u_universe, 1)
        w_problem.types[:] = Subset(w_universe, 1)
        u_problem.directions[:] = Problem.MAXIMIZE
        # NOTE(review): w_problem is also set to MAXIMIZE although _solve
        # later takes the *minimum* of the w front — confirm this is intended.
        w_problem.directions[:] = Problem.MAXIMIZE
        u_problem.function, w_problem.function = self.u_function, self.w_function
        return u_problem, w_problem

    def universe_not_none(self):
        """Return the (u, w) universe arrays of the first active component."""
        if self.u1 is not None:
            return self.u1.x, self.w1.x
        if self.u2 is not None:
            return self.u2.x, self.w2.x
        return self.u3.x, self.w3.x

    def defuzz_not_none(self):
        """Defuzzify the single active component's two inference systems."""
        if self.u1 is not None and self.used_components[0]:
            return self.inf11.defuzz(), self.inf12.defuzz()
        if self.u2 is not None and self.used_components[1]:
            return self.inf21.defuzz(), self.inf22.defuzz()
        # return self.inf31.sim.output['output_u'], self.inf31.sim.output['output_w']
        return self.inf31.defuzz(), self.inf32.defuzz()
| [
"platypus.Problem",
"platypus.Subset",
"numpy.asarray",
"multiprocessing.cpu_count",
"platypus.NSGAII"
] | [((786, 801), 'platypus.NSGAII', 'NSGAII', (['problem'], {}), '(problem)\n', (792, 801), False, 'from platypus import Problem, Subset, NSGAII\n'), ((1216, 1240), 'numpy.asarray', 'np.asarray', (['[u1, u2, u3]'], {}), '([u1, u2, u3])\n', (1226, 1240), True, 'import numpy as np\n'), ((1568, 1592), 'numpy.asarray', 'np.asarray', (['[w1, w2, w3]'], {}), '([w1, w2, w3])\n', (1578, 1592), True, 'import numpy as np\n'), ((2087, 2108), 'platypus.Subset', 'Subset', (['u_universe', '(1)'], {}), '(u_universe, 1)\n', (2093, 2108), False, 'from platypus import Problem, Subset, NSGAII\n'), ((2138, 2159), 'platypus.Subset', 'Subset', (['w_universe', '(1)'], {}), '(w_universe, 1)\n', (2144, 2159), False, 'from platypus import Problem, Subset, NSGAII\n'), ((1965, 1981), 'platypus.Problem', 'Problem', (['(1)', 'temp'], {}), '(1, temp)\n', (1972, 1981), False, 'from platypus import Problem, Subset, NSGAII\n'), ((1983, 1999), 'platypus.Problem', 'Problem', (['(1)', 'temp'], {}), '(1, temp)\n', (1990, 1999), False, 'from platypus import Problem, Subset, NSGAII\n'), ((545, 556), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (554, 556), False, 'from multiprocessing import cpu_count\n')] |
import os
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing import sequence,text
from tensorflow.keras import models
from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D
import numpy as np
import json  # fix: json.dump/json.dumps are used later in this file but json was never imported

# Load the ticket dataset and split it 80/10/10 into train/valid/test.
data_set = pd.read_csv('Dataset.csv', header = None)
data_set.columns = ['Text', 'Category']
train_set = data_set.sample(frac=0.8)
data_set.drop(train_set.index,axis=0,inplace=True)
valid_set = data_set.sample(frac=0.5)
data_set.drop(valid_set.index,axis=0,inplace=True)
test_set = data_set

# Label mapping for the three ticket categories.
CLASSES= {'CPU_Utilization':0,'Password_Reset':1,'Memory_Utilization':2}

# Hyper-parameters.
# fix: the original line read `top_tokens=2<PASSWORD>0`, a secret-scrubbing
# artifact that is a syntax error. 20000 is a common vocabulary cap for
# GloVe-based tutorial models -- TODO(review): confirm the intended value.
top_tokens = 20000
max_len=50
filters=64
dropout_rate=0.2
embedding_dimension=200
kernel_size=3
pool_size=3
def data_map(df):
    """Return (texts, label_ids) for one split, mapping categories through CLASSES."""
    texts = df['Text'].tolist()
    labels = np.array(df['Category'].map(CLASSES))
    return texts, labels
# Materialize (texts, labels) pairs for each split.
train_text,train_labels = data_map(train_set)
valid_text,valid_labels=data_map(valid_set)
test_text,test_labels=data_map(test_set)
def embedding_matrix_conv(word_index, embedding_file_path, embedding_dimension):
    """Build an embedding weight matrix from a GloVe-style vector file.

    Rows are indexed by the tokenizer's word ids (capped at the module-level
    ``top_tokens``); tokens without a pretrained vector stay zero.
    """
    # Parse the whole vector file: token -> float32 vector.
    pretrained = {}
    with open(embedding_file_path, 'r') as vector_file:
        for line in vector_file:
            parts = line.split()
            pretrained[parts[0]] = np.asarray(parts[1:], dtype='float32')
    num_rows = min(len(word_index) + 1, top_tokens)
    matrix = np.zeros((num_rows, embedding_dimension))
    for token, position in word_index.items():
        if position >= top_tokens:
            continue
        vector = pretrained.get(token)
        if vector is not None:
            matrix[position] = vector
    return matrix
# Fit a Keras tokenizer on the training texts; word_index maps token -> id.
tokenizer=text.Tokenizer (num_words=top_tokens)
tokenizer.fit_on_texts(train_text)
word_index=tokenizer.word_index
# Pretrained 200-dimensional GloVe vectors.
embedding_file_path = 'glove.6B.200d.txt'
def create_model():
    """Build the CNN text classifier.

    Architecture: pretrained GloVe embedding (trainable) -> dropout ->
    Conv1D -> max pool -> Conv1D -> global average pool -> dropout ->
    softmax over the CLASSES labels.
    """
    model = models.Sequential()
    features = min(len(word_index) + 1, top_tokens)
    model.add(Embedding(input_dim=features,
        output_dim=embedding_dimension,
        input_length=max_len,
        weights=[embedding_matrix_conv(word_index,
            embedding_file_path, embedding_dimension)],trainable=True))
    model.add(Dropout(rate=dropout_rate))
    model.add(Conv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='he_normal',
                            padding='same'))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(Conv1D(filters=filters * 2,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='he_normal',
                            padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(len(CLASSES), activation='softmax'))
    # fix: `lr` was deprecated (and later removed) in Keras optimizers in
    # favor of `learning_rate`.
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
    return model
# Convert the raw texts into padded integer sequences of length max_len.
train_process = tokenizer.texts_to_sequences(train_text)
train_process = sequence.pad_sequences(train_process, maxlen=max_len)
valid_process = tokenizer.texts_to_sequences(valid_text)
valid_process = sequence.pad_sequences(valid_process, maxlen=max_len)
test_process = tokenizer.texts_to_sequences(test_text)
test_process = sequence.pad_sequences(test_process, maxlen=max_len)
model = create_model()
model.summary()
# Checkpoint the weights after every epoch.
checkpoint_path = "training_path/cp.ckpt"
checkpoint_directory = os.path.dirname(checkpoint_path)
callback_path = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1)
model.fit(train_process,
    train_labels,
    epochs=10,
    validation_data=(test_process,test_labels),
    callbacks=[callback_path])
model.save('model_saved/model')
# Persist one validation instance as a sample prediction payload.
import json  # fix: `json` was used below without ever being imported
# fix: numpy arrays are not JSON serializable; convert to a nested list first.
input_dict={'embedding_9_input': valid_process[1:2].tolist()}
with open('sample_instance.json', 'w') as prediction_file:
    json.dump(input_dict, prediction_file)
def predictClass():
    """Classify a hard-coded sample ticket; returns a JSON string
    ``{"results": {"class": <label>}}``.

    On any failure the exception object itself is returned (kept from the
    original behavior so callers can inspect it).
    """
    import json  # fix: json was referenced without being imported at module level
    try:
        content= ['User requested to Change Password as expired']
        result = {}
        pred_process = tokenizer.texts_to_sequences(content)
        pred_process = sequence.pad_sequences(pred_process, maxlen=max_len)
        new_model = tf.keras.models.load_model('model_saved/model')
        # fix: Sequential.predict_classes() was removed in TF 2.6; argmax over
        # the softmax probabilities is the documented replacement.
        prediction = int(np.argmax(new_model.predict(pred_process), axis=-1)[0])
        # fix: initialize `category` so an unmatched prediction cannot raise
        # NameError below.
        category = None
        for key, value in CLASSES.items():
            if value==prediction:
                category=key
        result["class"] = category
        result = {"results": result}
        result = json.dumps(result)
        return result
    except Exception as e:
        return e
if __name__=='__main__':
    predictClass()
| [
"pandas.read_csv",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.Dropout",
"numpy.asarray",
"tensorflow.keras.optimizers.Adam",
"os.path.dirname",
"numpy.zeros",
"tensorflow.keras.preprocessing.text.Tokenizer",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"... | [((287, 326), 'pandas.read_csv', 'pd.read_csv', (['"""Dataset.csv"""'], {'header': 'None'}), "('Dataset.csv', header=None)\n", (298, 326), True, 'import pandas as pd\n'), ((1789, 1825), 'tensorflow.keras.preprocessing.text.Tokenizer', 'text.Tokenizer', ([], {'num_words': 'top_tokens'}), '(num_words=top_tokens)\n', (1803, 1825), False, 'from tensorflow.keras.preprocessing import sequence, text\n'), ((3155, 3208), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['train_process'], {'maxlen': 'max_len'}), '(train_process, maxlen=max_len)\n', (3177, 3208), False, 'from tensorflow.keras.preprocessing import sequence, text\n'), ((3283, 3336), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['valid_process'], {'maxlen': 'max_len'}), '(valid_process, maxlen=max_len)\n', (3305, 3336), False, 'from tensorflow.keras.preprocessing import sequence, text\n'), ((3408, 3460), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['test_process'], {'maxlen': 'max_len'}), '(test_process, maxlen=max_len)\n', (3430, 3460), False, 'from tensorflow.keras.preprocessing import sequence, text\n'), ((3567, 3599), 'os.path.dirname', 'os.path.dirname', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3582, 3599), False, 'import os\n'), ((3617, 3716), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'save_weights_only': '(True)', 'verbose': '(1)'}), '(filepath=checkpoint_path,\n save_weights_only=True, verbose=1)\n', (3651, 3716), True, 'import tensorflow as tf\n'), ((1432, 1474), 'numpy.zeros', 'np.zeros', (['(num_words, embedding_dimension)'], {}), '((num_words, embedding_dimension))\n', (1440, 1474), True, 'import numpy as np\n'), ((1968, 1987), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (1985, 1987), False, 'from tensorflow.keras import models\n'), ((2937, 2971), 
'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (2961, 2971), True, 'import tensorflow as tf\n'), ((2329, 2355), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': 'dropout_rate'}), '(rate=dropout_rate)\n', (2336, 2355), False, 'from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((2369, 2486), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'activation': '"""relu"""', 'bias_initializer': '"""he_normal"""', 'padding': '"""same"""'}), "(filters=filters, kernel_size=kernel_size, activation='relu',\n bias_initializer='he_normal', padding='same')\n", (2375, 2486), False, 'from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((2560, 2593), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': 'pool_size'}), '(pool_size=pool_size)\n', (2572, 2593), False, 'from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((2607, 2728), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(filters * 2)', 'kernel_size': 'kernel_size', 'activation': '"""relu"""', 'bias_initializer': '"""he_normal"""', 'padding': '"""same"""'}), "(filters=filters * 2, kernel_size=kernel_size, activation='relu',\n bias_initializer='he_normal', padding='same')\n", (2613, 2728), False, 'from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((2802, 2826), 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (2824, 2826), False, 'from tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((2840, 2866), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': 'dropout_rate'}), '(rate=dropout_rate)\n', (2847, 2866), False, 'from 
tensorflow.keras.layers import Dense, Dropout, Embedding, Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((4263, 4315), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['pred_process'], {'maxlen': 'max_len'}), '(pred_process, maxlen=max_len)\n', (4285, 4315), False, 'from tensorflow.keras.preprocessing import sequence, text\n'), ((4336, 4383), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""model_saved/model"""'], {}), "('model_saved/model')\n", (4362, 4383), True, 'import tensorflow as tf\n'), ((1268, 1307), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (1278, 1307), True, 'import numpy as np\n')] |
import numpy as np

# A rank-1 array with two entries.
x = np.array([1, 2])
print(x.shape)
# Prepend a length-1 axis, turning the vector into a 1x2 row matrix.
y = x[np.newaxis, :]
print(y.shape) | [
"numpy.array",
"numpy.expand_dims"
] | [((23, 39), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (31, 39), True, 'import numpy as np\n'), ((59, 84), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (73, 84), True, 'import numpy as np\n')] |
# pylint: disable=no-member, invalid-name, too-many-instance-attributes
"""
load raw EWD into memory
"""
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import os
from struct import unpack
import numpy as np
class EWD:
    """EIT raw/waveform data.

    Parses a .EWD file (fixed-layout raw waveforms) and demodulates it into
    complex measurement values.
    """

    def __init__(self, file_name):
        """
        RAW data (.EWD) contains only data.

        Args:
            file_name: path to the .EWD file to load.
        """
        self.file_name = file_name
        self.file_size = os.path.getsize(file_name)
        # 256 measurement waveforms, 128 points per wave, 2 bytes per point
        self.n_wave = 256
        self.n_point = 128
        self.n_data = self.n_wave * self.n_point
        self.frame_size = self.n_data * 2  # signed short
        self.n_frame = int(self.file_size / self.frame_size)
        self.tot_data = int(self.file_size / 2)
        raw = self.load_raw()
        self.wave = self.demodulate(raw)
        # Scale ADC counts to physical units (constants kept from original).
        scale = 4096 / 65536 / 29.9 * 1000 / 1250
        # First half of each demodulated frame is the real part, second half imaginary.
        self.data = (self.wave[:, :256] + 1j * self.wave[:, 256:]) * scale

    def load_raw(self):
        """Load raw frames; returns an (n_frame, n_data) integer array."""
        # fix: np.int was removed in NumPy 1.24; the builtin int keeps the
        # original (platform default) integer dtype.
        raw = np.zeros((self.n_frame, self.n_data), dtype=int)
        with open(self.file_name, "rb") as fh:
            for i in range(self.n_frame):
                d = fh.read(self.frame_size)
                raw[i] = unpack("{}h".format(self.n_data), d)
        return raw

    def demodulate(self, raw):
        """Demodulate raw data into [re, im]; returns (n_frame, 2 * n_wave)."""
        wave = np.zeros((self.n_frame, 2 * self.n_wave), dtype=np.double)
        sin_rom = np.sin(2.0 * np.pi * np.arange(self.n_point) / self.n_point)
        cos_rom = np.cos(2.0 * np.pi * np.arange(self.n_point) / self.n_point)
        for i in range(self.n_frame):
            dw = raw[i].reshape(self.n_wave, -1)
            wave_re = np.sum(dw * sin_rom, axis=1)
            wave_im = np.sum(dw * cos_rom, axis=1)
            # Normalize by half the number of points per period.
            wave[i] = np.concatenate([wave_re, wave_im]) / (self.n_point / 2.0)
        return wave

    def to_erd(self, src, dst):
        """Combine ERD headers with the demodulated EWD data into a new file."""
        file_size = os.path.getsize(src)
        header_size = 1024
        data_size = 4096  # 2*256 doubles
        frame_size = header_size + data_size
        n_frame = int(file_size / frame_size)
        assert n_frame == self.n_frame
        with open(src, "rb") as fr, open(dst, "wb") as fw:
            for i in range(n_frame):
                h = fr.read(header_size)
                fr.read(data_size)  # skip the old data block
                d = self.wave[i].tobytes()
                fw.write(h + d)
| [
"os.path.getsize",
"numpy.sum",
"numpy.zeros",
"numpy.concatenate",
"numpy.arange"
] | [((487, 513), 'os.path.getsize', 'os.path.getsize', (['file_name'], {}), '(file_name)\n', (502, 513), False, 'import os\n'), ((1123, 1174), 'numpy.zeros', 'np.zeros', (['(self.n_frame, self.n_data)'], {'dtype': 'np.int'}), '((self.n_frame, self.n_data), dtype=np.int)\n', (1131, 1174), True, 'import numpy as np\n'), ((1486, 1544), 'numpy.zeros', 'np.zeros', (['(self.n_frame, 2 * self.n_wave)'], {'dtype': 'np.double'}), '((self.n_frame, 2 * self.n_wave), dtype=np.double)\n', (1494, 1544), True, 'import numpy as np\n'), ((2105, 2125), 'os.path.getsize', 'os.path.getsize', (['src'], {}), '(src)\n', (2120, 2125), False, 'import os\n'), ((1812, 1840), 'numpy.sum', 'np.sum', (['(dw * sin_rom)'], {'axis': '(1)'}), '(dw * sin_rom, axis=1)\n', (1818, 1840), True, 'import numpy as np\n'), ((1863, 1891), 'numpy.sum', 'np.sum', (['(dw * cos_rom)'], {'axis': '(1)'}), '(dw * cos_rom, axis=1)\n', (1869, 1891), True, 'import numpy as np\n'), ((1914, 1948), 'numpy.concatenate', 'np.concatenate', (['[wave_re, wave_im]'], {}), '([wave_re, wave_im])\n', (1928, 1948), True, 'import numpy as np\n'), ((1584, 1607), 'numpy.arange', 'np.arange', (['self.n_point'], {}), '(self.n_point)\n', (1593, 1607), True, 'import numpy as np\n'), ((1663, 1686), 'numpy.arange', 'np.arange', (['self.n_point'], {}), '(self.n_point)\n', (1672, 1686), True, 'import numpy as np\n')] |
#-*- coding:utf-8 -*-
import numpy as np
import spiceminer as sm
def _stamp_angles_falloff(angles, falloff, resolution):
falloff = np.vectorize(falloff, otypes=[float])
m, n = resolution
d_phi = 2 * np.pi / float(m)
d_theta = np.pi / float(n)
stamp_shape = [
int(angles[1] / d_theta) * 2 + 1,
int(angles[0] / d_phi) * 2 + 1]
q, p = stamp_shape
q2, p2 = q // 2, p // 2
x_grid, y_grid = np.mgrid[-q2:q2 + 1, -p2:p2 + 1]
x_grid, y_grid = x_grid * d_phi, y_grid * d_theta
stamp = falloff(x_grid, y_grid)
return stamp, x_grid, y_grid
def _stamp_resample(stamp, angles, resolution):
import scipy.ndimage as ndimage
m, n = resolution
q, p = stamp.shape
d_phi = 2 * np.pi / float(m)
d_theta = np.pi / float(n)
d_phi_old = 2 * angles[0] / float(p)
d_theta_old = 2 * angles[1] / float(q)
zoom_phi = d_phi / d_phi_old
zoom_theta = d_theta / d_theta_old
return ndimage.zoom(stamp, zoom=(zoom_theta, zoom_phi), order=3)
def _apply_stamp(skymap, stamp, phi, theta):
    """Blend *stamp* into *skymap* (element-wise max), centered at (phi, theta).

    Portions of the stamp that overflow a map edge are written to the
    opposite edge (periodic wrap-around in both directions).
    """
    # Element-wise maximum; vectorize lets it broadcast over array slices.
    umax = np.vectorize(max)
    n, m = skymap.shape
    q, p = stamp.shape
    q2, p2 = q // 2, p // 2
    # Map the angular position onto pixel coordinates of the skymap.
    x = int(np.interp(phi, [-np.pi, np.pi], [0, m]))
    y = int(np.interp(theta, [-np.pi, np.pi], [0, n]))
    # Extra pixel on the upper side when the stamp dimension is odd.
    uneven = p & 1, q & 1
    # [left, bottom, right, top] destination rectangle (may overflow the map).
    bounds = np.array([
        x - p2,
        y - q2,
        x + p2 + uneven[0],
        y + q2 + uneven[1]
    ], dtype=int)
    # Overflow amounts past the lower (ldx/ldy) and upper (udx/udy) map edges.
    ldx, ldy = ldiffs = -bounds[:2].clip(max=0)
    udx, udy = udiffs = -(np.array([m, n]) - bounds[2:]).clip(max=0)
    # In-bounds sub-rectangle of the destination.
    lbx, lby = bounds[:2] + ldiffs
    ubx, uby = bounds[2:] - udiffs
    # Central (non-wrapping) part of the stamp.
    skymap[lby:uby, lbx:ubx] = umax(skymap[lby:uby, lbx:ubx], stamp[ldy: q - udy, ldx: p - udx])
    # Wrapped slivers for each edge the stamp crossed.
    if ldx:
        skymap[lby:uby, -ldx:] = umax(skymap[lby:uby, -ldx:], stamp[ldy: q - udy, :ldx])
    if udx:
        skymap[lby:uby, :udx] = umax(skymap[lby:uby, :udx], stamp[ldy: q - udy, -udx:])
    if ldy:
        skymap[-ldy:, lbx:ubx] = umax(skymap[-ldy:, lbx:ubx], stamp[:ldy, ldx: p - udx])
    if udy:
        skymap[:udy, lbx:ubx] = umax(skymap[:udy, lbx:ubx], stamp[-udy:, ldx: p - udx])
class SkyMapper(object):
    """Accumulate a footprint stamp onto an equirectangular sky map for a body."""

    def __init__(self, parent, stamp, resolution):
        '''
        parent: Body
        stamp: ndarray
        resolution: 2-tuple of int
            x, y
        '''
        self.parent = sm.Body(parent)
        self.stamp = stamp
        self.resolution = resolution

    @classmethod
    def ellipse(cls, parent, angles, falloff=None, resolution=(360, 180)):
        """Build a mapper whose stamp is an elliptic footprint with half-angles *angles*."""
        if falloff is None:
            falloff = lambda phi, theta: 1.0
        stamp, x_dist, y_dist = _stamp_angles_falloff(angles, falloff, resolution)
        # Zero out everything outside the ellipse.
        mask = x_dist**2 / angles[0]**2.0 + y_dist**2.0 / angles[1]**2 > 1
        stamp[mask] = 0
        return cls(parent, stamp, resolution)

    @classmethod
    def rectangle(cls, parent, angles, falloff=None, resolution=(360, 180)):
        """Build a mapper with a rectangular footprint."""
        if falloff is None:
            falloff = lambda phi, theta: 1.0
        stamp = _stamp_angles_falloff(angles, falloff, resolution)[0]
        return cls(parent, stamp, resolution)

    @classmethod
    def customstamp(cls, parent, angles, stamp, resolution=(360, 180)):
        """Build a mapper from a user-supplied stamp, resampled to *resolution*."""
        stamp = _stamp_resample(stamp, angles, resolution)
        # fix: the original passed **kwargs although no `kwargs` name exists in
        # this scope, raising NameError on every call.
        return cls(parent, stamp, resolution)

    def _make_skymap(self, vectors):
        """Apply the stamp at every pointing direction in *vectors*."""
        skymap = np.zeros(shape=self.resolution[::-1])
        coords = sm.cartesian2sphere(vectors)
        for phi, theta in coords[1:].T:
            _apply_stamp(skymap, self.stamp, phi, theta - np.pi / 2)
        return skymap

    def fixed(self, times, offset=None, frame='ECLIPJ2000'):
        """Sky map traced by the body-fixed +X axis over *times*."""
        _, matrices = self.parent.rotation(times, target=frame)
        vectors = np.array([mat.dot(np.array([1,0,0])) for mat in matrices]).T
        return self._make_skymap(vectors)

    def tracking(self, times, target='SUN', frame='ECLIPJ2000'):
        """Sky map of the direction towards *target* as seen from the body."""
        vectors = target.position(times, observer=self.parent, frame=frame)[1:]
        return self._make_skymap(vectors)
class InstrumentSkyMapper(object):
    """SkyMapper variant whose stamp is derived from an instrument's field of view."""

    def __init__(self, instrument, falloff=None, resolution=(360, 180)):
        '''
        parent: Body
        resolution: 2-tuple of int
            x, y
        '''
        self.parent = sm.Body(instrument)
        if falloff is None:
            falloff = lambda phi, theta: 1.0
        shape, frame, boresight, bounds = self.parent.fov()
        self.boresight = boresight
        # Derive the angular half-extents of the FOV from its boundary vectors.
        if shape in ('CIRCLE', 'POLYGON'):
            angles = [sm.angle(boresight, bounds[:, 0])] * 2
        elif shape == 'RECTANGLE':
            va = bounds[:, 0] + (bounds[:, 1] - bounds[:, 0]) / 2
            vb = bounds[:, 1] + (bounds[:, 2] - bounds[:, 1]) / 2
            angles = [sm.angle(boresight, va), sm.angle(boresight, vb)]
        else:
            va, vb = bounds[:, 0], bounds[:, 1]
            # fix: the original used a '.' instead of ',' between the two
            # angle expressions, which raised AttributeError at runtime.
            angles = [sm.angle(boresight, va), sm.angle(boresight, vb)]
        self.stamp, x_grid, y_grid = _stamp_angles_falloff(angles, falloff, resolution)
        if shape in ('CIRCLE', 'ELLIPSE'):
            # Clip the stamp to the elliptic footprint.
            mask = (x_grid**2 / angles[0]**2.0 + y_grid**2.0 / angles[1]**2) > 1
            self.stamp[mask] = 0
        self.resolution = resolution

    def _make_skymap(self, vectors):
        """Apply the stamp at every pointing direction in *vectors*."""
        skymap = np.zeros(shape=self.resolution[::-1])
        coords = sm.cartesian2sphere(vectors)
        for phi, theta in coords[1:].T:
            _apply_stamp(skymap, self.stamp, phi, theta - np.pi / 2)
        return skymap

    def fixed(self, times, frame='ECLIPJ2000'):
        """Sky map traced by the instrument boresight over *times*."""
        _, matrices = self.parent.rotation(times, target=frame)
        vectors = np.array([mat.dot(self.boresight) for mat in matrices]).T
        return self._make_skymap(vectors)
| [
"spiceminer.angle",
"spiceminer.cartesian2sphere",
"numpy.array",
"numpy.zeros",
"numpy.interp",
"spiceminer.Body",
"numpy.vectorize",
"scipy.ndimage.zoom"
] | [((139, 176), 'numpy.vectorize', 'np.vectorize', (['falloff'], {'otypes': '[float]'}), '(falloff, otypes=[float])\n', (151, 176), True, 'import numpy as np\n'), ((954, 1011), 'scipy.ndimage.zoom', 'ndimage.zoom', (['stamp'], {'zoom': '(zoom_theta, zoom_phi)', 'order': '(3)'}), '(stamp, zoom=(zoom_theta, zoom_phi), order=3)\n', (966, 1011), True, 'import scipy.ndimage as ndimage\n'), ((1069, 1086), 'numpy.vectorize', 'np.vectorize', (['max'], {}), '(max)\n', (1081, 1086), True, 'import numpy as np\n'), ((1309, 1386), 'numpy.array', 'np.array', (['[x - p2, y - q2, x + p2 + uneven[0], y + q2 + uneven[1]]'], {'dtype': 'int'}), '([x - p2, y - q2, x + p2 + uneven[0], y + q2 + uneven[1]], dtype=int)\n', (1317, 1386), True, 'import numpy as np\n'), ((1174, 1213), 'numpy.interp', 'np.interp', (['phi', '[-np.pi, np.pi]', '[0, m]'], {}), '(phi, [-np.pi, np.pi], [0, m])\n', (1183, 1213), True, 'import numpy as np\n'), ((1227, 1268), 'numpy.interp', 'np.interp', (['theta', '[-np.pi, np.pi]', '[0, n]'], {}), '(theta, [-np.pi, np.pi], [0, n])\n', (1236, 1268), True, 'import numpy as np\n'), ((2331, 2346), 'spiceminer.Body', 'sm.Body', (['parent'], {}), '(parent)\n', (2338, 2346), True, 'import spiceminer as sm\n'), ((3349, 3386), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.resolution[::-1]'}), '(shape=self.resolution[::-1])\n', (3357, 3386), True, 'import numpy as np\n'), ((3404, 3432), 'spiceminer.cartesian2sphere', 'sm.cartesian2sphere', (['vectors'], {}), '(vectors)\n', (3423, 3432), True, 'import spiceminer as sm\n'), ((4228, 4247), 'spiceminer.Body', 'sm.Body', (['instrument'], {}), '(instrument)\n', (4235, 4247), True, 'import spiceminer as sm\n'), ((5230, 5267), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.resolution[::-1]'}), '(shape=self.resolution[::-1])\n', (5238, 5267), True, 'import numpy as np\n'), ((5285, 5313), 'spiceminer.cartesian2sphere', 'sm.cartesian2sphere', (['vectors'], {}), '(vectors)\n', (5304, 5313), True, 'import spiceminer as sm\n'), 
((1499, 1515), 'numpy.array', 'np.array', (['[m, n]'], {}), '([m, n])\n', (1507, 1515), True, 'import numpy as np\n'), ((4481, 4514), 'spiceminer.angle', 'sm.angle', (['boresight', 'bounds[:, 0]'], {}), '(boresight, bounds[:, 0])\n', (4489, 4514), True, 'import spiceminer as sm\n'), ((4709, 4732), 'spiceminer.angle', 'sm.angle', (['boresight', 'va'], {}), '(boresight, va)\n', (4717, 4732), True, 'import spiceminer as sm\n'), ((4734, 4757), 'spiceminer.angle', 'sm.angle', (['boresight', 'vb'], {}), '(boresight, vb)\n', (4742, 4757), True, 'import spiceminer as sm\n'), ((3726, 3745), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (3734, 3745), True, 'import numpy as np\n'), ((4843, 4866), 'spiceminer.angle', 'sm.angle', (['boresight', 'va'], {}), '(boresight, va)\n', (4851, 4866), True, 'import spiceminer as sm\n')] |
"""Utility script to mock models for testing."""
from typing import List
import sclblonnx as so
from onnx import helper as xhelp
import sclblonnx._globals as glob
import numpy as np
import click
def create_stub_onnx_model(
    onnx_model_path: str, output_path: str, dynamic_shape_values: dict
):
    """Create stub onnx model for tests with correct input and output shapes and names.
    Args:
        onnx_model_path: Path to the onnx model.
        output_path: Path to the output mock model.
        dynamic_shape_values: Map to specify dynamic dimension values.
    """
    onnx_model = so.graph_from_file(onnx_model_path)
    inputs = onnx_model.input
    outputs = onnx_model.output
    mock_graph = so.empty_graph()
    # Map ONNX elem_type ids back to their sclblonnx data-type names.
    inverse_data_dict = {value: key for key, value in glob.DATA_TYPES.items()}
    # dim-param name -> (input name, axis index) for every dynamic input axis.
    dynamic_shape_map = {}
    for input_ in inputs:
        # Recreate each input, keeping symbolic dim params where present.
        so.add_input(
            mock_graph,
            name=input_.name,
            dimensions=[
                dim.dim_param if dim.dim_param != "" else dim.dim_value
                for dim in input_.type.tensor_type.shape.dim
            ],
            data_type=inverse_data_dict[input_.type.tensor_type.elem_type],
        )
        for i, dim in enumerate(input_.type.tensor_type.shape.dim):
            if dim.dim_param != "":
                dynamic_shape_map[dim.dim_param] = (input_.name, i)
    for output in outputs:
        so.add_output(
            mock_graph,
            name=output.name,
            dimensions=[
                dim.dim_param if dim.dim_param != "" else dim.dim_value
                for dim in output.type.tensor_type.shape.dim
            ],
            data_type=inverse_data_dict[output.type.tensor_type.elem_type],
        )
        input_names = [inp.name for inp in inputs]
        # Outputs that are not pass-throughs of an input get a stub producer:
        # a ConstantOfShape fed by a dynamically computed shape tensor.
        if output.name not in input_names:
            build_output_shape_tensor_(
                mock_graph,
                output,
                dynamic_shape_map,
                f"dynamic_shape_{output.name}",
                override=dynamic_shape_values,
            )
            node = so.node(
                "ConstantOfShape",
                inputs=[f"dynamic_shape_{output.name}"],
                outputs=[output.name],
                value=xhelp.make_tensor(
                    name=f"dynamic_shape_{output.name}_value",
                    data_type=output.type.tensor_type.elem_type,
                    dims=[1],
                    vals=[0],
                ),
                name=f"ConstantOfShape_{output.name}",
            )
            so.add_node(mock_graph, node)
    so.graph_to_file(mock_graph, output_path, onnx_opset_version=15)
def build_output_shape_tensor_(
    graph, output, dynamic_shape_map, shape_name="dynamic_shape", override=None
):
    """Build output shape tensor for dynamic shape models.

    Args:
        graph: Graph to add the output shape tensor to.
        output: Model output.
        dynamic_shape_map: Map of input names to their dynamic shape indices.
        shape_name: Name of the output shape tensor.
        override: Optional map of dim-param name -> concrete value, used when
            a dimension cannot be derived from an input.
    """
    # fix: the default used to be a mutable `{}`, shared between calls; use
    # None as the sentinel instead.
    if override is None:
        override = {}
    dimensions_retrieved = []
    for i, dim in enumerate(output.type.tensor_type.shape.dim):
        if " + " in dim.dim_param:
            # Composite dimension such as "a + b": build both halves and Add them.
            dim1, dim2 = dim.dim_param.split(" + ")
            create_dim_variable_(
                graph,
                shape_name,
                dim1,
                override.get(dim1, dim.dim_value),
                i,
                dynamic_shape_map,
                postfix="1",
            )
            create_dim_variable_(
                graph,
                shape_name,
                dim2,
                override.get(dim2, dim.dim_value),
                i,
                dynamic_shape_map,
                postfix="2",
            )
            so.add_node(
                graph,
                so.node(
                    "Add",
                    inputs=[f"{shape_name}_{i}_1", f"{shape_name}_{i}_2"],
                    outputs=[f"{shape_name}_{i}"],
                ),
            )
        else:
            create_dim_variable_(
                graph,
                shape_name,
                dim.dim_param,
                override.get(dim.dim_param, dim.dim_value),
                i,
                dynamic_shape_map,
            )
        dimensions_retrieved.append(f"{shape_name}_{i}")
    # Concatenate the per-dimension scalars into the final 1-D shape tensor.
    node = so.node(
        "Concat",
        inputs=dimensions_retrieved,
        outputs=[f"{shape_name}"],
        axis=0,
    )
    so.add_node(graph, node)
def create_dim_variable_(
    graph, shape_name, dim_param, dim_value, dim_id, dynamic_shape_map, postfix=None
):
    """Create a dimension variable for a dynamic shape model.

    Args:
        graph: Graph to add the dimension variable to.
        shape_name: Name of the output shape tensor.
        dim_param: Dimension parameter name.
        dim_value: Dimension value.
        dim_id: Index of the dimension variable.
        dynamic_shape_map: Map of dynamic axes names to their inputs indices.
        postfix: Optional suffix distinguishing the halves of a composite dim.
    """
    if postfix is None:
        target = f"{shape_name}_{dim_id}"
    else:
        target = f"{shape_name}_{dim_id}_{postfix}"
    if dim_param != "" and dim_param in dynamic_shape_map:
        # Dynamic axis: read the size at runtime from the matching input.
        source_name, axis = dynamic_shape_map[dim_param]
        shape_node = so.node(
            "Shape",
            inputs=[source_name],
            outputs=[target],
            start=axis,
            end=axis + 1,
        )
        so.add_node(graph, shape_node)
    else:
        # Static axis: bake the value in as a constant (0 becomes 1).
        so.add_constant(
            graph,
            target,
            np.array([dim_value if dim_value != 0 else 1], dtype=np.int64),
            data_type="INT64",
        )
@click.command()
@click.option(
    "-d",
    "--dynamic-dim",
    type=str,
    multiple=True,
    help="Specify dynamic dimension in format `<dim name>:<dim value>`.",
)
@click.option("-m", "--onnx-model-path", required=True, type=str)
@click.option("-o", "--output-path", required=True, type=str)
def main(onnx_model_path: str, output_path: str, dynamic_dim: List[str]):
    """Create stub onnx model for tests with correct input and output shapes and names."""
    # Parse each "<name>:<value>" spec into the override mapping.
    overrides = {}
    for spec in dynamic_dim:
        name, raw_value = spec.split(":")
        overrides[name] = int(raw_value)
    create_stub_onnx_model(onnx_model_path, output_path, overrides)


if __name__ == "__main__":
    main()
| [
"sclblonnx.graph_from_file",
"sclblonnx.add_output",
"sclblonnx._globals.DATA_TYPES.items",
"click.option",
"sclblonnx.graph_to_file",
"numpy.array",
"sclblonnx.node",
"sclblonnx.empty_graph",
"sclblonnx.add_node",
"onnx.helper.make_tensor",
"sclblonnx.add_input",
"click.command"
] | [((5994, 6009), 'click.command', 'click.command', ([], {}), '()\n', (6007, 6009), False, 'import click\n'), ((6012, 6147), 'click.option', 'click.option', (['"""-d"""', '"""--dynamic-dim"""'], {'type': 'str', 'multiple': '(True)', 'help': '"""Specify dynamic dimension in format `<dim name>:<dim value>`."""'}), "('-d', '--dynamic-dim', type=str, multiple=True, help=\n 'Specify dynamic dimension in format `<dim name>:<dim value>`.')\n", (6024, 6147), False, 'import click\n'), ((6174, 6238), 'click.option', 'click.option', (['"""-m"""', '"""--onnx-model-path"""'], {'required': '(True)', 'type': 'str'}), "('-m', '--onnx-model-path', required=True, type=str)\n", (6186, 6238), False, 'import click\n'), ((6241, 6301), 'click.option', 'click.option', (['"""-o"""', '"""--output-path"""'], {'required': '(True)', 'type': 'str'}), "('-o', '--output-path', required=True, type=str)\n", (6253, 6301), False, 'import click\n'), ((614, 649), 'sclblonnx.graph_from_file', 'so.graph_from_file', (['onnx_model_path'], {}), '(onnx_model_path)\n', (632, 649), True, 'import sclblonnx as so\n'), ((734, 750), 'sclblonnx.empty_graph', 'so.empty_graph', ([], {}), '()\n', (748, 750), True, 'import sclblonnx as so\n'), ((2659, 2723), 'sclblonnx.graph_to_file', 'so.graph_to_file', (['mock_graph', 'output_path'], {'onnx_opset_version': '(15)'}), '(mock_graph, output_path, onnx_opset_version=15)\n', (2675, 2723), True, 'import sclblonnx as so\n'), ((4464, 4549), 'sclblonnx.node', 'so.node', (['"""Concat"""'], {'inputs': 'dimensions_retrieved', 'outputs': "[f'{shape_name}']", 'axis': '(0)'}), "('Concat', inputs=dimensions_retrieved, outputs=[f'{shape_name}'],\n axis=0)\n", (4471, 4549), True, 'import sclblonnx as so\n'), ((4595, 4619), 'sclblonnx.add_node', 'so.add_node', (['graph', 'node'], {}), '(graph, node)\n', (4606, 4619), True, 'import sclblonnx as so\n'), ((899, 1137), 'sclblonnx.add_input', 'so.add_input', (['mock_graph'], {'name': 'input_.name', 'dimensions': "[(dim.dim_param if 
dim.dim_param != '' else dim.dim_value) for dim in\n input_.type.tensor_type.shape.dim]", 'data_type': 'inverse_data_dict[input_.type.tensor_type.elem_type]'}), "(mock_graph, name=input_.name, dimensions=[(dim.dim_param if \n dim.dim_param != '' else dim.dim_value) for dim in input_.type.\n tensor_type.shape.dim], data_type=inverse_data_dict[input_.type.\n tensor_type.elem_type])\n", (911, 1137), True, 'import sclblonnx as so\n'), ((1448, 1687), 'sclblonnx.add_output', 'so.add_output', (['mock_graph'], {'name': 'output.name', 'dimensions': "[(dim.dim_param if dim.dim_param != '' else dim.dim_value) for dim in\n output.type.tensor_type.shape.dim]", 'data_type': 'inverse_data_dict[output.type.tensor_type.elem_type]'}), "(mock_graph, name=output.name, dimensions=[(dim.dim_param if \n dim.dim_param != '' else dim.dim_value) for dim in output.type.\n tensor_type.shape.dim], data_type=inverse_data_dict[output.type.\n tensor_type.elem_type])\n", (1461, 1687), True, 'import sclblonnx as so\n'), ((5225, 5472), 'sclblonnx.node', 'so.node', (['"""Shape"""'], {'inputs': '[dynamic_shape_map[dim_param][0]]', 'outputs': "[f'{shape_name}_{dim_id}' if postfix is None else\n f'{shape_name}_{dim_id}_{postfix}']", 'start': 'dynamic_shape_map[dim_param][1]', 'end': '(dynamic_shape_map[dim_param][1] + 1)'}), "('Shape', inputs=[dynamic_shape_map[dim_param][0]], outputs=[\n f'{shape_name}_{dim_id}' if postfix is None else\n f'{shape_name}_{dim_id}_{postfix}'], start=dynamic_shape_map[dim_param]\n [1], end=dynamic_shape_map[dim_param][1] + 1)\n", (5232, 5472), True, 'import sclblonnx as so\n'), ((5611, 5636), 'sclblonnx.add_node', 'so.add_node', (['graph', 'node1'], {}), '(graph, node1)\n', (5622, 5636), True, 'import sclblonnx as so\n'), ((806, 829), 'sclblonnx._globals.DATA_TYPES.items', 'glob.DATA_TYPES.items', ([], {}), '()\n', (827, 829), True, 'import sclblonnx._globals as glob\n'), ((2624, 2653), 'sclblonnx.add_node', 'so.add_node', (['mock_graph', 'node'], {}), '(mock_graph, 
node)\n', (2635, 2653), True, 'import sclblonnx as so\n'), ((5831, 5893), 'numpy.array', 'np.array', (['[dim_value if dim_value != 0 else 1]'], {'dtype': 'np.int64'}), '([dim_value if dim_value != 0 else 1], dtype=np.int64)\n', (5839, 5893), True, 'import numpy as np\n'), ((3927, 4031), 'sclblonnx.node', 'so.node', (['"""Add"""'], {'inputs': "[f'{shape_name}_{i}_1', f'{shape_name}_{i}_2']", 'outputs': "[f'{shape_name}_{i}']"}), "('Add', inputs=[f'{shape_name}_{i}_1', f'{shape_name}_{i}_2'],\n outputs=[f'{shape_name}_{i}'])\n", (3934, 4031), True, 'import sclblonnx as so\n'), ((2309, 2439), 'onnx.helper.make_tensor', 'xhelp.make_tensor', ([], {'name': 'f"""dynamic_shape_{output.name}_value"""', 'data_type': 'output.type.tensor_type.elem_type', 'dims': '[1]', 'vals': '[0]'}), "(name=f'dynamic_shape_{output.name}_value', data_type=\n output.type.tensor_type.elem_type, dims=[1], vals=[0])\n", (2326, 2439), True, 'from onnx import helper as xhelp\n')] |
"""nlp.py: Classes for working with NLP data
"""
__author__ = "<NAME>"
__license__ = "BSD"
__email__ = "<EMAIL>"
import nltk
import numpy as np
import unidecode
import pandas as pd
class PreprocessPipeline:
    """Chainable NLP preprocessing pipeline over a pandas DataFrame.

    Every step transforms the text column (``column_name``) in place and
    returns ``self`` so steps can be chained fluently, e.g.::

        PreprocessPipeline(df, 'english').lower().tokenize().build_vocabulary()

    :meth:`process` runs a pipeline given as a list of method names and can
    reuse partial results via the ``'cache'`` marker.
    """

    # Class-level cache shared by all pipelines: maps a key encoding the
    # input data plus the operations already applied -> processed pipeline.
    CACHE = {}

    def __init__(self, df, language, vocab=None, copy=True, log=False, custom_split=None, min_words=1,
                 max_words=128, min_word_count=5, column_name='text', mask_column='attention_mask',
                 padding=128, padding_id=0, start_token_id=1, end_token_id=2):
        """
        Args:
            df: source frame; must provide ``column_name`` plus ``sid`` and
                ``target`` columns for the sentence-splitting steps.
            language: language name understood by nltk (stemming, stopwords).
            vocab: optional pre-built word -> id mapping; it is copied, the
                caller's dict is never mutated.  (Fix: was a mutable ``{}``
                default argument; ``None`` avoids the shared-default pitfall.)
            copy: operate on a copy of ``df`` instead of the caller's frame.
            log: print cache hits/stores inside :meth:`process`.
            custom_split: extra separator applied after sentence splitting.
            min_words: rows with fewer tokens are dropped by :meth:`filter_rows`.
            max_words: chunk length used by :meth:`split_max_word_sentences`.
            min_word_count: words seen at most this often are excluded from
                the vocabulary built by :meth:`build_vocabulary`.
            column_name: name of the text column being processed.
            mask_column: name of the attention-mask column (:meth:`add_mask`).
            padding: target sequence length for :meth:`padding`.
            padding_id: id used to pad sequences (also the ``<p>`` token id).
            start_token_id: id of the ``<cls>`` token.
            end_token_id: id of the ``<end>`` token; must exceed ``padding_id``.
        """
        self._df = df
        self._vocab = {} if vocab is None else vocab.copy()
        self._log = log
        self._custom_split = custom_split
        self._min_words = min_words
        self._max_words = max_words
        self._min_word_count = min_word_count
        self._column_name = column_name
        self._mask_column = mask_column
        self._padding = padding
        self._padding_id = padding_id
        self._end_token_id = end_token_id
        self._start_token_id = start_token_id
        # Cache-key prefix; intentionally uses id() of the frame as passed
        # in (i.e. before the optional copy below).
        self._id = f"{type(self._df)}_{id(self._df)}_{min_words}_{max_words}_{min_word_count}_{column_name}_{mask_column}_{padding}_{padding_id}_{end_token_id}"
        assert end_token_id > padding_id, 'End token id > padding id'
        if copy:
            self._df = self._df.copy()
        self._language = language

    def _split_dataframe(self, functor):
        """Explode each row's text via ``functor`` into one row per piece,
        carrying the ``sid`` and ``target`` columns along."""
        # pd.Series(sid, index=pieces): the pieces become the index and the
        # sid the value; reset_index() then yields (piece, sid) columns.
        newDF = pd.concat([pd.Series(row['sid'], functor(row[self._column_name]))
                           for _, row in self._df.iterrows()]).reset_index()
        newDF = newDF.rename(columns={'index': self._column_name, 0: "sid"})
        newDF = newDF.merge(self._df[['target', 'sid']], on="sid", how='inner')
        return newDF

    def split_sentences(self):
        """Split each document into sentences (one row per sentence)."""
        if self._custom_split is None:
            self._df = self._split_dataframe(nltk.sent_tokenize)
        else:
            def _tokenize(s):
                # Sentence-split first, then split every sentence again on
                # the user-supplied separator.
                s = nltk.sent_tokenize(s)
                s = [split for s1 in s for split in s1.split(self._custom_split)]
                return s
            self._df = self._split_dataframe(_tokenize)
        return self

    def split_max_word_sentences(self):
        """Split each token list into chunks of at most ``max_words`` words."""
        def _chunks(s):
            return [' '.join(s[i: i + self._max_words]) for i in range(0, len(s), self._max_words)]
        self._df = self._split_dataframe(_chunks)
        # _chunks joined the tokens with spaces; restore the token lists.
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: s.split(' '))
        return self

    def lower(self):
        """Lowercase the raw text."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: s.lower())
        return self

    def tokenize(self):
        """Split the raw text into word tokens."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: nltk.word_tokenize(s))
        return self

    def tokenize_char(self):
        """Split the raw text into individual characters."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [c for c in s])
        return self

    def length(self):
        """Replace every token by its character length."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [len(w) for w in s])
        return self

    def stem(self):
        """Reduce every token to its stem (Snowball stemmer)."""
        stemmer = nltk.SnowballStemmer(self._language)
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [stemmer.stem(w) for w in s])
        return self

    def pos_tag(self):
        """Replace every token by its part-of-speech tag."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [p for w, p in nltk.pos_tag(s)])
        return self

    def remove_punctuation(self):
        """Drop tokens that are not purely alphanumeric."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [w for w in s if w.isalnum()])
        return self

    def remove_diacritics(self):
        """Transliterate accented characters to plain ASCII (raw text step)."""
        self._df[self._column_name] = self._df[self._column_name].apply(unidecode.unidecode)
        return self

    def remove_stopwords(self):
        """Drop stopword tokens for the pipeline's language."""
        stopwords = nltk.corpus.stopwords.words(self._language)
        self._df[self._column_name] = self._df[self._column_name].apply(
            lambda s: [w for w in s if w not in stopwords])
        return self

    def only_stopwords(self):
        """Keep only stopword tokens for the pipeline's language."""
        stopwords = nltk.corpus.stopwords.words(self._language)
        self._df[self._column_name] = self._df[self._column_name].apply(
            lambda s: [w for w in s if w in stopwords])
        return self

    def convert_to_phonames(self):
        """Replace every word known to CMUdict by its phoneme sequence."""
        arpabet = nltk.corpus.cmudict.dict()
        # Vowel lexical stress in cmudict: 0 - No stress, 1 - Primary stress, 2 - Secondary stress
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [arpabet[w][0] for w in s if w in arpabet])
        # Flatten the per-word phoneme lists into one flat sequence.
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: [w for words in s for w in words])
        return self

    def build_vocabulary(self):
        """Extend the vocabulary with every word occurring more than
        ``min_word_count`` times; special tokens are added when starting
        from an empty vocabulary."""
        vocab_count = {}
        for _, row in self._df.iterrows():
            for w in row[self._column_name]:
                if w not in vocab_count:
                    vocab_count[w] = 1
                else:
                    vocab_count[w] += 1
        if len(self._vocab) == 0:
            self._vocab['<p>'] = self._padding_id
            self._vocab['<cls>'] = self._start_token_id
            self._vocab['<end>'] = self._end_token_id
        ids = max(self._vocab.values())
        for w, count in vocab_count.items():
            if w not in self._vocab and count > self._min_word_count:
                ids += 1
                self._vocab[w] = ids
        return self

    def to_vocabulary_ids(self, default_value=0):
        """Map every token to its vocabulary id (``default_value`` for OOV).

        Fix: ``np.int`` was removed in NumPy 1.24; ``np.int64`` is the
        equivalent concrete dtype.
        """
        self._df[self._column_name] = self._df[self._column_name].apply(
            lambda s: np.array([self._vocab.get(w, default_value) for w in s], dtype=np.int64))
        return self

    def add_mask(self):
        """Add an attention mask of ones matching each id sequence."""
        self._df[self._mask_column] = self._df[self._column_name].apply(lambda s: np.ones_like(s))
        return self

    def add_start_token(self):
        """Prepend the start token id (and a 1 to the mask, if present)."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: np.concatenate(([self._start_token_id], s)))
        if self._mask_column in self._df:
            self._df[self._mask_column] = self._df[self._mask_column].apply(lambda s: np.concatenate(([1], s)))
        return self

    def add_end_token(self):
        """Append the end token id (and a 1 to the mask, if present)."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: np.concatenate((s, [self._end_token_id])))
        if self._mask_column in self._df:
            self._df[self._mask_column] = self._df[self._mask_column].apply(lambda s: np.concatenate((s, [1])))
        return self

    def padding(self):
        """Pad (with ``padding_id``) or truncate every sequence, and its
        mask if present, to exactly ``padding`` entries."""
        self._df[self._column_name] = self._df[self._column_name].apply(
            lambda s: np.pad(s, (0, self._padding - len(s)), constant_values=self._padding_id)
            if len(s) < self._padding else np.resize(s, self._padding))
        if self._mask_column in self._df:
            self._df[self._mask_column] = self._df[self._mask_column].apply(
                lambda s: np.pad(s, (0, self._padding - len(s)), constant_values=0)
                if len(s) < self._padding else np.resize(s, self._padding))
        return self

    def filter_rows(self):
        """Drop rows whose sequence is shorter than ``min_words``."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: pd.NA if len(s) < self._min_words else s)
        self._df = self._df.dropna().reset_index()
        return self

    def remove_pad_ids(self, default_value=0):
        """Strip padding ids (``default_value``) from every id sequence.

        Fix: ``np.int`` -> ``np.int64`` (alias removed in NumPy 1.24).
        """
        self._df[self._column_name] = self._df[self._column_name].apply(
            lambda s: np.array([w for w in s if w != default_value], dtype=np.int64))
        return self

    def join_words(self):
        """Join every token sequence back into one space-separated string."""
        self._df[self._column_name] = self._df[self._column_name].apply(lambda s: ''.join([str(w) + ' ' for w in s]))
        return self

    @property
    def DF(self):
        """The underlying (possibly transformed) DataFrame."""
        return self._df

    @property
    def VOCAB(self):
        """The word -> id vocabulary mapping."""
        return self._vocab

    def _process(self, pipeline: list):
        """Apply the named pipeline steps in order and return the result."""
        preprocess = self
        for func_name in pipeline:
            func = getattr(PreprocessPipeline, func_name)
            preprocess = func(preprocess)
        return preprocess

    def process(self, pipeline: list):
        """Run ``pipeline`` (a list of method names), caching partial results.

        Every ``'cache'`` entry marks a point at which the state reached so
        far is stored in (or restored from) the class-level :attr:`CACHE`.
        """
        preprocess = self
        cache_ind = [i for i, op in enumerate(pipeline) if op == 'cache']
        done = []
        last_cid = 0
        for cid in cache_ind:
            to_do = pipeline[last_cid:cid]
            last_cid = cid + 1
            done += to_do
            data_id = f"{self._id}_{'_'.join(done)}"
            if data_id in PreprocessPipeline.CACHE:
                if self._log:
                    print(f'Loading pipeline cached {data_id}...')
                preprocess = PreprocessPipeline.CACHE[data_id]
            else:
                # Bug fix: continue from `preprocess` (which may have been
                # restored from the cache in a previous iteration), not
                # from the untouched `self`.
                preprocess = preprocess._process(to_do)
                if self._log:
                    print(f'Saving to pipeline cache {data_id}...')
                PreprocessPipeline.CACHE[data_id] = preprocess
        preprocess = preprocess._process(pipeline[last_cid:])
        return preprocess
| [
"numpy.ones_like",
"nltk.pos_tag",
"nltk.corpus.stopwords.words",
"nltk.word_tokenize",
"nltk.SnowballStemmer",
"nltk.sent_tokenize",
"numpy.array",
"numpy.resize",
"numpy.concatenate",
"nltk.corpus.cmudict.dict"
] | [((3039, 3075), 'nltk.SnowballStemmer', 'nltk.SnowballStemmer', (['self._language'], {}), '(self._language)\n', (3059, 3075), False, 'import nltk\n'), ((3735, 3778), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['self._language'], {}), '(self._language)\n', (3762, 3778), False, 'import nltk\n'), ((3983, 4026), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['self._language'], {}), '(self._language)\n', (4010, 4026), False, 'import nltk\n'), ((4230, 4256), 'nltk.corpus.cmudict.dict', 'nltk.corpus.cmudict.dict', ([], {}), '()\n', (4254, 4256), False, 'import nltk\n'), ((1886, 1907), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['s'], {}), '(s)\n', (1904, 1907), False, 'import nltk\n'), ((2663, 2684), 'nltk.word_tokenize', 'nltk.word_tokenize', (['s'], {}), '(s)\n', (2681, 2684), False, 'import nltk\n'), ((5673, 5688), 'numpy.ones_like', 'np.ones_like', (['s'], {}), '(s)\n', (5685, 5688), True, 'import numpy as np\n'), ((5824, 5867), 'numpy.concatenate', 'np.concatenate', (['([self._start_token_id], s)'], {}), '(([self._start_token_id], s))\n', (5838, 5867), True, 'import numpy as np\n'), ((6155, 6196), 'numpy.concatenate', 'np.concatenate', (['(s, [self._end_token_id])'], {}), '((s, [self._end_token_id]))\n', (6169, 6196), True, 'import numpy as np\n'), ((7288, 7348), 'numpy.array', 'np.array', (['[w for w in s if w != default_value]'], {'dtype': 'np.int'}), '([w for w in s if w != default_value], dtype=np.int)\n', (7296, 7348), True, 'import numpy as np\n'), ((5997, 6021), 'numpy.concatenate', 'np.concatenate', (['([1], s)'], {}), '(([1], s))\n', (6011, 6021), True, 'import numpy as np\n'), ((6326, 6350), 'numpy.concatenate', 'np.concatenate', (['(s, [1])'], {}), '((s, [1]))\n', (6340, 6350), True, 'import numpy as np\n'), ((6607, 6634), 'numpy.resize', 'np.resize', (['s', 'self._padding'], {}), '(s, self._padding)\n', (6616, 6634), True, 'import numpy as np\n'), ((3329, 3344), 'nltk.pos_tag', 'nltk.pos_tag', (['s'], {}), 
'(s)\n', (3341, 3344), False, 'import nltk\n'), ((6886, 6913), 'numpy.resize', 'np.resize', (['s', 'self._padding'], {}), '(s, self._padding)\n', (6895, 6913), True, 'import numpy as np\n')] |
# Third-party imports
import numpy as np
import pandas as pd
import pytest
# First-party imports
from gluonts.model.forecast import QuantileForecast, SampleForecast
# 101 sample paths with a single time step each: 0.00, 0.01, ..., 1.00.
SAMPLES = np.arange(0, 101).reshape(-1, 1) / 100.0
# The interior levels 0.01 .. 0.99 serve both as quantile levels and as the
# corresponding forecast values, so fcst.quantile(q) should return ~q.
QUANTILES = SAMPLES[1:-1, 0]
START_DATE = pd.Timestamp(2017, 1, 1, 12)
FREQ = '1D'
# One forecast of each flavour, built from the same sample grid.
FORECASTS = {
    'QuantileForecast': QuantileForecast(
        forecast_arrays=QUANTILES.reshape(-1, 1),
        start_date=START_DATE,
        forecast_keys=QUANTILES.tolist(),
        freq=FREQ,
    ),
    'SampleForecast': SampleForecast(
        samples=SAMPLES.reshape(len(SAMPLES), 1),
        start_date=START_DATE,
        freq=FREQ,
    ),
}
@pytest.mark.parametrize("fcst_cls", FORECASTS.keys())
def test_Forecast(fcst_cls):
    """Each forecast type must answer quantile queries given as a float, a
    decimal string, or a 'pXX' percentile string, and expose consistent
    ``prediction_length``/``index`` metadata."""
    fcst = FORECASTS[fcst_cls]
    num_samples, pred_length = SAMPLES.shape
    # quantiles = [x/float(num_samples-1) for x in range(0, num_samples)]
    for q_value in QUANTILES:
        q_str = str(q_value)
        quantile_str = 'p{:02d}'.format(int(round(q_value * 100)))
        # All three spellings of the same quantile must yield the same value.
        for q in [q_value, q_str, quantile_str]:
            quant_pred = fcst.quantile(q)
            assert (
                np.abs(quant_pred - q_value).reshape((1,)) < 1e-6
            ), "Expected {} quantile {}. Obtained {}.".format(
                q_value, q_value, quant_pred
            )
    assert fcst.prediction_length == 1
    assert len(fcst.index) == pred_length
    assert fcst.index[0] == pd.Timestamp(START_DATE)
| [
"numpy.abs",
"pandas.Timestamp",
"numpy.arange"
] | [((260, 288), 'pandas.Timestamp', 'pd.Timestamp', (['(2017)', '(1)', '(1)', '(12)'], {}), '(2017, 1, 1, 12)\n', (272, 288), True, 'import pandas as pd\n'), ((1427, 1451), 'pandas.Timestamp', 'pd.Timestamp', (['START_DATE'], {}), '(START_DATE)\n', (1439, 1451), True, 'import pandas as pd\n'), ((177, 194), 'numpy.arange', 'np.arange', (['(0)', '(101)'], {}), '(0, 101)\n', (186, 194), True, 'import numpy as np\n'), ((1146, 1174), 'numpy.abs', 'np.abs', (['(quant_pred - q_value)'], {}), '(quant_pred - q_value)\n', (1152, 1174), True, 'import numpy as np\n')] |
# ALP4lib: A Python module to control Vialux DMDs
# https://github.com/wavefrontshaping/ALP4lib
# by <NAME>
import numpy as np
import ALP4
import time
from configparser import ConfigParser
# Read the DMD configuration (ALP version and library directory) from disk.
config = ConfigParser()
config.read('config.ini')

# Load the Vialux .dll
# Fixes: the parsed object is named `config` (`conf` was undefined), and the
# class lives inside the ALP4 module, so it must be qualified as ALP4.ALP4.
DMD = ALP4.ALP4(version=config['dmd']['alp4ver'], libDir=config['dmd']['alp4dir'])
# Initialize the device
DMD.Initialize()

# Binary amplitude image (0 or 1)
bitDepth = 1
imgBlack = np.zeros([DMD.nSizeY, DMD.nSizeX])
imgWhite = np.ones([DMD.nSizeY, DMD.nSizeX]) * (2 ** 8 - 1)
# imgSeq = np.concatenate([imgBlack.ravel(), imgWhite.ravel()])

# Generate testing patterns: a bright disc (radius 50 px) that moves 100 px
# to the right on every frame.  Vectorized with numpy instead of the former
# per-pixel Python loops (O(frames * pixels) interpreter iterations).
imgNum = 10
imgTime = 100000  # illumination time per frame, microsec
yy, xx = np.mgrid[0:DMD.nSizeY, 0:DMD.nSizeX]  # per-pixel coordinate grids
frames = []
for i in range(imgNum):
    inside = ((xx - i * 100) ** 2 + (yy - DMD.nSizeY // 2) ** 2) < 2500
    # float64 to match the dtype the original per-pixel loop produced
    frames.append(np.where(inside, 2 ** 8 - 1, 0).astype(np.float64).ravel())
imgSeq = np.concatenate(frames)

# Allocate the onboard memory for the image sequence
DMD.SeqAlloc(nbImg=imgNum, bitDepth=bitDepth)
# Send the image sequence as a 1D list/array/numpy array
DMD.SeqPut(imgData=imgSeq)
# Set image rate
DMD.SetTiming(illuminationTime=imgTime)
# Show sequence is ready
print('Ready')

# Use ALP external trigger mode
# Fix: the ALP_* constants live in the ALP4 module namespace.
DMD.Halt()
DMD.ProjControl(controlType=ALP4.ALP_PROJ_MODE, value=ALP4.ALP_MASTER)
DMD.ProjControl(controlType=ALP4.ALP_PROJ_STEP, value=ALP4.ALP_EDGE_RISING)

# Run the sequence
DMD.Run()
# Timeout
time.sleep(30)
# Stop the sequence display
DMD.Halt()
# Free the sequence from the onboard memory
DMD.FreeSeq()
# De-allocate the device
DMD.Free()
| [
"configparser.ConfigParser",
"numpy.ones",
"time.sleep",
"numpy.array",
"numpy.zeros",
"ALP4"
] | [((198, 212), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (210, 212), False, 'from configparser import ConfigParser\n'), ((269, 336), 'ALP4', 'ALP4', ([], {'version': "conf['dmd']['alp4ver']", 'libDir': "conf['dmd']['alp4dir']"}), "(version=conf['dmd']['alp4ver'], libDir=conf['dmd']['alp4dir'])\n", (273, 336), False, 'import ALP4\n'), ((440, 474), 'numpy.zeros', 'np.zeros', (['[DMD.nSizeY, DMD.nSizeX]'], {}), '([DMD.nSizeY, DMD.nSizeX])\n', (448, 474), True, 'import numpy as np\n'), ((668, 702), 'numpy.zeros', 'np.zeros', (['[DMD.nSizeY, DMD.nSizeX]'], {}), '([DMD.nSizeY, DMD.nSizeX])\n', (676, 702), True, 'import numpy as np\n'), ((711, 723), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (719, 723), True, 'import numpy as np\n'), ((1519, 1533), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1529, 1533), False, 'import time\n'), ((485, 518), 'numpy.ones', 'np.ones', (['[DMD.nSizeY, DMD.nSizeX]'], {}), '([DMD.nSizeY, DMD.nSizeX])\n', (492, 518), True, 'import numpy as np\n')] |
from typing import List
import numpy
import pytest
from celery.result import AsyncResult
from openff.bespokefit.schema.tasks import HessianTask, OptimizationTask, Torsion1DTask
from openff.toolkit.topology import Molecule
from pydantic import parse_raw_as
from qcelemental.models import AtomicResult, AtomicResultProperties, DriverEnum
from qcelemental.models.common_models import Model, Provenance
from beflow.services.qcgenerator import worker
from beflow.services.qcgenerator.app import _retrieve_qc_result
from beflow.services.qcgenerator.models import (
QCGeneratorGETResponse,
QCGeneratorPOSTBody,
QCGeneratorPOSTResponse,
)
from beflow.tests.mocking.celery import mock_celery_task
@pytest.fixture()
def mock_atomic_result() -> AtomicResult:
    """A minimal successful hessian ``AtomicResult`` for methane whose
    ``return_result`` (5.2) lets tests verify round-tripping."""
    methane: Molecule = Molecule.from_smiles("C")
    methane.generate_conformers(n_conformers=1)
    qc_molecule = methane.to_qcschema()

    return AtomicResult(
        molecule=qc_molecule,
        driver=DriverEnum.hessian,
        model=Model(method="rdkit", basis=None),
        return_result=5.2,
        success=True,
        provenance=Provenance(creator="pytest"),
        properties=AtomicResultProperties(),
    )
@pytest.mark.parametrize(
    "task_status, task_result, expected_state",
    [
        ("PENDING", {}, "waiting"),
        ("STARTED", {}, "running"),
        ("FAILURE", {"error_message": "error"}, "errored"),
    ],
)
def test_retrieve_qc_result_pending_running_errored(
    redis_connection, monkeypatch, task_status, task_result, expected_state
):
    """Celery task states must map onto the service's status strings, with
    no result payload attached while the calculation is unfinished."""
    # Make every AsyncResult report the parametrized celery state.
    monkeypatch.setattr(
        AsyncResult,
        "_get_task_meta",
        lambda self: {"status": task_status, "result": task_result},
    )
    # Register QC task id "1" as a torsion drive.
    redis_connection.hset("qcgenerator:types", "1", "torsion1d")
    result = QCGeneratorGETResponse.parse_obj(_retrieve_qc_result("1", True))
    assert result.qc_calc_status == expected_state
    assert result.qc_calc_result is None
    assert result.qc_calc_type == "torsion1d"
    assert result.qc_calc_id == "1"
def test_retrieve_qc_result_success(
    qcgenerator_client, redis_connection, monkeypatch, mock_atomic_result
):
    """A finished task is reported as ``success`` with its full payload."""
    # Report the celery task as finished, carrying the mocked AtomicResult.
    monkeypatch.setattr(
        AsyncResult,
        "_get_task_meta",
        lambda self: {"status": "SUCCESS", "result": mock_atomic_result.json()},
    )
    # Register QC task id "1" as a hessian calculation.
    redis_connection.hset("qcgenerator:types", "1", "hessian")
    result = QCGeneratorGETResponse.parse_obj(_retrieve_qc_result("1", True))
    assert result.qc_calc_status == "success"
    assert result.qc_calc_result is not None
    assert result.qc_calc_type == "hessian"
    assert result.qc_calc_id == "1"
    assert result.qc_calc_result.driver == DriverEnum.hessian
    # The mocked return value (5.2) must survive the round trip.
    assert numpy.isclose(result.qc_calc_result.return_result, 5.2)
def test_get_qc_result(
    qcgenerator_client, redis_connection, monkeypatch, mock_atomic_result
):
    """GET /qc-calc/<id> returns a finished hessian record with its result."""
    # Report every celery task as finished with the mocked payload.
    monkeypatch.setattr(
        AsyncResult,
        "_get_task_meta",
        lambda self: {"status": "SUCCESS", "result": mock_atomic_result.json()},
    )
    # Register task "1" as a hessian calculation.
    redis_connection.hset("qcgenerator:types", "1", "hessian")

    response = qcgenerator_client.get("/qc-calc/1")
    response.raise_for_status()

    payload = QCGeneratorGETResponse.parse_raw(response.text)

    assert payload.qc_calc_id == "1"
    assert payload.qc_calc_type == "hessian"
    assert payload.qc_calc_status == "success"
    assert payload.qc_calc_result is not None
    assert payload.qc_calc_result.driver == DriverEnum.hessian
    assert numpy.isclose(payload.qc_calc_result.return_result, 5.2)
@pytest.mark.parametrize(
    "task, compute_function",
    [
        (
            Torsion1DTask(
                smiles="[CH2:1][CH2:2]",
                central_bond=(1, 2),
                program="rdkit",
                model=Model(method="uff", basis=None),
            ),
            "compute_torsion_drive",
        ),
        (
            OptimizationTask(
                smiles="[CH2:1][CH2:2]",
                n_conformers=1,
                program="rdkit",
                model=Model(method="uff", basis=None),
            ),
            "compute_optimization",
        ),
        (
            HessianTask(
                smiles="[CH2:1][CH2:2]",
                program="rdkit",
                model=Model(method="uff", basis=None),
            ),
            "compute_hessian",
        ),
    ],
)
def test_post_qc_result(
    qcgenerator_client, redis_connection, monkeypatch, task, compute_function
):
    """POSTing any QC task type submits the matching celery compute job and
    records the task's type under the assigned id."""
    # Capture the kwargs the service passes to the celery compute function.
    submitted_task_kwargs = mock_celery_task(worker, compute_function, monkeypatch)
    request = qcgenerator_client.post(
        "/qc-calc", data=QCGeneratorPOSTBody(input_schema=task).json()
    )
    request.raise_for_status()
    # The task schema must be forwarded to the worker unmodified ...
    assert submitted_task_kwargs["task_json"] == task.json()
    # ... and its type recorded under the id the service assigned.
    assert redis_connection.hget("qcgenerator:types", "1").decode() == task.type
    result = QCGeneratorPOSTResponse.parse_raw(request.text)
    assert result.qc_calc_id == "1"
@pytest.mark.parametrize("include_result", [True, False])
def test_get_qc_results(
    qcgenerator_client,
    redis_connection,
    monkeypatch,
    mock_atomic_result,
    include_result,
):
    """The bulk endpoint returns every requested record and honours the
    ``results`` flag that toggles inclusion of the heavy result payloads."""
    # Report every celery task as finished with the mocked payload.
    monkeypatch.setattr(
        AsyncResult,
        "_get_task_meta",
        lambda self: {"status": "SUCCESS", "result": mock_atomic_result.json()},
    )
    # Two finished hessian tasks.
    redis_connection.hset("qcgenerator:types", "1", "hessian")
    redis_connection.hset("qcgenerator:types", "2", "hessian")
    request = qcgenerator_client.get(
        f"/qc-calcs?ids=1&ids=2&results={str(include_result).lower()}"
    )
    request.raise_for_status()
    results = parse_raw_as(List[QCGeneratorGETResponse], request.text)
    assert len(results) == 2
    for i, result in enumerate(results):
        assert result.qc_calc_status == "success"
        # The payload is attached only when explicitly requested.
        assert (result.qc_calc_result is not None) == include_result
        assert result.qc_calc_type == "hessian"
        assert result.qc_calc_id == f"{i + 1}"
| [
"beflow.services.qcgenerator.models.QCGeneratorPOSTBody",
"numpy.isclose",
"qcelemental.models.common_models.Model",
"beflow.services.qcgenerator.app._retrieve_qc_result",
"qcelemental.models.common_models.Provenance",
"qcelemental.models.AtomicResultProperties",
"openff.toolkit.topology.Molecule.from_s... | [((705, 721), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (719, 721), False, 'import pytest\n'), ((1168, 1355), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""task_status, task_result, expected_state"""', "[('PENDING', {}, 'waiting'), ('STARTED', {}, 'running'), ('FAILURE', {\n 'error_message': 'error'}, 'errored')]"], {}), "('task_status, task_result, expected_state', [(\n 'PENDING', {}, 'waiting'), ('STARTED', {}, 'running'), ('FAILURE', {\n 'error_message': 'error'}, 'errored')])\n", (1191, 1355), False, 'import pytest\n'), ((4887, 4943), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""include_result"""', '[True, False]'], {}), "('include_result', [True, False])\n", (4910, 4943), False, 'import pytest\n'), ((790, 815), 'openff.toolkit.topology.Molecule.from_smiles', 'Molecule.from_smiles', (['"""C"""'], {}), "('C')\n", (810, 815), False, 'from openff.toolkit.topology import Molecule\n'), ((2652, 2707), 'numpy.isclose', 'numpy.isclose', (['result.qc_calc_result.return_result', '(5.2)'], {}), '(result.qc_calc_result.return_result, 5.2)\n', (2665, 2707), False, 'import numpy\n'), ((3132, 3178), 'beflow.services.qcgenerator.models.QCGeneratorGETResponse.parse_raw', 'QCGeneratorGETResponse.parse_raw', (['request.text'], {}), '(request.text)\n', (3164, 3178), False, 'from beflow.services.qcgenerator.models import QCGeneratorGETResponse, QCGeneratorPOSTBody, QCGeneratorPOSTResponse\n'), ((3425, 3480), 'numpy.isclose', 'numpy.isclose', (['result.qc_calc_result.return_result', '(5.2)'], {}), '(result.qc_calc_result.return_result, 5.2)\n', (3438, 3480), False, 'import numpy\n'), ((4439, 4494), 'beflow.tests.mocking.celery.mock_celery_task', 'mock_celery_task', (['worker', 'compute_function', 'monkeypatch'], {}), '(worker, compute_function, monkeypatch)\n', (4455, 4494), False, 'from beflow.tests.mocking.celery import mock_celery_task\n'), ((4800, 4847), 
'beflow.services.qcgenerator.models.QCGeneratorPOSTResponse.parse_raw', 'QCGeneratorPOSTResponse.parse_raw', (['request.text'], {}), '(request.text)\n', (4833, 4847), False, 'from beflow.services.qcgenerator.models import QCGeneratorGETResponse, QCGeneratorPOSTBody, QCGeneratorPOSTResponse\n'), ((5528, 5584), 'pydantic.parse_raw_as', 'parse_raw_as', (['List[QCGeneratorGETResponse]', 'request.text'], {}), '(List[QCGeneratorGETResponse], request.text)\n', (5540, 5584), False, 'from pydantic import parse_raw_as\n'), ((1780, 1810), 'beflow.services.qcgenerator.app._retrieve_qc_result', '_retrieve_qc_result', (['"""1"""', '(True)'], {}), "('1', True)\n", (1799, 1810), False, 'from beflow.services.qcgenerator.app import _retrieve_qc_result\n'), ((2374, 2404), 'beflow.services.qcgenerator.app._retrieve_qc_result', '_retrieve_qc_result', (['"""1"""', '(True)'], {}), "('1', True)\n", (2393, 2404), False, 'from beflow.services.qcgenerator.app import _retrieve_qc_result\n'), ((981, 1014), 'qcelemental.models.common_models.Model', 'Model', ([], {'method': '"""rdkit"""', 'basis': 'None'}), "(method='rdkit', basis=None)\n", (986, 1014), False, 'from qcelemental.models.common_models import Model, Provenance\n'), ((1084, 1112), 'qcelemental.models.common_models.Provenance', 'Provenance', ([], {'creator': '"""pytest"""'}), "(creator='pytest')\n", (1094, 1112), False, 'from qcelemental.models.common_models import Model, Provenance\n'), ((1133, 1157), 'qcelemental.models.AtomicResultProperties', 'AtomicResultProperties', ([], {}), '()\n', (1155, 1157), False, 'from qcelemental.models import AtomicResult, AtomicResultProperties, DriverEnum\n'), ((4560, 4598), 'beflow.services.qcgenerator.models.QCGeneratorPOSTBody', 'QCGeneratorPOSTBody', ([], {'input_schema': 'task'}), '(input_schema=task)\n', (4579, 4598), False, 'from beflow.services.qcgenerator.models import QCGeneratorGETResponse, QCGeneratorPOSTBody, QCGeneratorPOSTResponse\n'), ((3715, 3746), 
'qcelemental.models.common_models.Model', 'Model', ([], {'method': '"""uff"""', 'basis': 'None'}), "(method='uff', basis=None)\n", (3720, 3746), False, 'from qcelemental.models.common_models import Model, Provenance\n'), ((3979, 4010), 'qcelemental.models.common_models.Model', 'Model', ([], {'method': '"""uff"""', 'basis': 'None'}), "(method='uff', basis=None)\n", (3984, 4010), False, 'from qcelemental.models.common_models import Model, Provenance\n'), ((4205, 4236), 'qcelemental.models.common_models.Model', 'Model', ([], {'method': '"""uff"""', 'basis': 'None'}), "(method='uff', basis=None)\n", (4210, 4236), False, 'from qcelemental.models.common_models import Model, Provenance\n')] |
from typing import Tuple
from base64 import b64encode, b64decode
import numpy as np
import cv2 as cv
def resize(image: np.ndarray, size: int, method: int = cv.INTER_LINEAR):
    """Scale `image` so its longer side equals `size`, preserving the
    aspect ratio.

    Returns the resized image together with the scale factor that was
    applied.
    """
    height, width = image.shape[:2]
    factor = float(size) / max(width, height)
    target_size = (int(factor * width), int(factor * height))
    return cv.resize(image, target_size, interpolation=method), factor
class ImageResizer:
    """Pre-computes a target size and a reusable output buffer for resizing
    frames of a fixed input size.

    The target size comes either from ``output_size`` (longest side) or,
    when ``output_size <= 0``, from the per-axis ``scale`` factors.
    """

    def __init__(
        self,
        input_size: Tuple[int, int],
        output_size: int = -1,
        scale: Tuple[float, float] = (1.0, 1.0),
        channels: int = 3,
        dtype: int = np.uint8
    ):
        if output_size > 0:
            # A positive output_size overrides `scale`: one uniform factor
            # so the longest side becomes exactly output_size.
            scale_x = float(output_size) / max(input_size)
            scale = (scale_x, scale_x)
        self.resize_to = (int(scale[0] * input_size[0]), int(scale[1] * input_size[1]))
        # Bug fix: this flag used to be stored as `self.resize`, which
        # clobbered the `resize` method with a bool and made instances
        # uncallable.  It is now a private attribute.
        self._needs_resize = self.resize_to != input_size
        if self._needs_resize:
            # Reusable destination buffer: avoids one allocation per frame.
            self.frame_resized = np.zeros(
                (self.resize_to[1], self.resize_to[0], channels), dtype
            )

    def resize(self, image: np.ndarray) -> np.ndarray:
        """Resize `image` to the pre-computed target size; pass it through
        unchanged when the target equals the input size (no buffer exists
        in that case)."""
        if not self._needs_resize:
            return image
        # Bug fix: interpolation must be passed by keyword -- the third and
        # fourth positional parameters of cv.resize are `dst` and `fx`, so
        # the original call supplied INTER_LINEAR as a scale factor.
        return cv.resize(image, self.resize_to, self.frame_resized,
                         interpolation=cv.INTER_LINEAR)
def image_to_base64(image: np.ndarray) -> str:
    """Encode `image` as JPEG and return the bytes as a base64 string."""
    jpeg_bytes = cv.imencode('.jpg', image)[1]
    return b64encode(jpeg_bytes).decode('utf-8')
def image_from_base64(image: str) -> np.ndarray:
    """Decode a base64-encoded image string back into a color image."""
    raw_bytes = b64decode(image)
    return cv.imdecode(raw_bytes, cv.IMREAD_COLOR)
| [
"cv2.imencode",
"cv2.resize",
"base64.b64decode",
"numpy.zeros"
] | [((300, 349), 'cv2.resize', 'cv.resize', (['image', 'resize_to'], {'interpolation': 'method'}), '(image, resize_to, interpolation=method)\n', (309, 349), True, 'import cv2 as cv\n'), ((1127, 1196), 'cv2.resize', 'cv.resize', (['image', 'self.resize_to', 'self.frame_resized', 'cv.INTER_LINEAR'], {}), '(image, self.resize_to, self.frame_resized, cv.INTER_LINEAR)\n', (1136, 1196), True, 'import cv2 as cv\n'), ((1386, 1402), 'base64.b64decode', 'b64decode', (['image'], {}), '(image)\n', (1395, 1402), False, 'from base64 import b64encode, b64decode\n'), ((960, 1025), 'numpy.zeros', 'np.zeros', (['(self.resize_to[1], self.resize_to[0], channels)', 'dtype'], {}), '((self.resize_to[1], self.resize_to[0], channels), dtype)\n', (968, 1025), True, 'import numpy as np\n'), ((1271, 1297), 'cv2.imencode', 'cv.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (1282, 1297), True, 'import cv2 as cv\n')] |
"""
shows evolution of compounded interest for different period of time and different growth rates
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
# Fix: the original `f, axarr = plt.subplots(1, 2)` result was never used
# and only opened a stray blank window, so it has been removed.
fig = plt.figure()
ax = fig.gca(projection='3d')

n = np.arange(0, 10, .1)    # number of compounding periods
r = np.arange(0, 1, 0.01)   # per-period growth rate
n, r = np.meshgrid(n, r)

# Log of the compounded growth factor (1+r)^n; the log keeps the surface
# readable where n*r is large.
z = (1+r)**n
z = np.log(z)

surf = ax.plot_surface(n, r, z, rstride=1, cstride=1, cmap=cm.coolwarm,
                       linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)

plt.show()
"numpy.log",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((296, 314), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (308, 314), True, 'import matplotlib.pyplot as plt\n'), ((322, 334), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (332, 334), True, 'import matplotlib.pyplot as plt\n'), ((369, 390), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.1)'], {}), '(0, 10, 0.1)\n', (378, 390), True, 'import numpy as np\n'), ((394, 415), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (403, 415), True, 'import numpy as np\n'), ((423, 440), 'numpy.meshgrid', 'np.meshgrid', (['n', 'r'], {}), '(n, r)\n', (434, 440), True, 'import numpy as np\n'), ((458, 467), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (464, 467), True, 'import numpy as np\n'), ((636, 646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (644, 646), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf8 -*-
# Copyright 2012 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Library Classes
===============
This file contains the basic objects to build a problem and to do a single evaluation.
.. inheritance-diagram:: panobbgo_lib.lib
.. Note:: This is used by :mod:`panobbgo` and :mod:`panobbgo_lib`.
.. codeauthor:: <NAME> <<EMAIL>>
"""
# ATTN: make sure, that this doesn't depend on the config or threading modules.
# the serialization and reconstruction won't work!
import numpy as np
from IPython.utils.timing import time
class Point:
    """
    This contains the x vector for a new point and a
    reference to :attr:`.who`, the heuristic that has generated it.
    """
    def __init__(self, x, who):
        # NOTE(review): ``basestring`` exists only in Python 2; under
        # Python 3 this isinstance check would raise NameError.  Confirm
        # the target interpreter before porting.
        if not isinstance(who, basestring):
            raise ValueError(
                'who needs to be a string describing the heuristic, was %s of type %s'
                % (who, type(who)))
        # Accept plain lists/tuples and normalize them to a float64 array.
        if not isinstance(x, np.ndarray):
            x = np.array(x, dtype=np.float64)
        self._x = x
        self._who = who  # heuristic.name, a string
    def __repr__(self):
        """
        >>> Point
        <class panobbgo_lib.lib.Point at ...>
        >>> x = np.array([1,2])
        >>> repr(Point(x, 'doctest'))
        '[1 2] by doctest'
        """
        return '%s by %s' % (self.x, self.who)
    @property
    def x(self):
        """
        The vector :math:`x`, a :class:`numpy.ndarray`
        """
        return self._x
    @property
    def who(self):
        """
        A string, which is the :attr:`~panobbgo.core.Module.name` of a heuristic.
        To get the actual heuristic, use the strategie's
        :meth:`~panobbgo.core.StrategyBase.heuristic` method.
        """
        return self._who
    def __getitem__(self, item):
        """
        Index into the x vector.
        >>> p = Point(np.array([1, 42]), "doctest")
        >>> p[1]
        42
        """
        return self._x[item]
class Result:
r"""
This represents one result, wich is a mapping of a :class:`.Point`
:math:`x \rightarrow f(x)`.
Additionally, there is also
- :attr:`.error`: estimated or calculated :math:`\Delta f(x)`.
- :attr:`.cv_vec`: a possibly empty vector listing the constraint violation for
each constraint.
"""
def __init__(self, point, fx, cv_vec=None, cv_norm=None, error=0.0):
"""
Args:
- ``cv``: the constraint violation vector
- ``cv_norm``: the norm used to calculate :attr:`.cv`.
(see :func:`numpy.linalg.norm`, default ``None`` means 2-norm)
"""
if point and not isinstance(point, Point):
raise ValueError("point must be an instance of lib.Point")
self._point = point
self._fx = fx
self._error = error
self._cv_vec = cv_vec
self._cv_norm = cv_norm
self._time = time.time()
@property
def x(self):
"""
Point :math:`x` where this result has been evaluated.
"""
return self.point.x if self.point else None
@property
def point(self):
"""
Returns the actual :class:`.Point` object.
"""
return self._point
@property
def fx(self):
"""
The function value :math:`f(x)` after :meth:`evaluating <panobbgo_lib.lib.Problem.eval>` it.
"""
return self._fx
@property
def cv_vec(self):
"""
Vector of constraint violations for each constraint, or None.
.. Note::
Be aware, that entries could be negative. This is useful if you want to know
how well a point is satisfied. The `.cv` property just looks at the positive
entries, though.
"""
return self._cv_vec
@property
def cv(self):
"""
Constraint Violation.
The chosen norm of :attr:`.cv_vec`; see ``cv_norm`` in constructor.
.. Note::
Only the positive entries are used to calculate the norm!
"""
if self._cv_vec is None:
return 0.0
from numpy.linalg import norm
return norm(self._cv_vec[self._cv_vec > 0.0], self._cv_norm)
@property
def pp(self):
"""
pareto point, i.e. array([cv, fx])
"""
return np.array([self.cv, self.fx])
@property
def who(self):
"""
The :attr:`~panobbgo.core.Module.name` of the heuristic, who
did generate this point (String).
"""
return self.point.who
@property
def error(self):
"""
Error margin of function evaluation, usually 0.0.
"""
return self._error
def __cmp__(self, other):
    """
    Compare with another result by fx (and fx only!).

    .. Note ::
        This is also used by mechanisms
        like Best -> pareto_front
    """
    # NOTE(review): ``cmp`` and ``__cmp__`` only exist in Python 2; under
    # Python 3 this method is ignored and ``cmp`` is a NameError — confirm
    # the intended interpreter version.
    assert isinstance(other, Result)
    return cmp(self._fx, other._fx)
def __unicode__(self):
    """One-line human-readable summary: fx, optional cv marker, and x."""
    if self.x is None:
        coords = None
    else:
        coords = u' '.join(u'%11.6f' % xi for xi in self.x)
    if self._cv_vec is None:
        cv_str = ''
    else:
        cv_str = u'\u22DB%8.4f ' % self.cv
    return u'{:11.6f} {}@ [{}]'.format(self.fx, cv_str, coords)
class BoundingBox:
    """
    The bounding box of the :class:`Problem`.

    Wraps an ``(n, 2)`` array of per-dimension ``[lower, upper]`` bounds
    and exposes derived quantities (:attr:`ranges`, :attr:`center`).
    """
    # this follows http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
    # #slightly-more-realistic-example-attribute-added-to-existing-array

    def __init__(self, box, dx=None):
        """
        :param box: array-like of shape ``(n, 2)`` with lower/upper bounds.
        :param dx: optional translational offset added to the box
                   (an ndarray, or ``None``).
        """
        self.box = np.asarray(box, dtype=np.float64)
        assert self.box.shape[1] == 2, "converting box to n x 2 array failed"
        if dx is not None:
            self.box += dx
        # np.ptp(..., axis=1) == upper - lower per dimension;
        # the ndarray.ptp method was removed in NumPy 2.0, so use the function.
        self.ranges = np.ptp(self.box, axis=1)  # self._box[:,1] - self._box[:,0]
        self.center = self.box[:, 0] + self.ranges / 2.
        # freeze all derived arrays to guard against accidental mutation
        for arr in [self.box, dx, self.ranges, self.center]:
            if arr is not None:
                arr.setflags(write=False)

    def copy(self):
        """Return a new (writable) :class:`BoundingBox` with the same bounds."""
        return type(self)(self.box.copy())

    def __contains__(self, point):
        """
        :param Point point: object with an ``x`` attribute (coordinates).
        :return: True iff ``point.x`` lies inside the box (bounds inclusive).
        """
        # np.alltrue was removed in NumPy 2.0; np.all is the supported spelling
        lower_ok = np.all(point.x >= self.box[:, 0])
        upper_ok = np.all(point.x <= self.box[:, 1])
        return lower_ok and upper_ok

    def __getitem__(self, item):
        return self.box.__getitem__(item)

    def __setitem__(self, key, value):
        # note: the underlying array is read-only (see __init__), so this raises
        return self.box.__setitem__(key, value)
class Problem:
    """
    Describes a black-box optimization problem: the objective
    function, its bounding box and related metadata.
    """

    def __init__(self, box=None, dx=None):
        r"""
        :param list box: list of tuples for the bounding box with length n,
               e.g.: :math:`\left[ (-1,1), (-100, 0), (0, 0.01) \right]`.
        :param list dx: translational offset which also affects the box,
               n-dimensional vector (default: None)
        """
        assert isinstance(box, (list, tuple)), "box argument must be a list or tuple"
        from numbers import Number
        # validate every (lower, upper) pair before touching any state
        for bounds in box:
            assert len(bounds) == 2, "box entries must be of length 2"
            for value in bounds:
                assert isinstance(value, Number), "box entries must be numbers"
            assert bounds[0] <= bounds[1], "box entries must be non-decreasing"
        self._dim = len(box)
        if dx is not None:
            # freeze the offset so it cannot be mutated later
            dx = np.array(dx, dtype=np.float64)
            dx.setflags(write=False)
        self._box = BoundingBox(box, dx)
        self.dx = dx

    @property
    def dim(self):
        """Number of dimensions of the search space."""
        return self._dim

    @property
    def ranges(self):
        """Extent of the box along each dimension, a :class:`numpy.ndarray`."""
        return self.box.ranges

    @property
    def box(self):
        r"""
        The bounding box for this problem, a
        :math:`(\mathit{dim},2)`-:class:`array <.BoundingBox>`.
        """
        return self._box

    @property
    def center(self):
        r"""Midpoint of the bounding box."""
        return self.box.center

    def project(self, point):
        r"""
        Project the given point into the search box,
        e.g. :math:`[-1.1, 1]` with box :math:`[(-1,1),(-1,1)]`
        gives :math:`[-1,1]`.
        """
        assert isinstance(point, np.ndarray), 'point must be a numpy ndarray'
        # clamp from below first, then from above
        lower_clamped = np.maximum(point, self.box[:, 0])
        return np.minimum(lower_clamped, self.box[:, 1])

    def random_point(self):
        """
        Generate a point drawn uniformly at random from the search box.
        """
        # uniformly
        return self._box[:, 0] + self.ranges * np.random.rand(self.dim)
        # TODO other distributions, too?

    def eval(self, x):
        """
        Evaluate the given black-box function at ``x``.  The problem
        should be called directly (the ``__call__`` special method wraps
        this) and subclasses must override this ``eval`` method.
        :rtype: numpy.float64
        """
        raise Exception("You have to subclass and overwrite the eval function")

    def eval_constraints(self, x):
        """
        Optionally overwritten by the problem to calculate the constraint
        violations.  Must return a :class:`numpy.ndarray` of ``floats``.
        :rtype: numpy.ndarray
        """
        pass

    def __call__(self, point):
        # apply the translational offset (if any) before evaluating
        x = point.x if self.dx is None else point.x + self.dx
        fx = self.eval(x)
        cv = self.eval_constraints(x)
        return Result(point, fx, cv_vec=cv)

    def __repr__(self):
        public = [kv for kv in iter(self.__dict__.items()) if not kv[0].startswith("_")]
        descr = "Problem '{}': {:d} dims, ".format(
            self.__class__.__name__, self._dim)
        descr += "params: %s, " % dict(public)
        descr += "box: [%s]" % ', '.join(
            '[%.2f %.2f]' % (lo, hi) for lo, hi in self._box)
        return descr
| [
"numpy.alltrue",
"numpy.random.rand",
"numpy.asarray",
"numpy.array",
"IPython.utils.timing.time.time",
"numpy.linalg.norm",
"numpy.maximum"
] | [((3385, 3396), 'IPython.utils.timing.time.time', 'time.time', ([], {}), '()\n', (3394, 3396), False, 'from IPython.utils.timing import time\n'), ((4633, 4686), 'numpy.linalg.norm', 'norm', (['self._cv_vec[self._cv_vec > 0.0]', 'self._cv_norm'], {}), '(self._cv_vec[self._cv_vec > 0.0], self._cv_norm)\n', (4637, 4686), False, 'from numpy.linalg import norm\n'), ((4802, 4830), 'numpy.array', 'np.array', (['[self.cv, self.fx]'], {}), '([self.cv, self.fx])\n', (4810, 4830), True, 'import numpy as np\n'), ((6030, 6063), 'numpy.asarray', 'np.asarray', (['box'], {'dtype': 'np.float64'}), '(box, dtype=np.float64)\n', (6040, 6063), True, 'import numpy as np\n'), ((6656, 6693), 'numpy.alltrue', 'np.alltrue', (['(point.x >= self.box[:, 0])'], {}), '(point.x >= self.box[:, 0])\n', (6666, 6693), True, 'import numpy as np\n'), ((6706, 6743), 'numpy.alltrue', 'np.alltrue', (['(point.x <= self.box[:, 1])'], {}), '(point.x <= self.box[:, 1])\n', (6716, 6743), True, 'import numpy as np\n'), ((1488, 1517), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (1496, 1517), True, 'import numpy as np\n'), ((7889, 7919), 'numpy.array', 'np.array', (['dx'], {'dtype': 'np.float64'}), '(dx, dtype=np.float64)\n', (7897, 7919), True, 'import numpy as np\n'), ((8903, 8936), 'numpy.maximum', 'np.maximum', (['point', 'self.box[:, 0]'], {}), '(point, self.box[:, 0])\n', (8913, 8936), True, 'import numpy as np\n'), ((9127, 9151), 'numpy.random.rand', 'np.random.rand', (['self.dim'], {}), '(self.dim)\n', (9141, 9151), True, 'import numpy as np\n')] |
import numpy as np
import math
class metacvpartition():
    # Meta-segmented cross-validation partitioner (see __init__ docstring).
    # NOTE(review): assumes `labels` is a 1-D numpy array of non-negative
    # integer-valued class labels (np.bincount below requires this) —
    # confirm against the callers.
    def __init__(self, labels, nFolds, metaSegmentLength, debug=False):
        """
        Meta-segmented cross-validation [1].
        C = metacvpartition(labels, nFolds, metaSegmentLength);
        labels is a Nx1 matrix with (integer) labels. nFolds is the
        number of folds in the cross-validation. metaSegmentLength
        is the number of frames in each meta-segment.
        C is an object with a similar interface to cvpartition.
        C.training(i)   Nx1 Indicator-matrix for
        training-set i.
        C.test(i)       Nx1 Indicator-matrix for test-set
        i.
        Other fields:
        C.numtestSets
        C.foldDistribution
        C.classDistribution
        [1] Hammerla, <NAME>., and <NAME>. "Let's (not) stick together:
        pairwise similarity biases cross-validation in activity
        recognition." Proceedings of the 2015 ACM international joint
        conference on pervasive and ubiquitous computing. ACM, 2015.
        """
        # initialize
        self.N = len(labels)
        self.numTestSets = nFolds
        # number of meta-segments
        nP = math.ceil(self.N / metaSegmentLength)
        if debug:
            print('nP =', nP)
        # per-sample fold assignment; filled in at the end
        self.indices = np.zeros((self.N, 1))
        if debug:
            print('indices =', self.indices)
        # get classes
        c = np.unique(labels)
        if debug:
            print('c =', c)
        # transform to integer labels
        L = labels.astype(int)
        # NOTE(review): this loop rewrites L with the very values it already
        # holds — effectively a no-op unless labels are non-integer codes;
        # confirm the intent (likely a remnant of the MATLAB original).
        for i in c:
            L[labels == i] = i
        if debug:
            print('L =', L)
        # get overall distribution of labels
        self.classDistribution = np.bincount(L).T
        if debug:
            print('classDistribution =', self.classDistribution)
        # initialize met-segment class distribution matrix
        cDist = np.zeros((int(nP), len(c)))
        self.foldDistributions = np.zeros((nFolds, len(c)))
        # estimate class distributions for each meta-segment
        for i in range(int(nP)):
            # get meta-segment label-distribution
            # NOTE(review): the guard looks off by one segment — the slice
            # below clamps anyway, but `(i+1)*metaSegmentLength` was likely
            # intended here.
            if i*metaSegmentLength <= self.N:
                l = L[i*metaSegmentLength:(i+1)*metaSegmentLength]
                if debug:
                    print('i*metaSegmentLength = ', i*metaSegmentLength)
            else:
                # NOTE(review): [start:-1] silently drops the final frame,
                # and this print is not guarded by `debug`.
                l = L[i*metaSegmentLength:-1]
                print('l =', l)
            # get labels unique to this meta-segment
            d = np.unique(l)
            if debug:
                print('d =', d)
            # save in matrix
            dl = np.bincount(l)
            if debug:
                print('dl =', dl)
            # jitter the non-zero counts so the lexicographic sort below
            # breaks ties randomly.
            # NOTE(review): `out=dl[dl > 0]` writes into a temporary copy
            # produced by fancy indexing — only the outer `+=` persists;
            # confirm the intended amount of noise (integer dtype truncates
            # the 0.1-scaled term under 'unsafe' casting).
            dl[dl > 0] += np.add(
                dl[dl > 0 ]
                , np.random.random((sum(dl > 0),)) * 0.1
                , out=dl[dl > 0]
                , casting='unsafe'
            )
            # assign non-zero elements
            cDist[i, d] = dl[dl > 0]
            if debug:
                print('cDist =', cDist)
            # add some noise for randomness of xval
            # NOTE(review): self-assignment — a no-op kept from the port.
            cDist[i, :] = cDist[i, :]
        # Here comes the trick: sort lexicographically
        # [~, I] = np.lexsort(cDist)
        if debug:
            print('cDist.shape = ', cDist.shape)
        I = np.lexsort([col for col in cDist.T], axis=0)
        if debug:
            print('I = ', I)
        # "I" now contains sorted list of distributions (ascending)
        # Now: assign folds
        # ind = 1 + mod(1:len(I), nFolds)
        # ind = 1 + np.mod(np.arange(len(I)), nFolds)
        ind = np.mod(np.arange(len(I)), nFolds)
        if debug:
            print('ind = ', ind)
        # scatter the round-robin fold ids through the sorted order so
        # similarly-distributed segments land in different folds
        # (the RHS is fully evaluated before the fancy-indexed assignment)
        ind[I] = ind
        if debug:
            print('ind[I] =', ind[I])
        # save fold-wise distibutions for reference
        for i in range(nFolds):
            d = np.sum(cDist[ind == i, :], axis=0)
            if debug:
                print('cDist[ind == i, :] = ', cDist[ind == i, :])
                print('d = ', d)
            self.foldDistributions[i, :] = d / np.sum(d)
        if debug:
            print('foldDistributions = ', self.foldDistributions)
        # assign fold to each sample
        for i in range(int(nP)):
            self.indices[i*metaSegmentLength:(i+1)*metaSegmentLength] = ind[i]
        if debug:
            print('indices = ', self.indices)
        # make sure the indices it's the right size
        # self.indices = self.indices[1:self.N]
        self.indices = self.indices[0:self.N].flatten().astype(int)
        self.TestSize = np.bincount(self.indices).T
        # self.TrainSize = size(self.indices, 1) - self.TestSize
        self.TrainSize = self.indices.shape[0] - self.TestSize

    # def trainIndices(self, cv, fold):
    def trainIndices(self, fold):
        # return binary training mask from fold `fold`
        # (actually returns the integer sample indices, via np.flatnonzero)
        return np.flatnonzero(self.indices != fold)

    # def testIndices(self, cv, fold):
    def testIndices(self, fold):
        # return binary testing mask from fold `fold`
        # (actually returns the integer sample indices, via np.flatnonzero)
        return np.flatnonzero(self.indices == fold)

    def splitsGenerator(self):
        # yield one (train_indices, test_indices) pair per fold
        for fold in range(self.numTestSets):
            yield(
                self.trainIndices(fold),
                self.testIndices(fold)
            )
| [
"math.ceil",
"numpy.unique",
"numpy.flatnonzero",
"numpy.lexsort",
"numpy.zeros",
"numpy.sum",
"numpy.bincount"
] | [((1321, 1358), 'math.ceil', 'math.ceil', (['(self.N / metaSegmentLength)'], {}), '(self.N / metaSegmentLength)\n', (1330, 1358), False, 'import math\n'), ((1430, 1451), 'numpy.zeros', 'np.zeros', (['(self.N, 1)'], {}), '((self.N, 1))\n', (1438, 1451), True, 'import numpy as np\n'), ((1550, 1567), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1559, 1567), True, 'import numpy as np\n'), ((3428, 3472), 'numpy.lexsort', 'np.lexsort', (['[col for col in cDist.T]'], {'axis': '(0)'}), '([col for col in cDist.T], axis=0)\n', (3438, 3472), True, 'import numpy as np\n'), ((4997, 5033), 'numpy.flatnonzero', 'np.flatnonzero', (['(self.indices != fold)'], {}), '(self.indices != fold)\n', (5011, 5033), True, 'import numpy as np\n'), ((5176, 5212), 'numpy.flatnonzero', 'np.flatnonzero', (['(self.indices == fold)'], {}), '(self.indices == fold)\n', (5190, 5212), True, 'import numpy as np\n'), ((1859, 1873), 'numpy.bincount', 'np.bincount', (['L'], {}), '(L)\n', (1870, 1873), True, 'import numpy as np\n'), ((2642, 2654), 'numpy.unique', 'np.unique', (['l'], {}), '(l)\n', (2651, 2654), True, 'import numpy as np\n'), ((2756, 2770), 'numpy.bincount', 'np.bincount', (['l'], {}), '(l)\n', (2767, 2770), True, 'import numpy as np\n'), ((3990, 4024), 'numpy.sum', 'np.sum', (['cDist[ind == i, :]'], {'axis': '(0)'}), '(cDist[ind == i, :], axis=0)\n', (3996, 4024), True, 'import numpy as np\n'), ((4696, 4721), 'numpy.bincount', 'np.bincount', (['self.indices'], {}), '(self.indices)\n', (4707, 4721), True, 'import numpy as np\n'), ((4194, 4203), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (4200, 4203), True, 'import numpy as np\n')] |
import qi
import sys
import time
import almath
import numpy as np
import math
class Pepper():
    """
    Drives a SoftBank Pepper robot: maps one frame of 3-D skeleton joint
    positions onto Pepper's head and arm joint angles via NAOqi/ALMotion.
    """
    def __init__(self):
        # connect a NAOqi session to the robot at a fixed address
        self.connection_url = "tcp://172.16.58.3:9559"
        self.app = qi.Application(["PredictionDemo", "--qi-url=" + self.connection_url])
        self.app.start()
        self.session = self.app.session
        self.motion = self.session.service("ALMotion")
        # joints driven on the robot, and the matching angle buffer
        # (radians — ALMotion.setAngles expects radians) sent in movePepper
        self.names = ["HeadYaw", "HeadPitch", "RShoulderPitch", "RShoulderRoll", "RElbowYaw", "RElbowRoll", "RWristYaw", "LShoulderPitch", "LShoulderRoll", "LElbowYaw", "LElbowRoll", "LWristYaw"]
        self.angles = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        self.fractionMaxSpeed = 0.1
        # row indices of each skeleton joint inside the `vals` array given
        # to movePepper — presumably matching the upstream pose estimator's
        # joint ordering; TODO confirm against the producer
        self.centerPelvisIndex = 0;
        self.midBodyIndex = 12;
        self.neckBaseIndex = 13;
        self.noseIndex = 14;
        self.headIndex = 15;
        self.rightShoulderIndex = 17;
        self.rightElbowIndex = 18;
        self.rightHandIndex = 19;
        self.rightWristIndex = 20;
        self.rightThumbIndex = 21;
        self.rightFingerTipIndex = 22;
        self.leftShoulderIndex = 25;
        self.leftElbowIndex = 26;
        self.leftHandIndex = 27;
        self.leftWristIndex = 28;
        self.leftThumbIndex = 29;
        self.leftFingerTipIndex = 30;

    def movePepper(self, vals):
        """
        Compute Pepper joint angles from one skeleton frame and send them
        to ALMotion.

        :param vals: 2-D array of joint positions; row k is indexed by the
            ``*Index`` attributes from ``__init__`` and columns 0..2 are
            read as the joint's (x, y, z) coordinates.
            (Assumes at least 31 rows — TODO confirm with the producer.)
        """
        # unpack every joint's coordinates into named attributes
        # NOTE(review): centerPelvis*, rightFingerTip* and leftFingerTip*
        # are unpacked but never used below.
        self.centerPelvisX = vals[self.centerPelvisIndex,0];
        self.centerPelvisY = vals[self.centerPelvisIndex,1];
        self.centerPelvisZ = vals[self.centerPelvisIndex,2];
        self.midBodyX = vals[self.midBodyIndex,0];
        self.midBodyY = vals[self.midBodyIndex,1];
        self.midBodyZ = vals[self.midBodyIndex,2];
        self.neckBaseX = vals[self.neckBaseIndex,0];
        self.neckBaseY = vals[self.neckBaseIndex,1];
        self.neckBaseZ = vals[self.neckBaseIndex,2];
        self.noseX = vals[self.noseIndex,0];
        self.noseY = vals[self.noseIndex,1];
        self.noseZ = vals[self.noseIndex,2];
        self.headX = vals[self.headIndex,0];
        self.headY = vals[self.headIndex,1];
        self.headZ = vals[self.headIndex,2];
        self.rightShoulderX = vals[self.rightShoulderIndex,0];
        self.rightShoulderY = vals[self.rightShoulderIndex,1];
        self.rightShoulderZ = vals[self.rightShoulderIndex,2];
        self.rightElbowX = vals[self.rightElbowIndex,0];
        self.rightElbowY = vals[self.rightElbowIndex,1];
        self.rightElbowZ = vals[self.rightElbowIndex,2];
        self.rightHandX = vals[self.rightHandIndex,0];
        self.rightHandY = vals[self.rightHandIndex,1];
        self.rightHandZ = vals[self.rightHandIndex,2];
        self.rightWristX = vals[self.rightWristIndex,0];
        self.rightWristY = vals[self.rightWristIndex,1];
        self.rightWristZ = vals[self.rightWristIndex,2];
        self.rightThumbX = vals[self.rightThumbIndex,0];
        self.rightThumbY = vals[self.rightThumbIndex,1];
        self.rightThumbZ = vals[self.rightThumbIndex,2];
        self.rightFingerTipX = vals[self.rightFingerTipIndex,0];
        self.rightFingerTipY = vals[self.rightFingerTipIndex,1];
        self.rightFingerTipZ = vals[self.rightFingerTipIndex,2];
        self.leftShoulderX = vals[self.leftShoulderIndex,0];
        self.leftShoulderY = vals[self.leftShoulderIndex,1];
        self.leftShoulderZ = vals[self.leftShoulderIndex,2];
        self.leftElbowX = vals[self.leftElbowIndex,0];
        self.leftElbowY = vals[self.leftElbowIndex,1];
        self.leftElbowZ = vals[self.leftElbowIndex,2];
        self.leftHandX = vals[self.leftHandIndex,0];
        self.leftHandY = vals[self.leftHandIndex,1];
        self.leftHandZ = vals[self.leftHandIndex,2];
        self.leftWristX = vals[self.leftWristIndex,0];
        self.leftWristY = vals[self.leftWristIndex,1];
        self.leftWristZ = vals[self.leftWristIndex,2];
        self.leftThumbX = vals[self.leftThumbIndex,0];
        self.leftThumbY = vals[self.leftThumbIndex,1];
        self.leftThumbZ = vals[self.leftThumbIndex,2];
        self.leftFingerTipX = vals[self.leftFingerTipIndex,0];
        self.leftFingerTipY = vals[self.leftFingerTipIndex,1];
        self.leftFingerTipZ = vals[self.leftFingerTipIndex,2];
        # --- head plane through neck base, nose and head top ---
        # From kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points/
        headPlanePoint1 = np.array([self.neckBaseX, self.neckBaseY, self.neckBaseZ])
        headPlanePoint2 = np.array([self.noseX, self.noseY, self.noseZ])
        headPlanePoint3 = np.array([self.headX, self.headY, self.headZ])
        headPlaneVector1 = headPlanePoint3 - headPlanePoint2
        headPlaneVector2 = headPlanePoint1 - headPlanePoint2
        headPlaneCrossProduct = np.cross(headPlaneVector1, headPlaneVector2)
        headPlaneEquationA, headPlaneEquationB, headPlaneEquationC = headPlaneCrossProduct
        # NOTE(review): headPlaneEquationD is computed but never used below.
        headPlaneEquationD = np.dot(headPlaneCrossProduct, headPlanePoint3)
        # --- torso plane through the two shoulders and mid-body ---
        # From kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points/
        bodyPlanePoint1 = np.array([self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ])
        bodyPlanePoint2 = np.array([self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ])
        bodyPlanePoint3 = np.array([self.midBodyX, self.midBodyY, self.midBodyZ])
        bodyPlaneVector1 = bodyPlanePoint3 - bodyPlanePoint1
        bodyPlaneVector2 = bodyPlanePoint2 - bodyPlanePoint1
        bodyPlaneCrossProduct = np.cross(bodyPlaneVector1, bodyPlaneVector2)
        bodyPlaneEquationA, bodyPlaneEquationB, bodyPlaneEquationC = bodyPlaneCrossProduct
        bodyPlaneEquationD = np.dot(bodyPlaneCrossProduct, bodyPlanePoint3)
        # --- head yaw/pitch relative to the torso plane ---
        # Angle between 2 planes from https://math.tutorvista.com/geometry/angle-between-two-planes.html
        headYaw = math.acos((bodyPlaneEquationA * headPlaneEquationA + bodyPlaneEquationB * headPlaneEquationB + bodyPlaneEquationC * headPlaneEquationC) / (math.sqrt(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) * math.sqrt(headPlaneEquationA ** 2 + headPlaneEquationB ** 2 + headPlaneEquationC ** 2)))
        # clamp to Pepper's HeadYaw range [-90 deg, 90 deg]
        pepperHeadYaw = max(min(headYaw - math.pi/2, math.radians(90)), math.radians(-90))
        # Angle between a line and a plane from https://www.vitutor.com/geometry/distance/line_plane.html
        headVector = headPlanePoint3 - headPlanePoint1
        pepperHeadPitch = math.asin((bodyPlaneEquationA * headVector[0] + bodyPlaneEquationB * headVector[1] + bodyPlaneEquationC * headVector[2]) / (math.sqrt(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) * math.sqrt(headVector[0] ** 2 + headVector[1] ** 2 + headVector[2] ** 2)))
        # --- right arm ---
        lengthOfRightUpperArm = math.sqrt((self.rightShoulderX - self.rightElbowX) ** 2 + (self.rightShoulderY - self.rightElbowY) ** 2 + (self.rightShoulderZ - self.rightElbowZ) ** 2)
        deltaZRightUpperArm = self.rightShoulderZ - self.rightElbowZ
        # From https://mathinsight.org/distance_point_plane
        distanceRightElbowPointFromBodyPlane = (bodyPlaneEquationA * self.rightElbowX + bodyPlaneEquationB * self.rightElbowY + bodyPlaneEquationC * self.rightElbowZ + bodyPlaneEquationD) / math.sqrt(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2)
        # clamp pitch to [-119.5 deg, 119.5 deg] and roll to >= -89.5 deg
        # (Pepper's RShoulder joint limits)
        pepperRightShoulderPitch = min(max(math.atan2(deltaZRightUpperArm, distanceRightElbowPointFromBodyPlane), math.radians(-119.5)), math.radians(119.5))
        pepperRightShoulderRoll = max(-math.acos(abs(deltaZRightUpperArm) / lengthOfRightUpperArm), math.radians(-89.5))
        # project the right elbow onto the torso plane to build a reference
        # plane for the elbow yaw
        # From https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_plane
        rightElbowTranslatedD = (bodyPlaneEquationD - bodyPlaneEquationA * self.rightElbowX - bodyPlaneEquationB * self.rightElbowY - bodyPlaneEquationC * self.rightElbowZ)
        closestPointOnBodyPlaneToRightElbowPointX = (bodyPlaneEquationA * rightElbowTranslatedD) / (bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) + self.rightElbowX
        closestPointOnBodyPlaneToRightElbowPointY = (bodyPlaneEquationB * rightElbowTranslatedD) / (bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) + self.rightElbowY
        closestPointOnBodyPlaneToRightElbowPointZ = (bodyPlaneEquationC * rightElbowTranslatedD) / (bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) + self.rightElbowZ
        closestPointOnBodyPlaneToRightElbowPoint = np.array([closestPointOnBodyPlaneToRightElbowPointX, closestPointOnBodyPlaneToRightElbowPointY, closestPointOnBodyPlaneToRightElbowPointZ])
        rightUpperArmPlanePoint1 = np.array([self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ])
        rightUpperArmPlanePoint2 = np.array([closestPointOnBodyPlaneToRightElbowPoint[0], closestPointOnBodyPlaneToRightElbowPoint[1], closestPointOnBodyPlaneToRightElbowPoint[2] + deltaZRightUpperArm])
        rightUpperArmPlanePoint3 = np.array([self.rightElbowX, self.rightElbowY, self.rightElbowZ])
        rightUpperArmPlaneVector1 = rightUpperArmPlanePoint3 - rightUpperArmPlanePoint1
        rightUpperArmPlaneVector2 = rightUpperArmPlanePoint2 - rightUpperArmPlanePoint1
        rightUpperArmPlaneCrossProduct = np.cross(rightUpperArmPlaneVector1, rightUpperArmPlaneVector2)
        rightUpperArmPlaneEquationA, rightUpperArmPlaneEquationB, rightUpperArmPlaneEquationC = rightUpperArmPlaneCrossProduct
        # plane through shoulder, elbow and hand
        rightArmPlanePoint1 = np.array([self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ])
        rightArmPlanePoint2 = np.array([self.rightElbowX, self.rightElbowY, self.rightElbowZ])
        rightArmPlanePoint3 = np.array([self.rightHandX, self.rightHandY, self.rightHandZ])
        rightArmPlaneVector1 = rightArmPlanePoint1 - rightArmPlanePoint2
        rightArmPlaneVector2 = rightArmPlanePoint3 - rightArmPlanePoint2
        rightArmPlaneCrossProduct = np.cross(rightArmPlaneVector1, rightArmPlaneVector2)
        rightArmPlaneEquationA, rightArmPlaneEquationB, rightArmPlaneEquationC = rightArmPlaneCrossProduct
        # Angle between 2 planes from https://math.tutorvista.com/geometry/angle-between-two-planes.html
        pepperRightElbowYaw = math.acos((rightUpperArmPlaneEquationA * rightArmPlaneEquationA + rightUpperArmPlaneEquationB * rightArmPlaneEquationB + rightUpperArmPlaneEquationC * rightArmPlaneEquationC) / (math.sqrt(rightUpperArmPlaneEquationA ** 2 + rightUpperArmPlaneEquationB ** 2 + rightUpperArmPlaneEquationC ** 2) * math.sqrt(rightArmPlaneEquationA ** 2 + rightArmPlaneEquationB ** 2 + rightArmPlaneEquationC ** 2)))
        # Angle between 3 points from https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space
        rightArmPlaneVector1Magnitude = math.sqrt(rightArmPlaneVector1[0] ** 2 + rightArmPlaneVector1[1] ** 2 + rightArmPlaneVector1[2] ** 2)
        rightArmPlaneVector2Magnitude = math.sqrt(rightArmPlaneVector2[0] ** 2 + rightArmPlaneVector2[1] ** 2 + rightArmPlaneVector2[2] ** 2)
        rightArmPlaneVector1Normalized = rightArmPlaneVector1 / rightArmPlaneVector1Magnitude
        rightArmPlaneVector2Normalized = rightArmPlaneVector2 / rightArmPlaneVector2Magnitude
        # elbow roll clamped to <= 89.5 deg (joint limit)
        pepperRightElbowRoll = min(math.radians(180) - math.acos(np.dot(rightArmPlaneVector1Normalized, rightArmPlaneVector2Normalized)), math.radians(89.5))
        # plane through elbow, wrist and thumb for the wrist yaw
        rightForearmPlanePoint1 = np.array([self.rightElbowX, self.rightElbowY, self.rightElbowZ])
        rightForearmPlanePoint2 = np.array([self.rightWristX, self.rightWristY, self.rightWristZ])
        rightForearmPlanePoint3 = np.array([self.rightThumbX, self.rightThumbY, self.rightThumbZ])
        rightForearmPlaneVector1 = rightForearmPlanePoint1 - rightForearmPlanePoint2
        rightForearmPlaneVector2 = rightForearmPlanePoint3 - rightForearmPlanePoint2
        rightForearmPlaneCrossProduct = np.cross(rightForearmPlaneVector1, rightForearmPlaneVector2)
        rightForearmPlaneEquationA, rightForearmPlaneEquationB, rightForearmPlaneEquationC = rightForearmPlaneCrossProduct
        # Angle between 2 planes from https://math.tutorvista.com/geometry/angle-between-two-planes.html
        pepperRightWristYaw = math.acos((rightArmPlaneEquationA * rightForearmPlaneEquationA + rightArmPlaneEquationB * rightForearmPlaneEquationB + rightArmPlaneEquationC * rightForearmPlaneEquationC) / (math.sqrt(rightArmPlaneEquationA ** 2 + rightArmPlaneEquationB ** 2 + rightArmPlaneEquationC ** 2) * math.sqrt(rightForearmPlaneEquationA ** 2 + rightForearmPlaneEquationB ** 2 + rightForearmPlaneEquationC ** 2)))
        # --- left arm (mirror of the right-arm computation; note the sign
        # flips on yaw/roll) ---
        lengthOfLeftUpperArm = math.sqrt((self.leftShoulderX - self.leftElbowX) ** 2 + (self.leftShoulderY - self.leftElbowY) ** 2 + (self.leftShoulderZ - self.leftElbowZ) ** 2)
        deltaZLeftUpperArm = self.leftShoulderZ - self.leftElbowZ
        # From https://mathinsight.org/distance_point_plane
        distanceLeftElbowPointFromBodyPlane = (bodyPlaneEquationA * self.leftElbowX + bodyPlaneEquationB * self.leftElbowY + bodyPlaneEquationC * self.leftElbowZ + bodyPlaneEquationD) / math.sqrt(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2)
        pepperLeftShoulderPitch = min(max(math.atan2(deltaZLeftUpperArm, distanceLeftElbowPointFromBodyPlane), math.radians(-119.5)), math.radians(119.5))
        pepperLeftShoulderRoll = min(math.acos(abs(deltaZLeftUpperArm) / lengthOfLeftUpperArm), math.radians(89.5))
        # From https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_plane
        leftElbowTranslatedD = (bodyPlaneEquationD - bodyPlaneEquationA * self.leftElbowX - bodyPlaneEquationB * self.leftElbowY - bodyPlaneEquationC * self.leftElbowZ)
        closestPointOnBodyPlaneToLeftElbowPointX = (bodyPlaneEquationA * leftElbowTranslatedD) / (bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) + self.leftElbowX
        closestPointOnBodyPlaneToLeftElbowPointY = (bodyPlaneEquationB * leftElbowTranslatedD) / (bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) + self.leftElbowY
        closestPointOnBodyPlaneToLeftElbowPointZ = (bodyPlaneEquationC * leftElbowTranslatedD) / (bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2) + self.leftElbowZ
        closestPointOnBodyPlaneToLeftElbowPoint = np.array([closestPointOnBodyPlaneToLeftElbowPointX, closestPointOnBodyPlaneToLeftElbowPointY, closestPointOnBodyPlaneToLeftElbowPointZ])
        leftUpperArmPlanePoint1 = np.array([self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ])
        leftUpperArmPlanePoint2 = np.array([closestPointOnBodyPlaneToLeftElbowPoint[0], closestPointOnBodyPlaneToLeftElbowPoint[1], closestPointOnBodyPlaneToLeftElbowPoint[2] + deltaZLeftUpperArm])
        leftUpperArmPlanePoint3 = np.array([self.leftElbowX, self.leftElbowY, self.leftElbowZ])
        leftUpperArmPlaneVector1 = leftUpperArmPlanePoint3 - leftUpperArmPlanePoint1
        leftUpperArmPlaneVector2 = leftUpperArmPlanePoint2 - leftUpperArmPlanePoint1
        leftUpperArmPlaneCrossProduct = np.cross(leftUpperArmPlaneVector1, leftUpperArmPlaneVector2)
        leftUpperArmPlaneEquationA, leftUpperArmPlaneEquationB, leftUpperArmPlaneEquationC = leftUpperArmPlaneCrossProduct
        leftArmPlanePoint1 = np.array([self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ])
        leftArmPlanePoint2 = np.array([self.leftElbowX, self.leftElbowY, self.leftElbowZ])
        leftArmPlanePoint3 = np.array([self.leftHandX, self.leftHandY, self.leftHandZ])
        leftArmPlaneVector1 = leftArmPlanePoint1 - leftArmPlanePoint2
        leftArmPlaneVector2 = leftArmPlanePoint3 - leftArmPlanePoint2
        leftArmPlaneCrossProduct = np.cross(leftArmPlaneVector1, leftArmPlaneVector2)
        leftArmPlaneEquationA, leftArmPlaneEquationB, leftArmPlaneEquationC = leftArmPlaneCrossProduct
        # Angle between 2 planes from https://math.tutorvista.com/geometry/angle-between-two-planes.html
        pepperLeftElbowYaw = -math.acos((leftUpperArmPlaneEquationA * leftArmPlaneEquationA + leftUpperArmPlaneEquationB * leftArmPlaneEquationB + leftUpperArmPlaneEquationC * leftArmPlaneEquationC) / (math.sqrt(leftUpperArmPlaneEquationA ** 2 + leftUpperArmPlaneEquationB ** 2 + leftUpperArmPlaneEquationC ** 2) * math.sqrt(leftArmPlaneEquationA ** 2 + leftArmPlaneEquationB ** 2 + leftArmPlaneEquationC ** 2)))
        # Angle between 3 points from https://stackoverflow.com/questions/19729831/angle-between-3-points-in-3d-space
        leftArmPlaneVector1Magnitude = math.sqrt(leftArmPlaneVector1[0] ** 2 + leftArmPlaneVector1[1] ** 2 + leftArmPlaneVector1[2] ** 2)
        leftArmPlaneVector2Magnitude = math.sqrt(leftArmPlaneVector2[0] ** 2 + leftArmPlaneVector2[1] ** 2 + leftArmPlaneVector2[2] ** 2)
        leftArmPlaneVector1Normalized = leftArmPlaneVector1 / leftArmPlaneVector1Magnitude
        leftArmPlaneVector2Normalized = leftArmPlaneVector2 / leftArmPlaneVector2Magnitude
        pepperLeftElbowRoll = -min(math.radians(180) - math.acos(np.dot(leftArmPlaneVector1Normalized, leftArmPlaneVector2Normalized)), math.radians(89.5))
        leftForearmPlanePoint1 = np.array([self.leftElbowX, self.leftElbowY, self.leftElbowZ])
        leftForearmPlanePoint2 = np.array([self.leftWristX, self.leftWristY, self.leftWristZ])
        leftForearmPlanePoint3 = np.array([self.leftThumbX, self.leftThumbY, self.leftThumbZ])
        leftForearmPlaneVector1 = leftForearmPlanePoint1 - leftForearmPlanePoint2
        leftForearmPlaneVector2 = leftForearmPlanePoint3 - leftForearmPlanePoint2
        leftForearmPlaneCrossProduct = np.cross(leftForearmPlaneVector1, leftForearmPlaneVector2)
        leftForearmPlaneEquationA, leftForearmPlaneEquationB, leftForearmPlaneEquationC = leftForearmPlaneCrossProduct
        # Angle between 2 planes from https://math.tutorvista.com/geometry/angle-between-two-planes.html
        pepperLeftWristYaw = -math.acos((leftArmPlaneEquationA * leftForearmPlaneEquationA + leftArmPlaneEquationB * leftForearmPlaneEquationB + leftArmPlaneEquationC * leftForearmPlaneEquationC) / (math.sqrt(leftArmPlaneEquationA ** 2 + leftArmPlaneEquationB ** 2 + leftArmPlaneEquationC ** 2) * math.sqrt(leftForearmPlaneEquationA ** 2 + leftForearmPlaneEquationB ** 2 + leftForearmPlaneEquationC ** 2)))
        # assemble the buffer in the same order as self.names and send it
        self.angles = [pepperHeadYaw, pepperHeadPitch, pepperRightShoulderPitch, pepperRightShoulderRoll, pepperRightElbowYaw, pepperRightElbowRoll, pepperRightWristYaw, pepperLeftShoulderPitch, pepperLeftShoulderRoll, pepperLeftElbowYaw, pepperLeftElbowRoll, pepperLeftWristYaw]
        self.motion.setAngles(self.names,self.angles,self.fractionMaxSpeed)
"qi.Application",
"numpy.cross",
"math.sqrt",
"math.radians",
"numpy.array",
"numpy.dot",
"math.atan2"
] | [((193, 262), 'qi.Application', 'qi.Application', (["['PredictionDemo', '--qi-url=' + self.connection_url]"], {}), "(['PredictionDemo', '--qi-url=' + self.connection_url])\n", (207, 262), False, 'import qi\n'), ((4291, 4349), 'numpy.array', 'np.array', (['[self.neckBaseX, self.neckBaseY, self.neckBaseZ]'], {}), '([self.neckBaseX, self.neckBaseY, self.neckBaseZ])\n', (4299, 4349), True, 'import numpy as np\n'), ((4376, 4422), 'numpy.array', 'np.array', (['[self.noseX, self.noseY, self.noseZ]'], {}), '([self.noseX, self.noseY, self.noseZ])\n', (4384, 4422), True, 'import numpy as np\n'), ((4449, 4495), 'numpy.array', 'np.array', (['[self.headX, self.headY, self.headZ]'], {}), '([self.headX, self.headY, self.headZ])\n', (4457, 4495), True, 'import numpy as np\n'), ((4650, 4694), 'numpy.cross', 'np.cross', (['headPlaneVector1', 'headPlaneVector2'], {}), '(headPlaneVector1, headPlaneVector2)\n', (4658, 4694), True, 'import numpy as np\n'), ((4815, 4861), 'numpy.dot', 'np.dot', (['headPlaneCrossProduct', 'headPlanePoint3'], {}), '(headPlaneCrossProduct, headPlanePoint3)\n', (4821, 4861), True, 'import numpy as np\n'), ((4989, 5062), 'numpy.array', 'np.array', (['[self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ]'], {}), '([self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ])\n', (4997, 5062), True, 'import numpy as np\n'), ((5089, 5159), 'numpy.array', 'np.array', (['[self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ]'], {}), '([self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ])\n', (5097, 5159), True, 'import numpy as np\n'), ((5186, 5241), 'numpy.array', 'np.array', (['[self.midBodyX, self.midBodyY, self.midBodyZ]'], {}), '([self.midBodyX, self.midBodyY, self.midBodyZ])\n', (5194, 5241), True, 'import numpy as np\n'), ((5396, 5440), 'numpy.cross', 'np.cross', (['bodyPlaneVector1', 'bodyPlaneVector2'], {}), '(bodyPlaneVector1, bodyPlaneVector2)\n', (5404, 5440), True, 'import numpy as np\n'), ((5561, 5607), 'numpy.dot', 
'np.dot', (['bodyPlaneCrossProduct', 'bodyPlanePoint3'], {}), '(bodyPlaneCrossProduct, bodyPlanePoint3)\n', (5567, 5607), True, 'import numpy as np\n'), ((6649, 6811), 'math.sqrt', 'math.sqrt', (['((self.rightShoulderX - self.rightElbowX) ** 2 + (self.rightShoulderY -\n self.rightElbowY) ** 2 + (self.rightShoulderZ - self.rightElbowZ) ** 2)'], {}), '((self.rightShoulderX - self.rightElbowX) ** 2 + (self.\n rightShoulderY - self.rightElbowY) ** 2 + (self.rightShoulderZ - self.\n rightElbowZ) ** 2)\n', (6658, 6811), False, 'import math\n'), ((8388, 8535), 'numpy.array', 'np.array', (['[closestPointOnBodyPlaneToRightElbowPointX,\n closestPointOnBodyPlaneToRightElbowPointY,\n closestPointOnBodyPlaneToRightElbowPointZ]'], {}), '([closestPointOnBodyPlaneToRightElbowPointX,\n closestPointOnBodyPlaneToRightElbowPointY,\n closestPointOnBodyPlaneToRightElbowPointZ])\n', (8396, 8535), True, 'import numpy as np\n'), ((8564, 8637), 'numpy.array', 'np.array', (['[self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ]'], {}), '([self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ])\n', (8572, 8637), True, 'import numpy as np\n'), ((8673, 8849), 'numpy.array', 'np.array', (['[closestPointOnBodyPlaneToRightElbowPoint[0],\n closestPointOnBodyPlaneToRightElbowPoint[1], \n closestPointOnBodyPlaneToRightElbowPoint[2] + deltaZRightUpperArm]'], {}), '([closestPointOnBodyPlaneToRightElbowPoint[0],\n closestPointOnBodyPlaneToRightElbowPoint[1], \n closestPointOnBodyPlaneToRightElbowPoint[2] + deltaZRightUpperArm])\n', (8681, 8849), True, 'import numpy as np\n'), ((8876, 8940), 'numpy.array', 'np.array', (['[self.rightElbowX, self.rightElbowY, self.rightElbowZ]'], {}), '([self.rightElbowX, self.rightElbowY, self.rightElbowZ])\n', (8884, 8940), True, 'import numpy as np\n'), ((9158, 9220), 'numpy.cross', 'np.cross', (['rightUpperArmPlaneVector1', 'rightUpperArmPlaneVector2'], {}), '(rightUpperArmPlaneVector1, rightUpperArmPlaneVector2)\n', (9166, 9220), True, 'import numpy 
as np\n'), ((9387, 9460), 'numpy.array', 'np.array', (['[self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ]'], {}), '([self.rightShoulderX, self.rightShoulderY, self.rightShoulderZ])\n', (9395, 9460), True, 'import numpy as np\n'), ((9491, 9555), 'numpy.array', 'np.array', (['[self.rightElbowX, self.rightElbowY, self.rightElbowZ]'], {}), '([self.rightElbowX, self.rightElbowY, self.rightElbowZ])\n', (9499, 9555), True, 'import numpy as np\n'), ((9586, 9647), 'numpy.array', 'np.array', (['[self.rightHandX, self.rightHandY, self.rightHandZ]'], {}), '([self.rightHandX, self.rightHandY, self.rightHandZ])\n', (9594, 9647), True, 'import numpy as np\n'), ((9830, 9882), 'numpy.cross', 'np.cross', (['rightArmPlaneVector1', 'rightArmPlaneVector2'], {}), '(rightArmPlaneVector1, rightArmPlaneVector2)\n', (9838, 9882), True, 'import numpy as np\n'), ((10680, 10786), 'math.sqrt', 'math.sqrt', (['(rightArmPlaneVector1[0] ** 2 + rightArmPlaneVector1[1] ** 2 + \n rightArmPlaneVector1[2] ** 2)'], {}), '(rightArmPlaneVector1[0] ** 2 + rightArmPlaneVector1[1] ** 2 + \n rightArmPlaneVector1[2] ** 2)\n', (10689, 10786), False, 'import math\n'), ((10822, 10928), 'math.sqrt', 'math.sqrt', (['(rightArmPlaneVector2[0] ** 2 + rightArmPlaneVector2[1] ** 2 + \n rightArmPlaneVector2[2] ** 2)'], {}), '(rightArmPlaneVector2[0] ** 2 + rightArmPlaneVector2[1] ** 2 + \n rightArmPlaneVector2[2] ** 2)\n', (10831, 10928), False, 'import math\n'), ((11305, 11369), 'numpy.array', 'np.array', (['[self.rightElbowX, self.rightElbowY, self.rightElbowZ]'], {}), '([self.rightElbowX, self.rightElbowY, self.rightElbowZ])\n', (11313, 11369), True, 'import numpy as np\n'), ((11404, 11468), 'numpy.array', 'np.array', (['[self.rightWristX, self.rightWristY, self.rightWristZ]'], {}), '([self.rightWristX, self.rightWristY, self.rightWristZ])\n', (11412, 11468), True, 'import numpy as np\n'), ((11503, 11567), 'numpy.array', 'np.array', (['[self.rightThumbX, self.rightThumbY, self.rightThumbZ]'], {}), 
'([self.rightThumbX, self.rightThumbY, self.rightThumbZ])\n', (11511, 11567), True, 'import numpy as np\n'), ((11778, 11838), 'numpy.cross', 'np.cross', (['rightForearmPlaneVector1', 'rightForearmPlaneVector2'], {}), '(rightForearmPlaneVector1, rightForearmPlaneVector2)\n', (11786, 11838), True, 'import numpy as np\n'), ((12519, 12669), 'math.sqrt', 'math.sqrt', (['((self.leftShoulderX - self.leftElbowX) ** 2 + (self.leftShoulderY - self.\n leftElbowY) ** 2 + (self.leftShoulderZ - self.leftElbowZ) ** 2)'], {}), '((self.leftShoulderX - self.leftElbowX) ** 2 + (self.leftShoulderY -\n self.leftElbowY) ** 2 + (self.leftShoulderZ - self.leftElbowZ) ** 2)\n', (12528, 12669), False, 'import math\n'), ((14223, 14367), 'numpy.array', 'np.array', (['[closestPointOnBodyPlaneToLeftElbowPointX,\n closestPointOnBodyPlaneToLeftElbowPointY,\n closestPointOnBodyPlaneToLeftElbowPointZ]'], {}), '([closestPointOnBodyPlaneToLeftElbowPointX,\n closestPointOnBodyPlaneToLeftElbowPointY,\n closestPointOnBodyPlaneToLeftElbowPointZ])\n', (14231, 14367), True, 'import numpy as np\n'), ((14395, 14465), 'numpy.array', 'np.array', (['[self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ]'], {}), '([self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ])\n', (14403, 14465), True, 'import numpy as np\n'), ((14500, 14672), 'numpy.array', 'np.array', (['[closestPointOnBodyPlaneToLeftElbowPoint[0],\n closestPointOnBodyPlaneToLeftElbowPoint[1], \n closestPointOnBodyPlaneToLeftElbowPoint[2] + deltaZLeftUpperArm]'], {}), '([closestPointOnBodyPlaneToLeftElbowPoint[0],\n closestPointOnBodyPlaneToLeftElbowPoint[1], \n closestPointOnBodyPlaneToLeftElbowPoint[2] + deltaZLeftUpperArm])\n', (14508, 14672), True, 'import numpy as np\n'), ((14698, 14759), 'numpy.array', 'np.array', (['[self.leftElbowX, self.leftElbowY, self.leftElbowZ]'], {}), '([self.leftElbowX, self.leftElbowY, self.leftElbowZ])\n', (14706, 14759), True, 'import numpy as np\n'), ((14970, 15030), 'numpy.cross', 'np.cross', 
(['leftUpperArmPlaneVector1', 'leftUpperArmPlaneVector2'], {}), '(leftUpperArmPlaneVector1, leftUpperArmPlaneVector2)\n', (14978, 15030), True, 'import numpy as np\n'), ((15192, 15262), 'numpy.array', 'np.array', (['[self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ]'], {}), '([self.leftShoulderX, self.leftShoulderY, self.leftShoulderZ])\n', (15200, 15262), True, 'import numpy as np\n'), ((15292, 15353), 'numpy.array', 'np.array', (['[self.leftElbowX, self.leftElbowY, self.leftElbowZ]'], {}), '([self.leftElbowX, self.leftElbowY, self.leftElbowZ])\n', (15300, 15353), True, 'import numpy as np\n'), ((15383, 15441), 'numpy.array', 'np.array', (['[self.leftHandX, self.leftHandY, self.leftHandZ]'], {}), '([self.leftHandX, self.leftHandY, self.leftHandZ])\n', (15391, 15441), True, 'import numpy as np\n'), ((15617, 15667), 'numpy.cross', 'np.cross', (['leftArmPlaneVector1', 'leftArmPlaneVector2'], {}), '(leftArmPlaneVector1, leftArmPlaneVector2)\n', (15625, 15667), True, 'import numpy as np\n'), ((16448, 16551), 'math.sqrt', 'math.sqrt', (['(leftArmPlaneVector1[0] ** 2 + leftArmPlaneVector1[1] ** 2 + \n leftArmPlaneVector1[2] ** 2)'], {}), '(leftArmPlaneVector1[0] ** 2 + leftArmPlaneVector1[1] ** 2 + \n leftArmPlaneVector1[2] ** 2)\n', (16457, 16551), False, 'import math\n'), ((16586, 16689), 'math.sqrt', 'math.sqrt', (['(leftArmPlaneVector2[0] ** 2 + leftArmPlaneVector2[1] ** 2 + \n leftArmPlaneVector2[2] ** 2)'], {}), '(leftArmPlaneVector2[0] ** 2 + leftArmPlaneVector2[1] ** 2 + \n leftArmPlaneVector2[2] ** 2)\n', (16595, 16689), False, 'import math\n'), ((17057, 17118), 'numpy.array', 'np.array', (['[self.leftElbowX, self.leftElbowY, self.leftElbowZ]'], {}), '([self.leftElbowX, self.leftElbowY, self.leftElbowZ])\n', (17065, 17118), True, 'import numpy as np\n'), ((17152, 17213), 'numpy.array', 'np.array', (['[self.leftWristX, self.leftWristY, self.leftWristZ]'], {}), '([self.leftWristX, self.leftWristY, self.leftWristZ])\n', (17160, 17213), True, 'import numpy 
as np\n'), ((17247, 17308), 'numpy.array', 'np.array', (['[self.leftThumbX, self.leftThumbY, self.leftThumbZ]'], {}), '([self.leftThumbX, self.leftThumbY, self.leftThumbZ])\n', (17255, 17308), True, 'import numpy as np\n'), ((17512, 17570), 'numpy.cross', 'np.cross', (['leftForearmPlaneVector1', 'leftForearmPlaneVector2'], {}), '(leftForearmPlaneVector1, leftForearmPlaneVector2)\n', (17520, 17570), True, 'import numpy as np\n'), ((6121, 6138), 'math.radians', 'math.radians', (['(-90)'], {}), '(-90)\n', (6133, 6138), False, 'import math\n'), ((7121, 7212), 'math.sqrt', 'math.sqrt', (['(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2)'], {}), '(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + \n bodyPlaneEquationC ** 2)\n', (7130, 7212), False, 'import math\n'), ((7354, 7373), 'math.radians', 'math.radians', (['(119.5)'], {}), '(119.5)\n', (7366, 7373), False, 'import math\n'), ((7475, 7494), 'math.radians', 'math.radians', (['(-89.5)'], {}), '(-89.5)\n', (7487, 7494), False, 'import math\n'), ((11250, 11268), 'math.radians', 'math.radians', (['(89.5)'], {}), '(89.5)\n', (11262, 11268), False, 'import math\n'), ((12978, 13069), 'math.sqrt', 'math.sqrt', (['(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2)'], {}), '(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + \n bodyPlaneEquationC ** 2)\n', (12987, 13069), False, 'import math\n'), ((13208, 13227), 'math.radians', 'math.radians', (['(119.5)'], {}), '(119.5)\n', (13220, 13227), False, 'import math\n'), ((13325, 13343), 'math.radians', 'math.radians', (['(89.5)'], {}), '(89.5)\n', (13337, 13343), False, 'import math\n'), ((6102, 6118), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (6114, 6118), False, 'import math\n'), ((7260, 7329), 'math.atan2', 'math.atan2', (['deltaZRightUpperArm', 'distanceRightElbowPointFromBodyPlane'], {}), '(deltaZRightUpperArm, distanceRightElbowPointFromBodyPlane)\n', (7270, 7329), False, 'import math\n'), ((7331, 
7351), 'math.radians', 'math.radians', (['(-119.5)'], {}), '(-119.5)\n', (7343, 7351), False, 'import math\n'), ((11147, 11164), 'math.radians', 'math.radians', (['(180)'], {}), '(180)\n', (11159, 11164), False, 'import math\n'), ((13116, 13183), 'math.atan2', 'math.atan2', (['deltaZLeftUpperArm', 'distanceLeftElbowPointFromBodyPlane'], {}), '(deltaZLeftUpperArm, distanceLeftElbowPointFromBodyPlane)\n', (13126, 13183), False, 'import math\n'), ((13185, 13205), 'math.radians', 'math.radians', (['(-119.5)'], {}), '(-119.5)\n', (13197, 13205), False, 'import math\n'), ((17003, 17021), 'math.radians', 'math.radians', (['(89.5)'], {}), '(89.5)\n', (17015, 17021), False, 'import math\n'), ((5871, 5962), 'math.sqrt', 'math.sqrt', (['(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2)'], {}), '(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + \n bodyPlaneEquationC ** 2)\n', (5880, 5962), False, 'import math\n'), ((5960, 6051), 'math.sqrt', 'math.sqrt', (['(headPlaneEquationA ** 2 + headPlaneEquationB ** 2 + headPlaneEquationC ** 2)'], {}), '(headPlaneEquationA ** 2 + headPlaneEquationB ** 2 + \n headPlaneEquationC ** 2)\n', (5969, 6051), False, 'import math\n'), ((6453, 6544), 'math.sqrt', 'math.sqrt', (['(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + bodyPlaneEquationC ** 2)'], {}), '(bodyPlaneEquationA ** 2 + bodyPlaneEquationB ** 2 + \n bodyPlaneEquationC ** 2)\n', (6462, 6544), False, 'import math\n'), ((6542, 6613), 'math.sqrt', 'math.sqrt', (['(headVector[0] ** 2 + headVector[1] ** 2 + headVector[2] ** 2)'], {}), '(headVector[0] ** 2 + headVector[1] ** 2 + headVector[2] ** 2)\n', (6551, 6613), False, 'import math\n'), ((10304, 10421), 'math.sqrt', 'math.sqrt', (['(rightUpperArmPlaneEquationA ** 2 + rightUpperArmPlaneEquationB ** 2 + \n rightUpperArmPlaneEquationC ** 2)'], {}), '(rightUpperArmPlaneEquationA ** 2 + rightUpperArmPlaneEquationB **\n 2 + rightUpperArmPlaneEquationC ** 2)\n', (10313, 10421), False, 'import math\n'), 
((10420, 10523), 'math.sqrt', 'math.sqrt', (['(rightArmPlaneEquationA ** 2 + rightArmPlaneEquationB ** 2 + \n rightArmPlaneEquationC ** 2)'], {}), '(rightArmPlaneEquationA ** 2 + rightArmPlaneEquationB ** 2 + \n rightArmPlaneEquationC ** 2)\n', (10429, 10523), False, 'import math\n'), ((11177, 11247), 'numpy.dot', 'np.dot', (['rightArmPlaneVector1Normalized', 'rightArmPlaneVector2Normalized'], {}), '(rightArmPlaneVector1Normalized, rightArmPlaneVector2Normalized)\n', (11183, 11247), True, 'import numpy as np\n'), ((12273, 12376), 'math.sqrt', 'math.sqrt', (['(rightArmPlaneEquationA ** 2 + rightArmPlaneEquationB ** 2 + \n rightArmPlaneEquationC ** 2)'], {}), '(rightArmPlaneEquationA ** 2 + rightArmPlaneEquationB ** 2 + \n rightArmPlaneEquationC ** 2)\n', (12282, 12376), False, 'import math\n'), ((12374, 12488), 'math.sqrt', 'math.sqrt', (['(rightForearmPlaneEquationA ** 2 + rightForearmPlaneEquationB ** 2 + \n rightForearmPlaneEquationC ** 2)'], {}), '(rightForearmPlaneEquationA ** 2 + rightForearmPlaneEquationB ** 2 +\n rightForearmPlaneEquationC ** 2)\n', (12383, 12488), False, 'import math\n'), ((16902, 16919), 'math.radians', 'math.radians', (['(180)'], {}), '(180)\n', (16914, 16919), False, 'import math\n'), ((16079, 16193), 'math.sqrt', 'math.sqrt', (['(leftUpperArmPlaneEquationA ** 2 + leftUpperArmPlaneEquationB ** 2 + \n leftUpperArmPlaneEquationC ** 2)'], {}), '(leftUpperArmPlaneEquationA ** 2 + leftUpperArmPlaneEquationB ** 2 +\n leftUpperArmPlaneEquationC ** 2)\n', (16088, 16193), False, 'import math\n'), ((16192, 16292), 'math.sqrt', 'math.sqrt', (['(leftArmPlaneEquationA ** 2 + leftArmPlaneEquationB ** 2 + \n leftArmPlaneEquationC ** 2)'], {}), '(leftArmPlaneEquationA ** 2 + leftArmPlaneEquationB ** 2 + \n leftArmPlaneEquationC ** 2)\n', (16201, 16292), False, 'import math\n'), ((16932, 17000), 'numpy.dot', 'np.dot', (['leftArmPlaneVector1Normalized', 'leftArmPlaneVector2Normalized'], {}), '(leftArmPlaneVector1Normalized, 
leftArmPlaneVector2Normalized)\n', (16938, 17000), True, 'import numpy as np\n'), ((17995, 18095), 'math.sqrt', 'math.sqrt', (['(leftArmPlaneEquationA ** 2 + leftArmPlaneEquationB ** 2 + \n leftArmPlaneEquationC ** 2)'], {}), '(leftArmPlaneEquationA ** 2 + leftArmPlaneEquationB ** 2 + \n leftArmPlaneEquationC ** 2)\n', (18004, 18095), False, 'import math\n'), ((18093, 18204), 'math.sqrt', 'math.sqrt', (['(leftForearmPlaneEquationA ** 2 + leftForearmPlaneEquationB ** 2 + \n leftForearmPlaneEquationC ** 2)'], {}), '(leftForearmPlaneEquationA ** 2 + leftForearmPlaneEquationB ** 2 +\n leftForearmPlaneEquationC ** 2)\n', (18102, 18204), False, 'import math\n')] |
'''
Created on Sep 21, 2017
@author: simulant
'''
import os
import numpy as np
import csv
def READ_CSV_DATA(filename, delimiter=",", skip_header=6):
    """
    Reads data from csv-file into a numpy recarray.

    The first ``skip_header`` rows are a header block describing the columns:
    First ROW: Variable Name
    Second ROW: Data Type
    Third ROW: EXPORT DATA To RasterLayer (1 if True)
    Four ROW: Scaling Factor:
    Five: Unit

    Returns (DATA, EXPORT_COLUMNS, SCALING_FACTOR, CUTOFF_Value).
    """
    # Fail fast with an explicit error when the path is missing or not a file.
    if os.path.exists(filename) and os.path.isfile(filename):
        filename = filename
    else:
        exstring = "File " + filename + " does not exist."
        raise IOError(exstring)
    mapping = np.ndarray(shape=(0,0))
    # NOTE(review): the triple-quoted block below is a disabled legacy
    # np.loadtxt implementation kept for reference; it is never executed.
    """
    try:
        print("Loading format from file {0}".format(filename))
        np_version = np.__version__.split(".")
        if int(np_version[0])*100 + int(np_version[1]) >=116:
            mapping = np.loadtxt(filename, dtype="U1024", delimiter=delimiter, max_rows=10)
        else:
            mapping = np.loadtxt(filename, dtype="U1024", delimiter=delimiter)
        print("Imported format specification from textfile: {0}".format(filename))
    except (ValueError, TypeError) as ex:
        args = list(ex.args)
        for i in range(len(args)):
            args[i] = str(args[i])
        error_msg = ("Error while importing the format specification from textfile {0}: {1}".format(filename, "".join(args)))
        raise IOError(error_msg)
    except Exception as ex:
        print (ex)
    """
    # Read only the header rows: stop after skip_header lines (capped at 6).
    mapping_list =[]
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            mapping_list.append(row)
            line_count += 1
            if line_count >= skip_header or line_count > 5:
                break
    mapping = np.asarray(mapping_list, dtype="U1024")
    #get Type and Name
    dtype = []
    EXPORT_COLUMNS = []
    SCALING_FACTOR = []
    CUTOFF_Value = []
    j = 0
    # Strip Python-2 style byte-string markers ("b'...'") from header cells.
    for r in range(min(6, mapping.shape[0])):
        for col in range(mapping.shape[1]):
            if mapping[r, col].startswith("b'") and mapping[r, col].endswith("'"):
                mapping[r, col] = mapping[r, col][2:-1]
    # One header column per data column: build the structured dtype and
    # collect per-column export flag, scaling factor and cutoff value.
    for row in mapping.transpose():
        if row[0] == "":
            # Unnamed column: assign a synthetic name with float32 type.
            j = j + 1
            dtype.append(("emptycol_%i"%j, "f4"))
        else:
            if row[1].startswith("S"):
                # Map numpy byte-string codes ("S...") to unicode ("U...").
                row[1] = "U" + row[1][1:]
            dtype.append((row[0], row[1]))
            if row[2].strip() == "1":
                EXPORT_COLUMNS.append(row[0])
                # NOTE(review): eval() on file content is unsafe for
                # untrusted CSVs — consider ast.literal_eval instead.
                # A failed eval resets the whole list to [0], not just
                # this entry — presumably intentional; verify.
                try:
                    SCALING_FACTOR.append(eval(row[3]))
                except:
                    SCALING_FACTOR = [0]
                try:
                    CUTOFF_Value.append(eval(row[5]))
                except:
                    CUTOFF_Value = [0]
    # Parse the data body with the dtype built above, skipping the header.
    DATA = np.genfromtxt(filename, dtype=dtype, skip_header=skip_header,
                    delimiter=delimiter, missing_values="0", filling_values = "0" )
return DATA, EXPORT_COLUMNS, SCALING_FACTOR, CUTOFF_Value | [
"os.path.exists",
"numpy.asarray",
"os.path.isfile",
"numpy.ndarray",
"csv.reader",
"numpy.genfromtxt"
] | [((589, 613), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0, 0)'}), '(shape=(0, 0))\n', (599, 613), True, 'import numpy as np\n'), ((1792, 1831), 'numpy.asarray', 'np.asarray', (['mapping_list'], {'dtype': '"""U1024"""'}), "(mapping_list, dtype='U1024')\n", (1802, 1831), True, 'import numpy as np\n'), ((2917, 3044), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'dtype': 'dtype', 'skip_header': 'skip_header', 'delimiter': 'delimiter', 'missing_values': '"""0"""', 'filling_values': '"""0"""'}), "(filename, dtype=dtype, skip_header=skip_header, delimiter=\n delimiter, missing_values='0', filling_values='0')\n", (2930, 3044), True, 'import numpy as np\n'), ((386, 410), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (400, 410), False, 'import os\n'), ((415, 439), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (429, 439), False, 'import os\n'), ((1540, 1575), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1550, 1575), False, 'import csv\n')] |
from mss import mss
import cv2
from PIL import Image
import numpy as np
from threading import Lock
from PIL import Image
from Xlib import X
import ewmh
class CaptureMonitor:
    """Grabs frames either from a screen region (mss) or from an X11 window.

    All public methods serialize access to shared state through ``self.lock``
    so capture settings can be changed from another thread.
    """

    MODE_CROP, MODE_WINDOW = 0, 1

    def __init__(self, bb):
        """bb is a bounding box (left, top, right, bottom) in screen coords."""
        self.lock = Lock()
        self.screen_capture = mss()
        self.ewmh_capture = ewmh.EWMH()
        self.current_window = None
        self.current_monitor = dict.fromkeys(('left', 'top', 'width', 'height'))
        self.mode = CaptureMonitor.MODE_CROP
        # Convert (left, top, right, bottom) to (left, top, width, height).
        self.change_coords(bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1])
        self.max_resolution = 920
        self.min_resolution = 320

    def screen_resize(self, im, size_axis):
        """Scale the image so its longer side equals size_axis, keeping aspect."""
        rows, cols = im.shape[0], im.shape[1]
        if rows > cols:
            new_h = size_axis
            new_w = new_h * (cols / rows)
        else:
            new_w = size_axis
            new_h = new_w * (rows / cols)
        return cv2.resize(im, (int(new_w), int(new_h)))

    def get_frame(self):
        """Capture one RGB frame, clamped into [min_resolution, max_resolution]."""
        with self.lock:
            use_window = (self.mode == CaptureMonitor.MODE_WINDOW
                          and self.current_window is not None)
            if use_window:
                geometry = self.current_window.get_geometry()
                raw = self.current_window.get_image(
                    0, 0, geometry.width, geometry.height, X.ZPixmap, 0xffffffff)
                pil_img = Image.frombytes(
                    "RGB", (geometry.width, geometry.height), raw.data, "raw", "BGRX")
            else:
                grab = self.screen_capture.grab(self.current_monitor)
                pil_img = Image.frombytes('RGB', self.size, grab.rgb)
            frame = np.array(pil_img)  # RGB
            h, w = frame.shape[0], frame.shape[1]
            # Upscale tiny captures, downscale huge ones, pass through otherwise.
            if h < self.min_resolution or w < self.min_resolution:
                return self.screen_resize(frame, self.min_resolution)
            if h > self.max_resolution or w > self.max_resolution:
                return self.screen_resize(frame, self.max_resolution)
            return frame

    def get_windows(self):
        """Return selectable window titles, 'Active window' pseudo-entry first."""
        titles = (w.get_wm_name().strip() for w in self.ewmh_capture.getClientList())
        return ['Active window'] + [t for t in titles if t]

    def set_window(self, name):
        """Select the capture window by title ('Active window' = focused one)."""
        with self.lock:
            if name == 'Active window':
                self.current_window = self.ewmh_capture.getActiveWindow()
                return
            for candidate in self.ewmh_capture.getClientList():
                if candidate.get_wm_name().strip() == name:
                    self.current_window = candidate
                    return

    def set_mode(self, mode):
        """Switch between MODE_CROP and MODE_WINDOW."""
        with self.lock:
            self.mode = mode

    def change_coords(self, left, top, width, height):
        """Update the screen-crop rectangle used in MODE_CROP."""
        with self.lock:
            left, top = int(left), int(top)
            width, height = int(width), int(height)
            self.size = width, height
            self.current_monitor.update({'left': left, 'top': top,
                                         'width': self.size[0], 'height': self.size[1]})
| [
"mss.mss",
"threading.Lock",
"ewmh.EWMH",
"numpy.array",
"PIL.Image.frombytes",
"cv2.resize"
] | [((268, 274), 'threading.Lock', 'Lock', ([], {}), '()\n', (272, 274), False, 'from threading import Lock\n'), ((305, 310), 'mss.mss', 'mss', ([], {}), '()\n', (308, 310), False, 'from mss import mss\n'), ((339, 350), 'ewmh.EWMH', 'ewmh.EWMH', ([], {}), '()\n', (348, 350), False, 'import ewmh\n'), ((962, 984), 'cv2.resize', 'cv2.resize', (['im', '(w, h)'], {}), '(im, (w, h))\n', (972, 984), False, 'import cv2\n'), ((1543, 1555), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1551, 1555), True, 'import numpy as np\n'), ((1312, 1387), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', '(geo.width, geo.height)', 'raw_im.data', '"""raw"""', '"""BGRX"""'], {}), "('RGB', (geo.width, geo.height), raw_im.data, 'raw', 'BGRX')\n", (1327, 1387), False, 'from PIL import Image\n')] |
# Load required libraries
import numpy as np
import pandas as pd
import os
import glob
from PIL import Image
import pydicom
import PIL
import imageio
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import seaborn as sns
import tqdm
import cv2
import png
import pylab
import math
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import torchvision
from pydicom import dcmread, read_file
from collections import Counter
import plotly.express as px
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import sklearn.metrics as metrics
from glob import glob
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import Dataset, DataLoader, sampler
from torchvision import models
import torch.nn as nn
#from torchsummary import summary
from torchvision import transforms, datasets, models
import torch
from torch import optim, cuda
from torch.utils.data import DataLoader, sampler
import torch.nn as nn
from timeit import default_timer as timer
from fastai.basics import *
from fastai.medical.imaging import *
from fastai.vision.all import *
from functools import partial
train_on_gpu = cuda.is_available()
def target_slice(col, targ_pct):
    """Select the most appropriate slice in a given series using DICOM
    attributes SliceLocation and InstanceNumber.

    Slices in directory ``col`` are sorted by SliceLocation; when that
    attribute is missing, InstanceNumber is used as a fallback. The file at
    fraction ``targ_pct`` of the sorted series is returned.

    Bug fix: the previous version wrapped only the *definition* of a lambda
    in try/except — defining a lambda never raises, so the AttributeError
    was never caught there and the ``else`` clause (which runs when no
    exception occurs) unconditionally replaced the key with InstanceNumber.
    SliceLocation was therefore never used. The attribute is now actually
    read during the first sort attempt.
    """
    fns = os.listdir(col)

    def _attr_key(attr):
        # Header-only read (stop_before_pixels=True) keeps this cheap;
        # getattr raises AttributeError when the attribute is absent.
        return lambda fn: getattr(
            pydicom.dcmread(col + '/' + fn, stop_before_pixels=True), attr)

    try:
        fns = sorted(fns, key=_attr_key('SliceLocation'))
    except AttributeError:
        print('SliceLocation Attribute not found')
        fns = sorted(fns, key=_attr_key('InstanceNumber'))

    img_ct = len(fns)
    idx = math.floor(img_ct * targ_pct)
    targ_fn = fns[idx]
    return targ_fn
class DicomDataset(Dataset):
    """Custom Pytorch Dataset class for feeding directly DICOM data into CNN.

    The CSV must provide 'full_fpath', 'label' and 'PatientID' columns, with
    the label additionally readable at positional column index 2.
    """

    def __init__(self, csv_file, transform=None):
        self.csv_file = csv_file
        self.transform = transform
        self.df = pd.read_csv(csv_file)
        self.annotations = self.df['label']

    def __len__(self):
        # One sample per CSV row.
        return len(self.annotations)

    def __getitem__(self, index):
        row = self.df.loc[index]
        dicom = pydicom.dcmread(row['full_fpath'])
        pixels = dicom.pixel_array

        # Robust windowing: clip to the 2.5th/97.5th percentiles, then
        # rescale into [0, 1].
        lo = np.percentile(pixels, 2.5)
        hi = np.percentile(pixels, 97.5)
        scaled = (np.clip(pixels, lo, hi) - lo) / (hi - lo)

        # Resize to the 224x224 network input and replicate the single
        # channel three times to form a 3-channel (RGB-style) image.
        resized = cv2.resize(scaled, (224, 224))
        stacked = np.repeat(resized[..., np.newaxis], 3, -1)

        # HWC -> CHW float tensor, as expected by torchvision models.
        tensor = torch.from_numpy(stacked).permute(2, 0, 1).float()
        label = torch.tensor(int(self.df.iloc[index, 2]))
        if self.transform:
            tensor = self.transform(tensor)
        return (tensor, label, row['PatientID'])
def model_train(model,
                criterion,
                optimizer,
                train_loader,
                valid_loader,
                save_file_name,
                max_epochs_stop=3,
                n_epochs=10,
                print_every=1):
    """Train a PyTorch Model with early stopping on validation loss.

    Params
    --------
    model (PyTorch model): cnn to train
    criterion (PyTorch loss): objective to minimize
    optimizer (PyTorch optimizer): optimizer to compute gradients of model parameters
    train_loader (PyTorch dataloader): training dataloader; yields (data, target, pid)
    valid_loader (PyTorch dataloader): validation dataloader used for early stopping
    save_file_name (str ending in '.pt'): file path to save the model state dict
    max_epochs_stop (int): maximum number of epochs with no improvement in validation loss for early stopping
    n_epochs (int): maximum number of training epochs
    print_every (int): frequency of epochs to print training stats

    Returns
    --------
    model (PyTorch model): trained cnn with best weights
    history (DataFrame): history of train and validation loss and accuracy

    Relies on the module-level ``train_on_gpu`` flag for device placement.

    Fixes over the previous version:
    - the end-of-training summary divided by ``epoch`` (ZeroDivisionError for
      n_epochs == 1); it now divides by ``epoch + 1`` like the early-stop path.
    - the "Best epoch" messages printed the *last* epoch's accuracy
      (``valid_acc``) instead of the best one (``valid_best_acc``).
    - ``np.Inf`` (removed in NumPy 2.0) replaced with ``np.inf``.
    - bare ``except:`` narrowed to ``AttributeError``.
    """
    # Early stopping initialization
    epochs_no_improve = 0
    valid_loss_min = np.inf
    valid_best_acc = 0
    best_epoch = 0
    history = []

    # Number of epochs already trained (if using loaded-in model weights)
    try:
        print(f'Model has been trained for: {model.epochs} epochs.\n')
    except AttributeError:
        model.epochs = 0
        print(f'Starting Training from Scratch.\n')

    overall_start = timer()

    # Main loop
    for epoch in range(n_epochs):
        # Keep track of training and validation loss each epoch
        train_loss = 0.0
        valid_loss = 0.0
        train_acc = 0
        valid_acc = 0

        # Set to training
        model.train()
        start = timer()

        # Training loop
        for ii, (data, target, pid) in enumerate(train_loader):
            # Tensors to gpu
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()

            # Clear gradients
            optimizer.zero_grad()
            # Predicted outputs are log probabilities
            output = model(data)

            # Loss and backpropagation of gradients
            loss = criterion(output, target)
            loss.backward()
            # Update the parameters
            optimizer.step()

            # Track train loss weighted by the number of examples in the batch
            train_loss += loss.item() * data.size(0)

            # Accuracy: fraction of max-log-probability predictions hitting target
            _, pred = torch.max(output, dim=1)
            correct_tensor = pred.eq(target.data.view_as(pred))
            accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
            train_acc += accuracy.item() * data.size(0)

            # Track training progress
            print(
                f'Epoch: {epoch}\t{100 * (ii + 1) / len(train_loader):.2f}% complete. {timer() - start:.2f} seconds elapsed in epoch.',
                end='\r')

        # After training loop ends, start validation
        else:
            model.epochs += 1

            # Don't need to keep track of gradients
            with torch.no_grad():
                # Set to evaluation mode
                model.eval()

                # Validation loop
                for data, target, pid in valid_loader:
                    # Tensors to gpu
                    if train_on_gpu:
                        data, target = data.cuda(), target.cuda()

                    # Forward pass
                    output = model(data)

                    # Validation loss, weighted by batch size
                    loss = criterion(output, target)
                    valid_loss += loss.item() * data.size(0)

                    # Validation accuracy
                    _, pred = torch.max(output, dim=1)
                    correct_tensor = pred.eq(target.data.view_as(pred))
                    accuracy = torch.mean(
                        correct_tensor.type(torch.FloatTensor))
                    valid_acc += accuracy.item() * data.size(0)

                # Calculate average losses
                train_loss = train_loss / len(train_loader.dataset)
                valid_loss = valid_loss / len(valid_loader.dataset)

                # Calculate average accuracy
                train_acc = train_acc / len(train_loader.dataset)
                valid_acc = valid_acc / len(valid_loader.dataset)

                history.append([train_loss, valid_loss, train_acc, valid_acc])

                # Print training and validation results
                if (epoch + 1) % print_every == 0:
                    print(
                        f'\nEpoch: {epoch} \tTraining Loss: {train_loss:.4f} \tValidation Loss: {valid_loss:.4f}'
                    )
                    print(
                        f'\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%'
                    )

                # Save the model if validation loss decreases
                if valid_loss < valid_loss_min:
                    # Save model
                    torch.save(model.state_dict(), save_file_name)
                    # Track improvement
                    epochs_no_improve = 0
                    valid_loss_min = valid_loss
                    valid_best_acc = valid_acc
                    best_epoch = epoch

                # Otherwise increment count of epochs with no improvement
                else:
                    epochs_no_improve += 1
                    # Trigger early stopping
                    if epochs_no_improve >= max_epochs_stop:
                        print(
                            f'\nEarly Stopping! Total epochs: {epoch}. Best epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_best_acc:.2f}%'
                        )
                        total_time = timer() - overall_start
                        print(
                            f'{total_time:.2f} total seconds elapsed. {total_time / (epoch + 1):.2f} seconds per epoch.'
                        )

                        # Load the best state dict
                        model.load_state_dict(torch.load(save_file_name))
                        # Attach the optimizer
                        model.optimizer = optimizer

                        # Format history
                        history = pd.DataFrame(
                            history,
                            columns=[
                                'train_loss', 'valid_loss', 'train_acc',
                                'valid_acc'
                            ])
                        return model, history

    # Attach the optimizer
    model.optimizer = optimizer
    # Record overall time and print out stats
    total_time = timer() - overall_start
    print(
        f'\nBest epoch: {best_epoch} with loss: {valid_loss_min:.2f} and acc: {100 * valid_best_acc:.2f}%'
    )
    print(
        f'{total_time:.2f} total seconds elapsed. {total_time / (epoch + 1):.2f} seconds per epoch.'
    )
    # Format history
    history = pd.DataFrame(
        history,
        columns=['train_loss', 'valid_loss', 'train_acc', 'valid_acc'])
    return model, history
def loss_accuracy(history, slice):
    """Plot loss accuracy curves for training & validation sets"""
    plt.figure(figsize=plt.figaspect(0.5))

    # Left panel: negative log-likelihood losses per epoch.
    plt.subplot(1, 2, 1)
    for column in ('train_loss', 'valid_loss'):
        plt.plot(history[column], label=column)
    plt.legend()
    plt.xlabel('Epoch')
    plt.ylabel('Average Negative Log Likelihood')
    plt.title(f'Training and Validation Losses - {slice}')
    plt.tight_layout()

    # Right panel: accuracies, scaled to percentages.
    plt.subplot(1, 2, 2)
    for column in ('train_acc', 'valid_acc'):
        plt.plot(100 * history[column], label=column)
    plt.legend()
    plt.xlabel('Epoch')
    plt.ylabel('Average Accuracy')
    plt.title(f'Training and Validation Accuracy - {slice}')
    plt.tight_layout()
    plt.show()
def model_eval(model, valid_loader):
    """Run the model over ``valid_loader`` and collect per-sample predictions.

    Returns a DataFrame with columns ['pid', 'y_true', 'prob'], where ``prob``
    is the predicted probability of the positive class (index 1). Relies on
    the module-level ``train_on_gpu`` flag for device placement.

    NOTE(review): the ``int(target)`` / ``float(score[:, 1])`` / ``''.join(pid)``
    conversions assume the loader yields batches of size 1 — confirm against
    the caller before changing the batch size.

    Fixes over the previous version: removed the dead locals (``score = []``,
    ``y_pred``) and the commented-out append statements; the class prediction
    via ``torch.max`` was computed but never used.
    """
    with torch.no_grad():
        # Set to evaluation mode
        model.eval()
        patient_id = []
        y_true = []
        prob = []

        # Validation loop
        for data, target, pid in valid_loader:
            # Tensors to gpu
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()

            # Model outputs log-probabilities; exponentiate for probabilities.
            score = torch.exp(model(data)).cpu()
            target = target.cpu()

            y_true.append(int(target))
            prob.append(float(score[:, 1]))
            patient_id.append(''.join(pid))

    return pd.DataFrame(list(zip(patient_id, y_true, prob)),
                        columns=['pid', 'y_true', 'prob'])
def roc_pr(df, slice, data):
    """Calculate the fpr and tpr for all thresholds of the classification and plot ROC curve"""
    labels = df['y_true']
    scores = df['prob']

    # ROC curve and its area.
    fpr, tpr, _ = metrics.roc_curve(labels, scores)
    roc_auc = metrics.auc(fpr, tpr)

    # Precision-recall curve and its area.
    precision, recall, _ = metrics.precision_recall_curve(labels, scores)
    area = metrics.auc(recall, precision)

    def _finish_panel(y_label, x_label):
        # Shared axis styling for both panels.
        plt.xlim([0, 1])
        plt.ylim([0, 1])
        plt.ylabel(y_label)
        plt.xlabel(x_label)
        plt.tight_layout()

    plt.figure(figsize=plt.figaspect(0.5))

    plt.subplot(1, 2, 1)
    plt.title(f'ROC Curve - {slice} - {data}')
    plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
    plt.legend(loc = 'lower right')
    plt.plot([0, 1], [0, 1],'r--')  # chance diagonal
    _finish_panel('True Positive Rate', 'False Positive Rate')

    plt.subplot(1, 2, 2)
    plt.title(f'PR Curve - {slice} - {data}')
    plt.plot(recall, precision, 'b--', label = 'Area under PR Curve = %0.2f' % area)
    plt.legend(loc = 'lower right')
    _finish_panel('Precision', 'Recall')

    plt.show()
def df_pred(pred_ax, pred_cor_1, pred_cor_2):
    """Returns dataframe of predicted probability scores for each slice for a given set"""
    # Join the two coronal predictions on patient id; the merge suffixes the
    # duplicated columns ('prob_x'/'prob_y', 'y_true_x'/'y_true_y'), which we
    # rename and drop explicitly.
    coronal = (
        pred_cor_1.merge(pred_cor_2, how='inner', on='pid')
        .rename(columns={'prob_x': 'p_cor_1', 'prob_y': 'p_cor_2'})
        .drop(['y_true_x', 'y_true_y'], axis=1)
    )
    # Attach the axial prediction and give its probability its final name.
    combined = pred_ax.merge(coronal, how='inner', on='pid')
    return combined.rename(columns={'prob': 'p_ax'})
def check_duplicate(df1, df2):
    """Check if any duplicate PatientID is present across given 2 dataframes"""
    # Outer merge with an indicator column ('True') marks rows present in
    # both frames as 'both' — those are the duplicated PatientIDs.
    merged = df1.merge(df2, how='outer', on='PatientID', indicator='True')
    dup_rows = merged[merged['True'] == 'both']
    if len(dup_rows.index) == 0:
        print("No Duplicate PatientID found.")
    else:
        print("Duplicate PatientID found.")
| [
"numpy.clip",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"math.floor",
"sklearn.metrics.auc",
"torch.max",
"torch.from_numpy",
"torch.cuda.is_available",
"sklearn.metrics.roc_curve",
"os.listdir",
"numpy.repeat",
"pydicom.dcmread",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
... | [((1363, 1382), 'torch.cuda.is_available', 'cuda.is_available', ([], {}), '()\n', (1380, 1382), False, 'from torch import optim, cuda\n'), ((4845, 4852), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4850, 4852), True, 'from timeit import default_timer as timer\n'), ((10739, 10828), 'pandas.DataFrame', 'pd.DataFrame', (['history'], {'columns': "['train_loss', 'valid_loss', 'train_acc', 'valid_acc']"}), "(history, columns=['train_loss', 'valid_loss', 'train_acc',\n 'valid_acc'])\n", (10751, 10828), True, 'import pandas as pd\n'), ((11021, 11041), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (11032, 11041), True, 'import matplotlib.pyplot as plt\n'), ((11127, 11139), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11137, 11139), True, 'import matplotlib.pyplot as plt\n'), ((11144, 11163), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (11154, 11163), True, 'import matplotlib.pyplot as plt\n'), ((11168, 11213), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Negative Log Likelihood"""'], {}), "('Average Negative Log Likelihood')\n", (11178, 11213), True, 'import matplotlib.pyplot as plt\n'), ((11218, 11272), 'matplotlib.pyplot.title', 'plt.title', (['f"""Training and Validation Losses - {slice}"""'], {}), "(f'Training and Validation Losses - {slice}')\n", (11227, 11272), True, 'import matplotlib.pyplot as plt\n'), ((11277, 11295), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11293, 11295), True, 'import matplotlib.pyplot as plt\n'), ((11305, 11325), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (11316, 11325), True, 'import matplotlib.pyplot as plt\n'), ((11415, 11427), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11425, 11427), True, 'import matplotlib.pyplot as plt\n'), ((11432, 11451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (11442, 
11451), True, 'import matplotlib.pyplot as plt\n'), ((11456, 11486), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Accuracy"""'], {}), "('Average Accuracy')\n", (11466, 11486), True, 'import matplotlib.pyplot as plt\n'), ((11491, 11547), 'matplotlib.pyplot.title', 'plt.title', (['f"""Training and Validation Accuracy - {slice}"""'], {}), "(f'Training and Validation Accuracy - {slice}')\n", (11500, 11547), True, 'import matplotlib.pyplot as plt\n'), ((11552, 11570), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11568, 11570), True, 'import matplotlib.pyplot as plt\n'), ((11580, 11590), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11588, 11590), True, 'import matplotlib.pyplot as plt\n'), ((12940, 12972), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_test', 'preds'], {}), '(y_test, preds)\n', (12957, 12972), True, 'import sklearn.metrics as metrics\n'), ((12987, 13008), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (12998, 13008), True, 'import sklearn.metrics as metrics\n'), ((13046, 13091), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['y_test', 'preds'], {}), '(y_test, preds)\n', (13076, 13091), True, 'import sklearn.metrics as metrics\n'), ((13103, 13133), 'sklearn.metrics.auc', 'metrics.auc', (['recall', 'precision'], {}), '(recall, precision)\n', (13114, 13133), True, 'import sklearn.metrics as metrics\n'), ((13183, 13203), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (13194, 13203), True, 'import matplotlib.pyplot as plt\n'), ((13208, 13250), 'matplotlib.pyplot.title', 'plt.title', (['f"""ROC Curve - {slice} - {data}"""'], {}), "(f'ROC Curve - {slice} - {data}')\n", (13217, 13250), True, 'import matplotlib.pyplot as plt\n'), ((13255, 13309), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', '"""b"""'], {'label': "('AUC = %0.2f' % roc_auc)"}), "(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)\n", 
(13263, 13309), True, 'import matplotlib.pyplot as plt\n'), ((13316, 13345), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (13326, 13345), True, 'import matplotlib.pyplot as plt\n'), ((13352, 13383), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (13360, 13383), True, 'import matplotlib.pyplot as plt\n'), ((13387, 13403), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (13395, 13403), True, 'import matplotlib.pyplot as plt\n'), ((13408, 13424), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (13416, 13424), True, 'import matplotlib.pyplot as plt\n'), ((13429, 13461), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (13439, 13461), True, 'import matplotlib.pyplot as plt\n'), ((13466, 13499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (13476, 13499), True, 'import matplotlib.pyplot as plt\n'), ((13504, 13522), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13520, 13522), True, 'import matplotlib.pyplot as plt\n'), ((13529, 13549), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (13540, 13549), True, 'import matplotlib.pyplot as plt\n'), ((13554, 13595), 'matplotlib.pyplot.title', 'plt.title', (['f"""PR Curve - {slice} - {data}"""'], {}), "(f'PR Curve - {slice} - {data}')\n", (13563, 13595), True, 'import matplotlib.pyplot as plt\n'), ((13600, 13678), 'matplotlib.pyplot.plot', 'plt.plot', (['recall', 'precision', '"""b--"""'], {'label': "('Area under PR Curve = %0.2f' % area)"}), "(recall, precision, 'b--', label='Area under PR Curve = %0.2f' % area)\n", (13608, 13678), True, 'import matplotlib.pyplot as plt\n'), ((13685, 13714), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower 
right')\n", (13695, 13714), True, 'import matplotlib.pyplot as plt\n'), ((13721, 13737), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (13729, 13737), True, 'import matplotlib.pyplot as plt\n'), ((13742, 13758), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (13750, 13758), True, 'import matplotlib.pyplot as plt\n'), ((13763, 13786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (13773, 13786), True, 'import matplotlib.pyplot as plt\n'), ((13791, 13811), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (13801, 13811), True, 'import matplotlib.pyplot as plt\n'), ((13816, 13834), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13832, 13834), True, 'import matplotlib.pyplot as plt\n'), ((13840, 13850), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13848, 13850), True, 'import matplotlib.pyplot as plt\n'), ((1972, 2001), 'math.floor', 'math.floor', (['(img_ct * targ_pct)'], {}), '(img_ct * targ_pct)\n', (1982, 2001), False, 'import math\n'), ((2300, 2321), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (2311, 2321), True, 'import pandas as pd\n'), ((2566, 2591), 'pydicom.dcmread', 'pydicom.dcmread', (['img_path'], {}), '(img_path)\n', (2581, 2591), False, 'import pydicom\n'), ((2640, 2663), 'numpy.percentile', 'np.percentile', (['img', '(2.5)'], {}), '(img, 2.5)\n', (2653, 2663), True, 'import numpy as np\n'), ((2682, 2706), 'numpy.percentile', 'np.percentile', (['img', '(97.5)'], {}), '(img, 97.5)\n', (2695, 2706), True, 'import numpy as np\n'), ((2729, 2759), 'numpy.clip', 'np.clip', (['img', 'img_min', 'img_max'], {}), '(img, img_min, img_max)\n', (2736, 2759), True, 'import numpy as np\n'), ((2846, 2878), 'cv2.resize', 'cv2.resize', (['img_norm', '(224, 224)'], {}), '(img_norm, (224, 224))\n', (2856, 2878), False, 'import cv2\n'), ((2896, 2941), 'numpy.repeat', 'np.repeat', 
(['img_resize[..., np.newaxis]', '(3)', '(-1)'], {}), '(img_resize[..., np.newaxis], 3, -1)\n', (2905, 2941), True, 'import numpy as np\n'), ((5129, 5136), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5134, 5136), True, 'from timeit import default_timer as timer\n'), ((10447, 10454), 'timeit.default_timer', 'timer', ([], {}), '()\n', (10452, 10454), True, 'from timeit import default_timer as timer\n'), ((11093, 11122), 'matplotlib.pyplot.plot', 'plt.plot', (['history[c]'], {'label': 'c'}), '(history[c], label=c)\n', (11101, 11122), True, 'import matplotlib.pyplot as plt\n'), ((11375, 11410), 'matplotlib.pyplot.plot', 'plt.plot', (['(100 * history[c])'], {'label': 'c'}), '(100 * history[c], label=c)\n', (11383, 11410), True, 'import matplotlib.pyplot as plt\n'), ((11719, 11734), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11732, 11734), False, 'import torch\n'), ((1557, 1572), 'os.listdir', 'os.listdir', (['col'], {}), '(col)\n', (1567, 1572), False, 'import os\n'), ((5917, 5941), 'torch.max', 'torch.max', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (5926, 5941), False, 'import torch\n'), ((10996, 11014), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(0.5)'], {}), '(0.5)\n', (11009, 11014), True, 'import matplotlib.pyplot as plt\n'), ((12346, 12369), 'torch.max', 'torch.max', (['score'], {'dim': '(1)'}), '(score, dim=1)\n', (12355, 12369), False, 'import torch\n'), ((13158, 13176), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(0.5)'], {}), '(0.5)\n', (13171, 13176), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1668), 'pydicom.dcmread', 'pydicom.dcmread', (["(col + '/' + x)"], {'stop_before_pixels': '(True)'}), "(col + '/' + x, stop_before_pixels=True)\n", (1628, 1668), False, 'import pydicom\n'), ((1801, 1856), 'pydicom.dcmread', 'pydicom.dcmread', (["(col + '/' + x)"], {'stop_before_pixels': '(True)'}), "(col + '/' + x, stop_before_pixels=True)\n", (1816, 1856), False, 'import pydicom\n'), ((2963, 2987), 'torch.from_numpy', 
'torch.from_numpy', (['img_3d'], {}), '(img_3d)\n', (2979, 2987), False, 'import torch\n'), ((6678, 6693), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6691, 6693), False, 'import torch\n'), ((7390, 7414), 'torch.max', 'torch.max', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (7399, 7414), False, 'import torch\n'), ((10041, 10130), 'pandas.DataFrame', 'pd.DataFrame', (['history'], {'columns': "['train_loss', 'valid_loss', 'train_acc', 'valid_acc']"}), "(history, columns=['train_loss', 'valid_loss', 'train_acc',\n 'valid_acc'])\n", (10053, 10130), True, 'import pandas as pd\n'), ((6433, 6440), 'timeit.default_timer', 'timer', ([], {}), '()\n', (6438, 6440), True, 'from timeit import default_timer as timer\n'), ((9540, 9547), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9545, 9547), True, 'from timeit import default_timer as timer\n'), ((9838, 9864), 'torch.load', 'torch.load', (['save_file_name'], {}), '(save_file_name)\n', (9848, 9864), False, 'import torch\n')] |
import json
import sys
import numpy as np
import requests
import grequests
import tldextract
# import resource
# resource.setrlimit(resource.RLIMIT_NOFILE, (11000, 11000))
def get_ref_value_code(url):
    """Fetch *url* with a browser-like header set and return its HTTP status code.

    Any request failure (connection error, timeout, invalid URL, ...) is
    mapped to 404 so callers can treat unreachable URLs uniformly.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
            'Upgrade-Insecure-Requests': '1', 'DNT': '1',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate'}
        r = requests.get(url, headers=headers, timeout=5, auth=('user', 'pass'))
        code = r.status_code
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the best-effort 404 fallback without
        # blocking Ctrl-C.
        code = 404
    return code
def exception_handler(request, exception):
    """grequests error callback: report every failed request as HTTP 404."""
    fallback_code = 404
    return fallback_code
def get_value_code_for_list(url_list):
    """Fetch every URL in *url_list* concurrently and return the responses.

    Returns the list produced by ``grequests.map`` — one response object per
    URL, or ``None`` for requests that failed.
    """
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
        'Upgrade-Insecure-Requests': '1', 'DNT': '1',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5', 'Accept-Encoding': 'gzip, deflate'}
    pending = (grequests.get(url, headers=browser_headers, timeout=5,
                              auth=('user', 'pass'))
               for url in url_list)
    return grequests.map(pending)
def load_url_list(file_name):
    """Read *file_name* and return its lines with newline characters removed."""
    with open(file_name) as handle:
        return [line.replace('\n', '') for line in handle]
#
# def applyParallel(split_lists, func):
# with Pool(cpu_count()) as p:
# ret_list = p.map(func, [url_list for url_list in split_lists])
# return list(ret_list)
#
#
def split_list(url_list):
    """Partition *url_list* into 1000 roughly equal consecutive chunks.

    Each chunk is returned as a plain Python list; chunks may be empty when
    the input holds fewer than 1000 items.
    """
    chunks = np.array_split(url_list, 1000)
    return [chunk.tolist() for chunk in chunks]
def extract_codes(file_name):
    """Resolve the HTTP status code of every URL listed in *file_name*.

    URLs are fetched in batches of ~1/1000 of the list; failed requests are
    recorded as 404. Returns a dict mapping each URL to its status code
    (later batches win on duplicate URLs, matching the original merge order).
    """
    # NOTE(review): the original computed an English-TLD-filtered copy of the
    # list here (`eng_tld` / `url_list_en`) but never used it; that dead code
    # has been removed.
    url_list = load_url_list(file_name)
    split_url_list = split_list(url_list)
    extracted_code_list = []
    counter = 0
    for split in split_url_list:
        responses = get_value_code_for_list(split)
        codes = [resp.status_code if resp is not None else 404
                 for resp in responses]
        print(codes[:15])
        extracted_code_list.append(dict(zip(split, codes)))
        # Bug fix: the original did `counter += 0`, so the progress counter
        # printed 0 forever.
        counter += 1
        print(counter)
    return {url: code for d in extracted_code_list for url, code in d.items()}
def main():
    """CLI entry point: map every URL in the file given as argv[1] to its
    HTTP status code and dump the mapping to url_code_dictionary.json."""
    input_path = sys.argv[1]
    code_mapping = extract_codes(input_path)
    with open('url_code_dictionary.json', 'w') as out_file:
        json.dump(code_mapping, out_file)
# Script entry point: expects the URL-list file path as sys.argv[1].
if __name__ == "__main__":
    main()
| [
"grequests.get",
"requests.get",
"numpy.array_split",
"tldextract.extract",
"grequests.map",
"json.dump"
] | [((1390, 1407), 'grequests.map', 'grequests.map', (['rs'], {}), '(rs)\n', (1403, 1407), False, 'import grequests\n'), ((616, 684), 'requests.get', 'requests.get', (['url'], {'headers': 'headers', 'timeout': '(5)', 'auth': "('user', 'pass')"}), "(url, headers=headers, timeout=5, auth=('user', 'pass'))\n", (628, 684), False, 'import requests\n'), ((1292, 1359), 'grequests.get', 'grequests.get', (['u'], {'headers': 'headers', 'timeout': '(5)', 'auth': "('user', 'pass')"}), "(u, headers=headers, timeout=5, auth=('user', 'pass'))\n", (1305, 1359), False, 'import grequests\n'), ((2968, 2984), 'json.dump', 'json.dump', (['df', 'f'], {}), '(df, f)\n', (2977, 2984), False, 'import json\n'), ((1890, 1920), 'numpy.array_split', 'np.array_split', (['url_list', '(1000)'], {}), '(url_list, 1000)\n', (1904, 1920), True, 'import numpy as np\n'), ((2258, 2281), 'tldextract.extract', 'tldextract.extract', (['url'], {}), '(url)\n', (2276, 2281), False, 'import tldextract\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Fabriquer des formes d'ondes avec numpy
"""
from pyo import *
import numpy as np
# Boot the pyo audio server.
s = Server().boot()
# Get the audio buffer size (samples per processing block).
bs = s.getBufferSize()
# Create a table one buffer long and read it in a loop.
t = DataTable(size=bs)
osc = TableRead(t, freq=t.getRate(), loop=True, mul=1).out()
# Share the table memory with a numpy array (writing to `arr` updates the table).
arr = np.asarray(t.getBuffer())
# Definitions of the different waveforms.
def sine():
    """Write one period of a sine wave into the shared table."""
    phase = np.linspace(-np.pi, np.pi, bs)
    arr[:] = np.sin(phase)
def sawup():
    """Rising sawtooth: a linear ramp from -1 to 1."""
    arr[:] = np.linspace(-1, 1, num=bs)
def sawdown():
    """Falling sawtooth: a linear ramp from 1 to -1."""
    arr[:] = np.linspace(1, -1, num=bs)
def square():
    """Square wave: -1 on the first half of the period, +1 on the second."""
    ramp = np.linspace(-1, 1, bs)
    arr[:] = np.where(ramp >= 0, 1, -1)
def triangle():
    """Triangle wave built by folding a ramp: 2*|ramp| - 1."""
    ramp = np.linspace(-1, 1, bs)
    arr[:] = 2 * np.abs(ramp) - 1
# Display: spectrum analyser and oscilloscope views of the oscillator.
sp = Spectrum(osc)
sc = Scope(osc)
# Call the function that fills the table with a triangle wave.
triangle()
# Start the server.
s.start()
s.gui(locals())
| [
"numpy.linspace"
] | [((609, 631), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'bs'], {}), '(-1, 1, bs)\n', (620, 631), True, 'import numpy as np\n'), ((681, 703), 'numpy.linspace', 'np.linspace', (['(1)', '(-1)', 'bs'], {}), '(1, -1, bs)\n', (692, 703), True, 'import numpy as np\n'), ((532, 562), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', 'bs'], {}), '(-np.pi, np.pi, bs)\n', (543, 562), True, 'import numpy as np\n'), ((775, 797), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'bs'], {}), '(-1, 1, bs)\n', (786, 797), True, 'import numpy as np\n'), ((857, 879), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'bs'], {}), '(-1, 1, bs)\n', (868, 879), True, 'import numpy as np\n')] |
import torch.nn as nn
import torchvision
import torch, os
from skimage import morphology as morph
import numpy as np
from src.modules.eprop import eprop
import torch.utils.model_zoo as model_zoo
from scripts.SEAM.network import resnet38_SEAM, resnet38_aff
#----------- LC-FCN8
class FCN8VGG16(nn.Module):
    """LC-FCN8: an FCN-8s semantic-segmentation head on a VGG16 backbone.

    Optionally adds attention gates (Attention_block) on the two skip
    connections and/or an affinity refinement network (resnet38_aff) applied
    to the output logits.
    """
    def __init__(self, n_classes, with_attention=False, with_affinity=False,
                    with_affinity_average=False, shared=False, exp_dict=None):
        """Build the network and load pretrained VGG16 weights.

        Parameters
        ----------
        n_classes: number of output classes (channels of the logits).
        with_attention: gate the pool3/pool4 skip connections with attention.
        with_affinity: refine logits with the resnet38_aff affinity model.
        with_affinity_average: average affinity-refined and raw logits.
        shared: produce the logits directly from the affinity model.
        exp_dict: experiment config forwarded to resnet38_aff.Net.
        """
        super().__init__()
        self.n_classes = n_classes
        self.shared = shared
        # PREDEFINE LAYERS
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.relu = nn.ReLU(inplace=True)
        # VGG16 PART
        # conv1_1 pads the input by 100 pixels; the fixed crops in forward()
        # (offsets 5, 9, 31) later trim the upsampled maps back, as in FCN-8s.
        self.conv1_1 = conv3x3(3, 64, stride=1, padding=100)
        self.conv1_2 = conv3x3(64, 64)
        self.conv2_1 = conv3x3(64, 128)
        self.conv2_2 = conv3x3(128, 128)
        self.conv3_1 = conv3x3(128, 256)
        self.conv3_2 = conv3x3(256, 256)
        self.conv3_3 = conv3x3(256, 256)
        self.conv4_1 = conv3x3(256, 512)
        self.conv4_2 = conv3x3(512, 512)
        self.conv4_3 = conv3x3(512, 512)
        self.conv5_1 = conv3x3(512, 512)
        self.conv5_2 = conv3x3(512, 512)
        self.conv5_3 = conv3x3(512, 512)
        # Fully-connected layers of VGG16 recast as convolutions.
        self.fc6 = nn.Conv2d(512, 4096, kernel_size=7, stride=1, padding=0)
        self.dropout_f6 = nn.Dropout()
        self.fc7 = nn.Conv2d(4096, 4096, kernel_size=1, stride=1, padding=0)
        self.dropout_f7 = nn.Dropout()
        # SEMANTIC SEGMENTATION PART
        self.scoring_layer = nn.Conv2d(4096, self.n_classes, kernel_size=1,
                                      stride=1, padding=0)
        # Learnable upsampling layers (initialized to bilinear below).
        self.upscore2 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
                                           kernel_size=4, stride=2, bias=False)
        self.upscore_pool4 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
                                                kernel_size=4, stride=2, bias=False)
        self.upscore8 = nn.ConvTranspose2d(self.n_classes, self.n_classes,
                                           kernel_size=16, stride=8, bias=False)
        # Initialize weights: scoring and skip heads start at zero, the
        # transposed convolutions start as bilinear interpolation.
        self.scoring_layer.weight.data.zero_()
        self.scoring_layer.bias.data.zero_()
        self.score_pool3 = nn.Conv2d(256, self.n_classes, kernel_size=1)
        self.score_pool4 = nn.Conv2d(512, self.n_classes, kernel_size=1)
        self.score_pool3.weight.data.zero_()
        self.score_pool3.bias.data.zero_()
        self.score_pool4.weight.data.zero_()
        self.score_pool4.bias.data.zero_()
        self.upscore2.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
        self.upscore_pool4.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 4))
        self.upscore8.weight.data.copy_(get_upsampling_weight(self.n_classes, self.n_classes, 16))
        self.eprop = eprop.EmbeddingPropagation()
        # Pretrained layers: copy VGG16 ImageNet weights into the backbone.
        # Parameters are matched positionally against the downloaded
        # state_dict (weight/bias pairs in definition order).
        pth_url = 'https://download.pytorch.org/models/vgg16-397923af.pth' # download from model zoo
        state_dict = model_zoo.load_url(pth_url)
        layer_names = [layer_name for layer_name in state_dict]
        counter = 0
        for p in self.parameters():
            if counter < 26: # conv1_1 to pool5
                p.data = state_dict[ layer_names[counter] ]
            elif counter == 26: # fc6 weight
                # Reshape the (4096, 25088) linear weight to conv form.
                p.data = state_dict[ layer_names[counter] ].view(4096, 512, 7, 7)
            elif counter == 27: # fc6 bias
                p.data = state_dict[ layer_names[counter] ]
            elif counter == 28: # fc7 weight
                p.data = state_dict[ layer_names[counter] ].view(4096, 4096, 1, 1)
            elif counter == 29: # fc7 bias
                p.data = state_dict[ layer_names[counter] ]
            counter += 1
        self.with_attention = with_attention
        if with_attention:
            # Attention gates on the two skip connections (requires CUDA).
            self.att1 = Attention_block(self.n_classes,
                                        self.n_classes,
                                        self.n_classes).cuda()
            self.att2 = Attention_block(self.n_classes,
                                        self.n_classes,
                                        self.n_classes).cuda()
        self.with_affinity = with_affinity
        if with_affinity or self.shared:
            # Affinity model with pretrained SEAM weights (requires CUDA and
            # the weight file at the hard-coded path below).
            self.model_aff = resnet38_aff.Net(self.n_classes, exp_dict).cuda()
            self.model_aff.load_state_dict(torch.load(os.path.join('/mnt/public/weights', 'resnet38_aff_SEAM.pth')), strict=False)
        self.with_affinity_average = with_affinity_average
        # siamese
        # self.siamese_network = Siamese()
    def forward(self, x, return_features=False, return_cam=False, crf=False):
        """Compute per-pixel class logits for a (n, c, h, w) image batch.

        Returns logits of shape (n, n_classes, h, w); with return_features,
        returns (logits, upscore_pool4, fc7); with return_cam, returns
        (cam, logits_aff).

        NOTE(review): `cam` and `logits_aff` are only bound when `shared`
        resp. `with_affinity` are enabled — calling with return_cam=True
        otherwise raises UnboundLocalError.
        """
        n,c,h,w = x.size()
        # VGG16 PART
        conv1_1 = self.relu( self.conv1_1(x) )
        conv1_2 = self.relu( self.conv1_2(conv1_1) )
        pool1 = self.pool(conv1_2)
        conv2_1 = self.relu( self.conv2_1(pool1) )
        conv2_2 = self.relu( self.conv2_2(conv2_1) )
        pool2 = self.pool(conv2_2)
        # pool2 = self.eprop(pool2)
        conv3_1 = self.relu( self.conv3_1(pool2) )
        conv3_2 = self.relu( self.conv3_2(conv3_1) )
        conv3_3 = self.relu( self.conv3_3(conv3_2) )
        pool3 = self.pool(conv3_3)
        conv4_1 = self.relu( self.conv4_1(pool3) )
        conv4_2 = self.relu( self.conv4_2(conv4_1) )
        conv4_3 = self.relu( self.conv4_3(conv4_2) )
        pool4 = self.pool(conv4_3)
        conv5_1 = self.relu( self.conv5_1(pool4) )
        conv5_2 = self.relu( self.conv5_2(conv5_1) )
        conv5_3 = self.relu( self.conv5_3(conv5_2) )
        pool5 = self.pool(conv5_3)
        fc6 = self.dropout_f6( self.relu( self.fc6(pool5) ) )
        fc7 = self.dropout_f7( self.relu( self.fc7(fc6) ) )
        # SEMANTIC SEGMENTATION PART
        # first: coarse scores upsampled x2
        scores = self.scoring_layer( fc7 )
        upscore2 = self.upscore2(scores)
        # second: fuse with (cropped) pool4 skip scores, upsample x2
        score_pool4 = self.score_pool4(pool4)
        score_pool4c = score_pool4[:, :, 5:5+upscore2.size(2),
                                         5:5+upscore2.size(3)]
        if self.with_attention:
            score_pool4c = self.att1(g=upscore2, x=score_pool4c)
        upscore_pool4 = self.upscore_pool4(score_pool4c + upscore2)
        # third: fuse with (cropped) pool3 skip scores, upsample x8
        score_pool3 = self.score_pool3(pool3)
        score_pool3c = score_pool3[:, :, 9:9+upscore_pool4.size(2),
                                         9:9+upscore_pool4.size(3)]
        if self.with_attention:
            score_pool3c = self.att2(g=upscore_pool4, x=score_pool3c)
        output = self.upscore8(score_pool3c + upscore_pool4)
        # Crop away the margin introduced by conv1_1's padding=100.
        logits = output[:, :, 31: (31 + h), 31: (31 + w)].contiguous()
        if self.shared:
            logits = cam = self.model_aff.output_logits(x)
        if self.with_affinity:
            logits_aff = self.model_aff.apply_affinity(x, logits, crf=crf)
            if self.with_affinity_average:
                logits = (logits_aff + logits) / 2.
            else:
                logits = logits_aff
        if return_features:
            return logits, upscore_pool4, fc7
        if return_cam:
            return cam, logits_aff
        return logits
# ===========================================================
# helpers
def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Build a 2D bilinear-interpolation kernel for ConvTranspose2d upsampling.

    The returned float tensor has shape
    (in_channels, out_channels, kernel_size, kernel_size); the bilinear
    filter sits on the channel "diagonal" and every other entry is zero.
    """
    factor = (kernel_size + 1) // 2
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    # Separable bilinear profile: 1 at the center, falling linearly to the edges.
    coords = np.arange(kernel_size)
    ramp = 1 - np.abs(coords - center) / factor
    bilinear = np.outer(ramp, ramp)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
                      dtype=np.float64)
    weight[range(in_channels), range(out_channels), :, :] = bilinear
    return torch.from_numpy(weight).float()
def conv3x3(in_planes, out_planes, stride=1, padding=1):
    """3x3 convolution; padding defaults to 1 so spatial size is preserved
    at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=(3, 3),
        stride=(stride, stride),
        padding=(padding, padding),
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (channel projection) with no padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
    )
class Attention_block(nn.Module):
    """Additive attention gate (Attention U-Net style).

    ``g`` is the gating signal, ``x`` the skip-connection features; the skip
    features are rescaled by a single-channel sigmoid attention map computed
    from relu(W_g(g) + W_x(x)).
    """
    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()
        # 1x1 projections of the gate and the skip features into F_int channels.
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
        )
        # Collapse the combined features to a single attention channel in [0, 1].
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.Sigmoid(),
        )
        self.relu = nn.ReLU(inplace=True)
    def forward(self, g, x):
        combined = self.relu(self.W_g(g) + self.W_x(x))
        attention = self.psi(combined)
        return x * attention
return x*psi
import torch
import torch.nn as nn
import torch.nn.functional as F
class Siamese(nn.Module):
    """Siamese similarity network: both inputs pass through one shared CNN,
    and a linear head scores the absolute difference of their embeddings.

    # NOTE(review): the first conv takes 2 input channels and the head
    # expects a 4096-dim flattened feature — input spatial size must match
    # (the `64@96*96`-style comments suggest the original design); confirm
    # against the caller.
    """
    def __init__(self):
        super(Siamese, self).__init__()
        # Shared convolutional encoder.
        self.conv = nn.Sequential(
            nn.Conv2d(2, 64, 10),  # 64@96*96
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),  # 64@48*48
            nn.Conv2d(64, 128, 7),
            nn.ReLU(),    # 128@42*42
            nn.MaxPool2d(2),   # 128@21*21
            nn.Conv2d(128, 128, 4),
            nn.ReLU(), # 128@18*18
            nn.MaxPool2d(2), # 128@9*9
            nn.Conv2d(128, 256, 4),
            nn.ReLU(),   # 256@6*6
        )
        # Embedding head ("liner" sic): flatten -> 4096-dim sigmoid features.
        self.liner = nn.Sequential(nn.Linear(4096, 4096), nn.Sigmoid())
        self.out = nn.Linear(4096, 1)
    def forward_one(self, x):
        """Embed a single input through the shared encoder and head."""
        x = self.conv(x)
        x = x.view(x.size()[0], -1)
        x = self.liner(x)
        return x
    def forward(self, x1, x2):
        """Return a (raw, un-squashed) similarity score for the pair."""
        out1 = self.forward_one(x1)
        out2 = self.forward_one(x2)
        # L1 distance between the two embeddings, scored by a linear layer.
        dis = torch.abs(out1 - out2)
        out = self.out(dis)
        # return self.sigmoid(out)
        return out | [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.abs",
"scripts.SEAM.network.resnet38_aff.Net",
"src.modules.eprop.eprop.EmbeddingPropagation",
"torch.utils.model_zoo.load_url",
"os.path.join",
"torch.from_numpy",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.nn.MaxPool2d",
"torch.... | [((7894, 7980), 'numpy.zeros', 'np.zeros', (['(in_channels, out_channels, kernel_size, kernel_size)'], {'dtype': 'np.float64'}), '((in_channels, out_channels, kernel_size, kernel_size), dtype=np.\n float64)\n', (7902, 7980), True, 'import numpy as np\n'), ((8212, 8322), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3, 3)', 'stride': '(stride, stride)', 'padding': '(padding, padding)'}), '(in_planes, out_planes, kernel_size=(3, 3), stride=(stride, stride\n ), padding=(padding, padding))\n', (8221, 8322), True, 'import torch.nn as nn\n'), ((8429, 8502), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, padding=0)\n', (8438, 8502), True, 'import torch.nn as nn\n'), ((597, 650), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=2, stride=2, ceil_mode=True)\n', (609, 650), True, 'import torch.nn as nn\n'), ((671, 692), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (678, 692), True, 'import torch.nn as nn\n'), ((1319, 1375), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(4096)'], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(0)'}), '(512, 4096, kernel_size=7, stride=1, padding=0)\n', (1328, 1375), True, 'import torch.nn as nn\n'), ((1402, 1414), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1412, 1414), True, 'import torch.nn as nn\n'), ((1434, 1491), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4096)', '(4096)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(4096, 4096, kernel_size=1, stride=1, padding=0)\n', (1443, 1491), True, 'import torch.nn as nn\n'), ((1518, 1530), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1528, 1530), True, 'import torch.nn as nn\n'), ((1596, 1663), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4096)', 'self.n_classes'], {'kernel_size': '(1)', 
'stride': '(1)', 'padding': '(0)'}), '(4096, self.n_classes, kernel_size=1, stride=1, padding=0)\n', (1605, 1663), True, 'import torch.nn as nn\n'), ((1728, 1819), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['self.n_classes', 'self.n_classes'], {'kernel_size': '(4)', 'stride': '(2)', 'bias': '(False)'}), '(self.n_classes, self.n_classes, kernel_size=4, stride=2,\n bias=False)\n', (1746, 1819), True, 'import torch.nn as nn\n'), ((1888, 1979), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['self.n_classes', 'self.n_classes'], {'kernel_size': '(4)', 'stride': '(2)', 'bias': '(False)'}), '(self.n_classes, self.n_classes, kernel_size=4, stride=2,\n bias=False)\n', (1906, 1979), True, 'import torch.nn as nn\n'), ((2041, 2133), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['self.n_classes', 'self.n_classes'], {'kernel_size': '(16)', 'stride': '(8)', 'bias': '(False)'}), '(self.n_classes, self.n_classes, kernel_size=16, stride=8,\n bias=False)\n', (2059, 2133), True, 'import torch.nn as nn\n'), ((2332, 2377), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', 'self.n_classes'], {'kernel_size': '(1)'}), '(256, self.n_classes, kernel_size=1)\n', (2341, 2377), True, 'import torch.nn as nn\n'), ((2405, 2450), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'self.n_classes'], {'kernel_size': '(1)'}), '(512, self.n_classes, kernel_size=1)\n', (2414, 2450), True, 'import torch.nn as nn\n'), ((2949, 2977), 'src.modules.eprop.eprop.EmbeddingPropagation', 'eprop.EmbeddingPropagation', ([], {}), '()\n', (2975, 2977), False, 'from src.modules.eprop import eprop\n'), ((3128, 3155), 'torch.utils.model_zoo.load_url', 'model_zoo.load_url', (['pth_url'], {}), '(pth_url)\n', (3146, 3155), True, 'import torch.utils.model_zoo as model_zoo\n'), ((9184, 9205), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9191, 9205), True, 'import torch.nn as nn\n'), ((10092, 10110), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(1)'], {}), '(4096, 1)\n', (10101, 10110), 
True, 'import torch.nn as nn\n'), ((10364, 10386), 'torch.abs', 'torch.abs', (['(out1 - out2)'], {}), '(out1 - out2)\n', (10373, 10386), False, 'import torch\n'), ((8074, 8098), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (8090, 8098), False, 'import torch\n'), ((8690, 8758), 'torch.nn.Conv2d', 'nn.Conv2d', (['F_g', 'F_int'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True)\n', (8699, 8758), True, 'import torch.nn as nn\n'), ((8862, 8930), 'torch.nn.Conv2d', 'nn.Conv2d', (['F_l', 'F_int'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True)\n', (8871, 8930), True, 'import torch.nn as nn\n'), ((9022, 9088), 'torch.nn.Conv2d', 'nn.Conv2d', (['F_int', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True)\n', (9031, 9088), True, 'import torch.nn as nn\n'), ((9132, 9144), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9142, 9144), True, 'import torch.nn as nn\n'), ((9584, 9604), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2)', '(64)', '(10)'], {}), '(2, 64, 10)\n', (9593, 9604), True, 'import torch.nn as nn\n'), ((9630, 9651), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (9637, 9651), True, 'import torch.nn as nn\n'), ((9665, 9680), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (9677, 9680), True, 'import torch.nn as nn\n'), ((9706, 9727), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(7)'], {}), '(64, 128, 7)\n', (9715, 9727), True, 'import torch.nn as nn\n'), ((9741, 9750), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9748, 9750), True, 'import torch.nn as nn\n'), ((9779, 9794), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (9791, 9794), True, 'import torch.nn as nn\n'), ((9822, 9844), 'torch.nn.Conv2d', 
'nn.Conv2d', (['(128)', '(128)', '(4)'], {}), '(128, 128, 4)\n', (9831, 9844), True, 'import torch.nn as nn\n'), ((9858, 9867), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9865, 9867), True, 'import torch.nn as nn\n'), ((9893, 9908), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (9905, 9908), True, 'import torch.nn as nn\n'), ((9932, 9954), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(4)'], {}), '(128, 256, 4)\n', (9941, 9954), True, 'import torch.nn as nn\n'), ((9968, 9977), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9975, 9977), True, 'import torch.nn as nn\n'), ((10036, 10057), 'torch.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (10045, 10057), True, 'import torch.nn as nn\n'), ((10059, 10071), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (10069, 10071), True, 'import torch.nn as nn\n'), ((4438, 4480), 'scripts.SEAM.network.resnet38_aff.Net', 'resnet38_aff.Net', (['self.n_classes', 'exp_dict'], {}), '(self.n_classes, exp_dict)\n', (4454, 4480), False, 'from scripts.SEAM.network import resnet38_SEAM, resnet38_aff\n'), ((4542, 4602), 'os.path.join', 'os.path.join', (['"""/mnt/public/weights"""', '"""resnet38_aff_SEAM.pth"""'], {}), "('/mnt/public/weights', 'resnet38_aff_SEAM.pth')\n", (4554, 4602), False, 'import torch, os\n')] |
"""Components on datasets"""
import numpy as np
from .base import DatasetBunch, RuleBunch
def load_intents_from_mysql(configs: dict) -> DatasetBunch:
    """
    Load intent dataset from mysql database.
    Parameters
    ----------
    configs: Configs of mysql connection, which includes keys:
        "host" - host of database,
        "port" - port of database,
        "user" - user of database,
        "password" - password of database,
        "db" - database name of the dataset,
        "table" - table name of the dataset,
        "charset" - charset of the dataset, default value "utf8",
        "customer" - the customer's name.
    Returns
    -------
    Sklearn Bunch instance, including attributes:
    words - strings, user's words
    context - string in json format to offer extended features of context
    intent_labels - string, intent name in form of multi-levels
        separated with "," for multi-labels, such as
        "news/sports/football,person/story", which means labels
        "news/sports/football" and "person/story".
    """
    import pymysql
    for key in ["host", "port", "user", "password", "db", "table"]:
        assert key in configs, "mysql configs error!"
    words = []
    contexts = []
    intents = []
    db = pymysql.connect(host=configs["host"], port=configs["port"],
                         user=configs["user"], password=configs["password"])
    cursor = db.cursor()
    try:
        customer = configs.get("customer")
        # NOTE(review): db/table/customer are interpolated into the SQL string
        # below — they must come from trusted configuration, never user input.
        if customer and customer != "common":
            sql = "select word, context, intent_labels " \
                  "from {db}.{table} " \
                  "where in_use=1 and customer in ('common', '{customer}')". \
                format(db=configs["db"], table=configs["table"],
                       customer=customer)
        else:
            sql = "select word, context, intent_labels " \
                  "from {db}.{table} " \
                  "where in_use=1 and customer='common'". \
                format(db=configs["db"], table=configs["table"])
        cursor.execute(sql)
        for word, context, intent_labels in cursor.fetchall():
            # Skip unlabeled rows and rows with neither word nor context.
            if not intent_labels:
                continue
            if not word and not context:
                continue
            words.append(word.lower() if word else "")
            contexts.append(context.lower() if context else "{}")
            intents.append(intent_labels.lower())
    finally:
        # Close the cursor and connection even if the query fails.
        cursor.close()
        db.close()
    # `np.str` (a deprecated alias of the builtin `str`) was removed in
    # NumPy 1.24; use `str` directly.
    return DatasetBunch(words=np.array(words, dtype=str),
                        contexts=np.array(contexts, dtype=str),
                        intents=np.array(intents, dtype=str))
def load_intents_from_csv(csv_path: str, customer: str= "common") -> DatasetBunch:
"""
Load intent dataset from csv file, which has fields:
words - user's words.
It could an empty string "" if no word input, like predict user's
intent merely by context information.
context - json format string.
If no context information, it could as well be an empty string ""
or "{}".
intent_labels - labels of intent.
String separated with ",", such as
"news/sports/football,person/story", which means labels
"news/sports/football" and "person/story".
customer - customer, default value "common" means it is common for
any customer.
Parameters
----------
csv_path: the path of the dataset file.
customer: the customer of the dataset.
Returns
-------
Sklearn Bunch instance, including attributes:
words - strings, user's words
context - string in json format to offer extended features of context
intent_labels - string, intent name in form of multi-levels
separated with "," for multi-labels, such as
"news/sports/football,person/story", which means labels
"news/sports/football" and "person/story".
"""
import csv
with open(csv_path) as f:
csv_file = csv.reader(f)
next(csv_file) # skip the header
words = []
contexts = []
intents = []
for word, context, intent_labels, cust in csv_file:
if not word and (not context or context.strip()=="{}"):
continue
if cust not in ("common", customer):
continue
words.append(word)
contexts.append(context) if context else context.append("{}")
intents.append(intent_labels)
return DatasetBunch(words=np.array(words, dtype=np.str),
contexts=np.array(contexts, dtype=np.str),
intents=np.array(intents, dtype=np.str))
def load_rules_from_mysql(configs: dict) -> RuleBunch:
"""
Load rules from mysql database.
Parameters
----------
configs: Configs of mysql connnection, which includes keys:
"host" - host of database,
"port" - port of database
"user" - user of database,
"password" - password to login the database,
"db" - database name of the dataset,
"table" - table name of the dataset,
"charset" - charset of the dataset, default value "utf8",
"customer" - the customer's name.
Returns
-------
RuleBunch instance including words_rules and context_rules.
"""
import pymysql
for key in ["host", "port", "user", "password", "db", "table", "customer"]:
assert key in configs, "mysql configs error!"
words_rules = []
context_rules = []
intent_labels = []
db = pymysql.connect(host=configs["host"], port=configs["port"],
user=configs["user"], password=configs["password"])
cursor = db.cursor()
sql = "select words_rule, context_rule, intent_labels " \
"from {db}.{table} " \
"where in_use=1 and customer='{customer}'". \
format(db=configs["db"], table=configs["table"],
customer=configs["customer"])
cursor.execute(sql)
for words_rule, context_rule, intent_label in cursor.fetchall():
if not intent_label or not intent_label.strip():
continue
if not words_rule and (not context_rule or context_rule.strip() == "{}"):
continue
words_rules.append(words_rule) if words_rule \
else words_rules.append("")
context_rules.append(context_rule) if context_rule \
else context_rules.append({})
intent_labels.append([label.stip() for label in intent_label.split(",")])
cursor.close()
db.close()
return RuleBunch(words_rules=words_rules, context_rules=context_rules,
intent_labels=intent_labels)
| [
"numpy.array",
"pymysql.connect",
"csv.reader"
] | [((1264, 1380), 'pymysql.connect', 'pymysql.connect', ([], {'host': "configs['host']", 'port': "configs['port']", 'user': "configs['user']", 'password': "configs['password']"}), "(host=configs['host'], port=configs['port'], user=configs[\n 'user'], password=configs['password'])\n", (1279, 1380), False, 'import pymysql\n'), ((5520, 5636), 'pymysql.connect', 'pymysql.connect', ([], {'host': "configs['host']", 'port': "configs['port']", 'user': "configs['user']", 'password': "configs['password']"}), "(host=configs['host'], port=configs['port'], user=configs[\n 'user'], password=configs['password'])\n", (5535, 5636), False, 'import pymysql\n'), ((3949, 3962), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (3959, 3962), False, 'import csv\n'), ((2441, 2470), 'numpy.array', 'np.array', (['words'], {'dtype': 'np.str'}), '(words, dtype=np.str)\n', (2449, 2470), True, 'import numpy as np\n'), ((2505, 2537), 'numpy.array', 'np.array', (['contexts'], {'dtype': 'np.str'}), '(contexts, dtype=np.str)\n', (2513, 2537), True, 'import numpy as np\n'), ((2571, 2602), 'numpy.array', 'np.array', (['intents'], {'dtype': 'np.str'}), '(intents, dtype=np.str)\n', (2579, 2602), True, 'import numpy as np\n'), ((4476, 4505), 'numpy.array', 'np.array', (['words'], {'dtype': 'np.str'}), '(words, dtype=np.str)\n', (4484, 4505), True, 'import numpy as np\n'), ((4540, 4572), 'numpy.array', 'np.array', (['contexts'], {'dtype': 'np.str'}), '(contexts, dtype=np.str)\n', (4548, 4572), True, 'import numpy as np\n'), ((4606, 4637), 'numpy.array', 'np.array', (['intents'], {'dtype': 'np.str'}), '(intents, dtype=np.str)\n', (4614, 4637), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 13:55:58 2019
@author: Roel
"""
#%% Loading data
## See https://books.google.nl/books?id=C3FyDwAAQBAJ&pg=PA238&lpg=PA238&dq=.h5+.raw&source=bl&ots=D1CgyXYZjC&sig=ACfU3U1RIifIb8Rnn1pDRTalNVxBE0iebg&hl=nl&sa=X&ved=2ahUKEwjB6cSBntjlAhUPalAKHc8nAVUQ6AEwAnoECAkQAQ#v=onepage&q=.h5%20.raw&f=false
## For specs on how data is saved.
## https://github.com/cbassa/lofar_bf_tutorials/blob/master/solutions/reading_hdf5_headers.ipynb
## https://github.com/cbassa/lofar_bf_tutorials/blob/master/solutions/reading_hdf5_stokes_data.ipynb
## https://www.astron.nl/lofarschool2018/Documents/Thursday/bassa.pdf
## https://stackoverflow.com/questions/28170623/how-to-read-hdf5-files-in-python
import h5py
import numpy as np
import matplotlib.pyplot as plt
filename = 'L701913_SAP000_B000_S0_P000_bf.h5'
h5 = h5py.File(filename, "r")
## https://stackoverflow.com/questions/46733052/read-hdf5-file-into-numpy-array
#%% Showing essential attributes and structure of the
def print_name(name):
print(name)
h5.visit(print_name)
group = h5["/"]
keys = sorted(["%s"%item for item in sorted(list(group.attrs))])
for key in keys:
print(key + " = " + str(group.attrs[key]))
group = h5["/SUB_ARRAY_POINTING_000/BEAM_000/STOKES_0"]
keys = sorted(["%s"%item for item in sorted(list(group.attrs))])
for key in keys:
print(key + " = " + str(group.attrs[key]))
#%% plotting (part of) the data
stokes = h5["/SUB_ARRAY_POINTING_000/BEAM_000/STOKES_0"]
data = 10.0*np.log10(stokes[1::300,:])
freq = h5["/SUB_ARRAY_POINTING_000/BEAM_000/COORDINATES/COORDINATE_1"].attrs["AXIS_VALUES_WORLD"]*1e-6
tsamp = h5["/SUB_ARRAY_POINTING_000/BEAM_000/COORDINATES/COORDINATE_0"].attrs["INCREMENT"]
t = tsamp*np.arange(stokes.shape[0])
vmin = np.median(data)-2.0*np.std(data)
vmax = np.median(data)+6.0*np.std(data)
plt.figure(figsize=(20, 10))
plt.imshow(data.T, aspect='auto', vmin=vmin, vmax=vmax, origin='lower', extent=[t[0], t[-1], freq[0], freq[-1]])
plt.xlabel("Time (s)")
plt.ylabel("Frequency (MHz)")
plt.colorbar().set_label('Power (dB)', rotation=270) | [
"matplotlib.pyplot.imshow",
"numpy.log10",
"numpy.median",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"h5py.File",
"matplotlib.pyplot.figure",
"numpy.std",
"numpy.arange"
] | [((866, 890), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (875, 890), False, 'import h5py\n'), ((1904, 1932), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1914, 1932), True, 'import matplotlib.pyplot as plt\n'), ((1934, 2050), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data.T'], {'aspect': '"""auto"""', 'vmin': 'vmin', 'vmax': 'vmax', 'origin': '"""lower"""', 'extent': '[t[0], t[-1], freq[0], freq[-1]]'}), "(data.T, aspect='auto', vmin=vmin, vmax=vmax, origin='lower',\n extent=[t[0], t[-1], freq[0], freq[-1]])\n", (1944, 2050), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2070), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (2058, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2072, 2101), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (MHz)"""'], {}), "('Frequency (MHz)')\n", (2082, 2101), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1581), 'numpy.log10', 'np.log10', (['stokes[1::300, :]'], {}), '(stokes[1::300, :])\n', (1562, 1581), True, 'import numpy as np\n'), ((1790, 1816), 'numpy.arange', 'np.arange', (['stokes.shape[0]'], {}), '(stokes.shape[0])\n', (1799, 1816), True, 'import numpy as np\n'), ((1827, 1842), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (1836, 1842), True, 'import numpy as np\n'), ((1868, 1883), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (1877, 1883), True, 'import numpy as np\n'), ((1847, 1859), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (1853, 1859), True, 'import numpy as np\n'), ((1888, 1900), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (1894, 1900), True, 'import numpy as np\n'), ((2103, 2117), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2115, 2117), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) WiPhy Development Team
# This library is released under the MIT License, see LICENSE.txt
__all__ = ['lorenz', 'RungeKutta', 'odeintRungeKutta', 'logisticMap', 'logisticMapClosedForm', 'getLogisticMapSequence',
'getLogisticMapSequenceOriginal', 'getUniformLogisticMapSequenceOriginal',
'getSecondChebyshevPolynomialSequence']
import numpy as np
from numba import njit
def lorenz(p, t, rho=28.0, sigma=10.0, beta=8.0 / 3.0):
return np.array([sigma * (p[1] - p[0]), p[0] * (rho - p[2]) - p[1], p[0] * p[1] - beta * p[2]])
def RungeKutta(f, p, t, h):
k1 = f(p, t)
k2 = f(p + h / 2.0 * k1, t + h / 2.0)
k3 = f(p + h / 2.0 * k2, t + h / 2.0)
k4 = f(p + h * k3, t + h)
return p + h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)
@njit
def odeintRungeKutta(f, initialp, ts):
ret = np.zeros((len(ts), 3))
ret[0, :] = initialp
for i in range(len(ts) - 1):
ret[i + 1, :] = RungeKutta(f, ret[i, :], ts[i], ts[i + 1] - ts[i])
return ret
@njit
def logisticMap(xn, a=4.0):
return a * xn * (1.0 - xn)
# with a = 4.0
def logisticMapClosedForm(x0, i):
return np.square(np.sin(np.exp2(i) * np.arcsin(np.sqrt(x0))))
@njit
def getLogisticMapSequence(x0, size):
ret = np.zeros(size)
ret[0] = x0
ret[1:size] = logisticMapClosedForm(x0, np.arange(1, size))
# asqx0 = np.arcsin(np.sqrt(x0))
# for i in range(1, len(ret)):
# ret[i] = np.square(np.sin(2**i * asqx0))
# 2 ** (i + log2(asqx0))
return ret
# x0 \in [0,1]
@njit
def getLogisticMapSequenceOriginal(x0, size):
ret = np.zeros(size)
ret[0] = x0
for i in range(1, len(ret)):
ret[i] = logisticMap(ret[i - 1])
return ret
# x0 \in [0,1]
@njit
def getUniformLogisticMapSequenceOriginal(x0, size):
ret = getLogisticMapSequenceOriginal(x0, size)
return 2.0 * np.arcsin(np.sqrt(ret)) / np.pi
# x0 \in [-1,1]
@njit
def getSecondChebyshevPolynomialSequence(x0, size):
ret = np.zeros(size)
ret[0] = x0
for i in range(1, len(ret)):
ret[i] = 1 - 2 * ret[i - 1] ** 2
# normalization
print("np.mean(ret) = %f" % np.mean(ret))
ret -= np.mean(ret)
print("np.sqrt(np.var(ret)) = %f " % np.sqrt(np.var(ret)))
ret /= np.sqrt(np.var(ret))
return ret
| [
"numpy.mean",
"numpy.sqrt",
"numpy.exp2",
"numpy.array",
"numpy.zeros",
"numpy.var",
"numpy.arange"
] | [((476, 569), 'numpy.array', 'np.array', (['[sigma * (p[1] - p[0]), p[0] * (rho - p[2]) - p[1], p[0] * p[1] - beta * p[2]]'], {}), '([sigma * (p[1] - p[0]), p[0] * (rho - p[2]) - p[1], p[0] * p[1] - \n beta * p[2]])\n', (484, 569), True, 'import numpy as np\n'), ((1251, 1265), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1259, 1265), True, 'import numpy as np\n'), ((1596, 1610), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1604, 1610), True, 'import numpy as np\n'), ((1978, 1992), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1986, 1992), True, 'import numpy as np\n'), ((2161, 2173), 'numpy.mean', 'np.mean', (['ret'], {}), '(ret)\n', (2168, 2173), True, 'import numpy as np\n'), ((1326, 1344), 'numpy.arange', 'np.arange', (['(1)', 'size'], {}), '(1, size)\n', (1335, 1344), True, 'import numpy as np\n'), ((2256, 2267), 'numpy.var', 'np.var', (['ret'], {}), '(ret)\n', (2262, 2267), True, 'import numpy as np\n'), ((2136, 2148), 'numpy.mean', 'np.mean', (['ret'], {}), '(ret)\n', (2143, 2148), True, 'import numpy as np\n'), ((1157, 1167), 'numpy.exp2', 'np.exp2', (['i'], {}), '(i)\n', (1164, 1167), True, 'import numpy as np\n'), ((1870, 1882), 'numpy.sqrt', 'np.sqrt', (['ret'], {}), '(ret)\n', (1877, 1882), True, 'import numpy as np\n'), ((2223, 2234), 'numpy.var', 'np.var', (['ret'], {}), '(ret)\n', (2229, 2234), True, 'import numpy as np\n'), ((1180, 1191), 'numpy.sqrt', 'np.sqrt', (['x0'], {}), '(x0)\n', (1187, 1191), True, 'import numpy as np\n')] |
import numpy as np
from mimas.spectra.similarity.tools import clean_spectrum
from mimas.spectra.similarity.tools_fast import centroid_spectrum
def preprocess_peaks(precursor_mz, peaks, noise, precursor_removal=None, ms2_da=None, ms2_ppm=None):
return clean_spectrum_new(peaks, max_mz=precursor_mz - precursor_removal, noise_threshold=noise, ms2_ppm=ms2_ppm,
ms2_da=ms2_da)
def clean_spectrum_new(spectrum: np.ndarray,
max_mz: float = None,
noise_threshold=0.01,
max_peak_num: int = None,
ms2_ppm: float = None, ms2_da: float = 0.05) -> np.ndarray:
"""
Clean the spectrum with the following steps:
1. Remove the peaks have m/z higher than the max_mz. This step can be used for
remove precursor ions.
2. Centroid the spectrum, merge the peaks within the +/- ms2_ppm or +/- ms2_da, sort the result spectrum by m/z.
3. Remove the peaks with intensity less than the noise_threshold * maximum (intensity).
4. Normalize the intensity to sum to 1.
At least one of the ms2_ppm or ms2_da need be not None. If both ms2_da and ms2_ppm is given, ms2_da will be used.
:param spectrum: The spectrum.
:param max_mz: The maximum m/z to keep, if None, all the peaks will be kept.
:param noise_threshold: The noise threshold, peaks have intensity lower than
noise_threshold * maximum (intensity) will be removed.
If None, all the peaks will be kept.
:param max_peak_num: The maximum number of peaks to keep. If None, all the peaks will be kept.
:param ms2_ppm: The mass accuracy in ppm.
:param ms2_da: The mass accuracy in Da.
:return: The cleaned spectrum.
"""
# Check the input.
if ms2_ppm is None and ms2_da is None:
raise RuntimeError("Either ms2_ppm or ms2_da should be given!")
# Convert the spectrum to numpy array.
spectrum = convert_spectrum_to_numpy_array(spectrum)
# 1. Remove the peaks have m/z higher than the max_mz.
if max_mz is not None:
spectrum = spectrum[spectrum[:, 0] <= max_mz]
# Sort spectrum by m/z.
spectrum = spectrum[np.argsort(spectrum[:, 0])]
# 2. Centroid the spectrum, merge the peaks within the +/- ms2_ppm or +/- ms2_da.
spectrum = centroid_spectrum(spectrum, ms2_ppm=ms2_ppm, ms2_da=ms2_da)
# 3. Remove the peaks with intensity less than the noise_threshold * maximum (intensity).
if noise_threshold is not None and spectrum.shape[0] > 0:
spectrum = spectrum[spectrum[:, 1] >= noise_threshold * np.max(spectrum[:, 1])]
# 4. Select the top max_peak_num peaks.
if max_peak_num is not None and spectrum.shape[0] > 0:
spectrum = spectrum[np.argsort(spectrum[:, 1])[-max_peak_num:]]
spectrum = spectrum[np.argsort(spectrum[:, 0])]
# 4. Normalize the intensity to sum to 1.
spectrum_sum = np.sum(spectrum[:, 1])
if spectrum_sum == 0:
return spectrum
else:
spectrum[:, 1] /= spectrum_sum
return spectrum
def convert_spectrum_to_numpy_array(spectrum) -> np.ndarray:
"""
Convert the spectrum to numpy array.
"""
spectrum = np.asarray(spectrum, dtype=np.float32, order="C")
if spectrum.shape[0] == 0:
return np.zeros(0, dtype=np.float32, order="C").reshape(-1, 2)
if spectrum.ndim != 2:
raise RuntimeError("Error in input spectrum format!")
return spectrum
| [
"mimas.spectra.similarity.tools_fast.centroid_spectrum",
"numpy.asarray",
"numpy.max",
"numpy.argsort",
"numpy.sum",
"numpy.zeros"
] | [((2360, 2419), 'mimas.spectra.similarity.tools_fast.centroid_spectrum', 'centroid_spectrum', (['spectrum'], {'ms2_ppm': 'ms2_ppm', 'ms2_da': 'ms2_da'}), '(spectrum, ms2_ppm=ms2_ppm, ms2_da=ms2_da)\n', (2377, 2419), False, 'from mimas.spectra.similarity.tools_fast import centroid_spectrum\n'), ((2967, 2989), 'numpy.sum', 'np.sum', (['spectrum[:, 1]'], {}), '(spectrum[:, 1])\n', (2973, 2989), True, 'import numpy as np\n'), ((3248, 3297), 'numpy.asarray', 'np.asarray', (['spectrum'], {'dtype': 'np.float32', 'order': '"""C"""'}), "(spectrum, dtype=np.float32, order='C')\n", (3258, 3297), True, 'import numpy as np\n'), ((2231, 2257), 'numpy.argsort', 'np.argsort', (['spectrum[:, 0]'], {}), '(spectrum[:, 0])\n', (2241, 2257), True, 'import numpy as np\n'), ((2869, 2895), 'numpy.argsort', 'np.argsort', (['spectrum[:, 0]'], {}), '(spectrum[:, 0])\n', (2879, 2895), True, 'import numpy as np\n'), ((2797, 2823), 'numpy.argsort', 'np.argsort', (['spectrum[:, 1]'], {}), '(spectrum[:, 1])\n', (2807, 2823), True, 'import numpy as np\n'), ((3344, 3384), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.float32', 'order': '"""C"""'}), "(0, dtype=np.float32, order='C')\n", (3352, 3384), True, 'import numpy as np\n'), ((2641, 2663), 'numpy.max', 'np.max', (['spectrum[:, 1]'], {}), '(spectrum[:, 1])\n', (2647, 2663), True, 'import numpy as np\n')] |
"""
Transforms the .mat file outputted from the spikesorting into two DataFrames,
saving each into pickle files, and adding them to shortcuts,
with dataset names 'spikes' and 'behavior'
"""
import sys
sys.path.append('.')
from scipy.io import loadmat
from spikelearn.data import io, SHORTCUTS
import pandas as pd
import numpy as np
import h5py
def spikes_behavior_from_ez(filename):
def behav_to_df(f, is_h5=True):
if is_h5:
behav = pd.DataFrame({'duration':f['behavior']['DRRD'][:].reshape(-1),
'offset':f['behavior']['NPEnd'][:].reshape(-1),
'onset':f['behavior']['NPStart'][:].reshape(-1)}, index=pd.Index(np.arange(f['behavior']['DRRD'].shape[1])+1, name='trial'))
else:
behav = pd.DataFrame({'duration':f['behavior']['DRRD'][0,0].reshape(-1),
'offset':f['behavior']['NPEnd'][0,0].reshape(-1),
'onset':f['behavior']['NPStart'][0,0].reshape(-1)}, index=pd.Index(np.arange(f['behavior']['DRRD'][0,0].shape[0])+1, name='trial'))
assert not any(behav.duration - (behav.offset-behav.onset)> 1e-10), 'There are inconsistencies in duration'
return behav
def spikes_inside(times, onset, offset, baseline = .5):
return times[np.logical_and(times>(onset-baseline), times<offset)]
def relevant_spikes(times, behavior):
spk, trials = [], []
for trial, row in behavior.iterrows():
aux_spk = list(spikes_inside(times, row.onset, row.offset))
spk += aux_spk
trials += [trial for i in range(len(aux_spk))]
return np.array(spk), np.array(trials)
try:
behavior = behav_to_df(h5py.File('%s/Behavior.mat'%filename, 'r'))
except:
behavior = behav_to_df(loadmat('%s/Behavior.mat'%filename), is_h5=False)
mat = loadmat('%s/spikes/openephys/openephys.spikes.cellinfo.mat'%filename)['spikes'][0,0]
quality = pd.read_csv('%s/spikes/openephys/cluster_quality.tsv'%filename, '\t')
infos = pd.DataFrame(mat[4].squeeze(), columns=['waveforms']).join(quality)
infos['area'] = np.hstack(mat[6].squeeze())
spikes = pd.DataFrame(mat[1].squeeze(),
columns=['times']).applymap(np.hstack).join(infos)
spikes['trial'] = spikes.times.apply(lambda x: relevant_spikes(x,
behavior)[1])
spikes['times'] = spikes.times.apply(lambda x: relevant_spikes(x,
behavior)[0])
# Calculate relative spike time
spikes['trial_time'] = pd.DataFrame(np.transpose([spikes.times[i] - behavior.iloc[spikes.trial[i]-1].onset.as_matrix() for i in range(spikes.shape[0])]))
return spikes, behavior
def spikes_behavior_from_mat(filename):
"""
Loads a mat-file into two DataFrames
Parameters
----------
Returns
-------
spikes : DataFrame (n_units, 3)
Contains three ndarray fields, indexed by the unit (neuron).
Each ndarray has the form (n_spikes_i,) being different for each row.
'times' holds the absolute times of each spike from the session begin.
'trial' holds the trial number of each corresponding spike from times.
'trial_time' has the relative time of each spike from trial onset.
behavior : DataFrame (n_trials, 3)
Contains five number fields of trial attributes.
'onset' is the time of trial beginning
'offset' is the end of the trial
'duration' is equal to offset - onset
"""
data = loadmat(filename)
spikes = data['dados'][0,0][1]
behavior = data['dados'][0,0][0]
spikes = pd.DataFrame([[ spikes[0,i][0][:,0], spikes[0,i][0][:,1]] for i in range(spikes.shape[1]) if spikes[0,i][0].shape[1]==2], columns=['times','trial'])
behavior = pd.DataFrame(np.transpose(behavior[0,0][0]), columns=['one','onset','offset','zero', 'duration', 'sortIdx', 'sortLabel']).drop(['one', 'zero', 'sortIdx', 'sortLabel'], axis=1)
behavior['trial'] = np.arange(1, behavior.shape[0]+1)
behavior=behavior.set_index('trial')
# Calculate relative spike time
spikes['trial_time'] = pd.DataFrame(np.transpose([spikes.times[i] - behavior.iloc[spikes.trial[i]-1].onset.as_matrix() for i in range(spikes.shape[0])]))
return spikes, behavior
# Load into DataFrames each data
for rat in SHORTCUTS['groups']['ALL']:
filepath = io.load(rat, 'spikesorted', getpath=True)
if rat in SHORTCUTS['groups']['GB']:
spikes, behavior = spikes_behavior_from_mat(filepath)
elif rat in SHORTCUTS['groups']['EZ']:
print(filepath)
spikes, behavior = spikes_behavior_from_ez(filepath)
else:
raise NotImplementedError('This dataset is not included as a special case')
identifiers = dict(session=rat.split()[0], rat_number=rat.split()[1] )
io.save(spikes, rat, 'spikes', 'interim', **identifiers)
io.save(behavior, rat, 'behavior', 'interim', **identifiers)
| [
"pandas.read_csv",
"numpy.arange",
"numpy.logical_and",
"scipy.io.loadmat",
"spikelearn.data.io.save",
"h5py.File",
"numpy.array",
"numpy.transpose",
"sys.path.append",
"spikelearn.data.io.load"
] | [((202, 222), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (217, 222), False, 'import sys\n'), ((1920, 1991), 'pandas.read_csv', 'pd.read_csv', (["('%s/spikes/openephys/cluster_quality.tsv' % filename)", '"""\t"""'], {}), "('%s/spikes/openephys/cluster_quality.tsv' % filename, '\\t')\n", (1931, 1991), True, 'import pandas as pd\n'), ((3579, 3596), 'scipy.io.loadmat', 'loadmat', (['filename'], {}), '(filename)\n', (3586, 3596), False, 'from scipy.io import loadmat\n'), ((4051, 4086), 'numpy.arange', 'np.arange', (['(1)', '(behavior.shape[0] + 1)'], {}), '(1, behavior.shape[0] + 1)\n', (4060, 4086), True, 'import numpy as np\n'), ((4438, 4479), 'spikelearn.data.io.load', 'io.load', (['rat', '"""spikesorted"""'], {'getpath': '(True)'}), "(rat, 'spikesorted', getpath=True)\n", (4445, 4479), False, 'from spikelearn.data import io, SHORTCUTS\n'), ((4885, 4941), 'spikelearn.data.io.save', 'io.save', (['spikes', 'rat', '"""spikes"""', '"""interim"""'], {}), "(spikes, rat, 'spikes', 'interim', **identifiers)\n", (4892, 4941), False, 'from spikelearn.data import io, SHORTCUTS\n'), ((4946, 5006), 'spikelearn.data.io.save', 'io.save', (['behavior', 'rat', '"""behavior"""', '"""interim"""'], {}), "(behavior, rat, 'behavior', 'interim', **identifiers)\n", (4953, 5006), False, 'from spikelearn.data import io, SHORTCUTS\n'), ((1254, 1310), 'numpy.logical_and', 'np.logical_and', (['(times > onset - baseline)', '(times < offset)'], {}), '(times > onset - baseline, times < offset)\n', (1268, 1310), True, 'import numpy as np\n'), ((1601, 1614), 'numpy.array', 'np.array', (['spk'], {}), '(spk)\n', (1609, 1614), True, 'import numpy as np\n'), ((1616, 1632), 'numpy.array', 'np.array', (['trials'], {}), '(trials)\n', (1624, 1632), True, 'import numpy as np\n'), ((1673, 1717), 'h5py.File', 'h5py.File', (["('%s/Behavior.mat' % filename)", '"""r"""'], {}), "('%s/Behavior.mat' % filename, 'r')\n", (1682, 1717), False, 'import h5py\n'), ((1820, 1891), 
'scipy.io.loadmat', 'loadmat', (["('%s/spikes/openephys/openephys.spikes.cellinfo.mat' % filename)"], {}), "('%s/spikes/openephys/openephys.spikes.cellinfo.mat' % filename)\n", (1827, 1891), False, 'from scipy.io import loadmat\n'), ((1760, 1797), 'scipy.io.loadmat', 'loadmat', (["('%s/Behavior.mat' % filename)"], {}), "('%s/Behavior.mat' % filename)\n", (1767, 1797), False, 'from scipy.io import loadmat\n'), ((3862, 3893), 'numpy.transpose', 'np.transpose', (['behavior[0, 0][0]'], {}), '(behavior[0, 0][0])\n', (3874, 3893), True, 'import numpy as np\n'), ((661, 702), 'numpy.arange', 'np.arange', (["f['behavior']['DRRD'].shape[1]"], {}), "(f['behavior']['DRRD'].shape[1])\n", (670, 702), True, 'import numpy as np\n'), ((953, 1000), 'numpy.arange', 'np.arange', (["f['behavior']['DRRD'][0, 0].shape[0]"], {}), "(f['behavior']['DRRD'][0, 0].shape[0])\n", (962, 1000), True, 'import numpy as np\n')] |
import keras
from keras.models import Sequential
from keras.layers import Conv2D,Dense,Flatten,Dropout
import numpy as np
m1=m2=m3=m4=np.zeros((4,4))
data_train=np.load("256train.npy")
data_train = [i+1 for i in data_train]
data_train=np.reshape(data_train,(-1,17))
y_train=data_train[:,16]
#print(len(y_train))
x_train=data_train[:,0:16]
x_train=np.reshape(x_train,(-1,4,4))
x_train_new=np.zeros((len(x_train),8,8))
for i in range(len(x_train)):
m1=x_train[i]
m2=m1.T
for j in range(4):
m3[j]=x_train[i][3-j]
m4=m3.T
x_train_new[i]=np.concatenate((np.concatenate((m1, m2), axis=0), np.concatenate((m4, m3), axis=0)), axis=1)
x_train_new=np.reshape(x_train_new,(-1,8,8,1))
data_test=np.load("256.npy")
data_test= [i+1 for i in data_test]
data_test=np.reshape(data_test,(-1,17))
y_test=data_test[:,16]
x_test=data_test[:,0:16]
x_test=np.reshape(x_test,(-1,4,4))
x_test_new=np.zeros((len(x_test),8,8))
for i in range(len(x_test)):
m1=x_train[i]
m2=m1.T
for j in range(4):
m3[j]=x_test[i][3-j]
m4=m3.T
x_test_new[i]=np.concatenate((np.concatenate((m1, m2), axis=0), np.concatenate((m4, m3), axis=0)), axis=1)
x_test_new=np.reshape(x_test_new,(-1,8,8,1))
y_train=[i-1 for i in y_train]
y_train=keras.utils.to_categorical(y_train)
y_test=[i-1 for i in y_test]
y_test=keras.utils.to_categorical(y_test)
model=Sequential()
model.add(Conv2D(64,kernel_size=(4,1),strides=1,padding='valid',activation='relu',input_shape=(8,8,1)))
model.add(Conv2D(128,kernel_size=(1,4),strides=1,padding='valid',activation='relu'))
model.add(Conv2D(128,kernel_size=(2,2),strides=1,padding='same',activation='relu'))
model.add(Conv2D(200,kernel_size=(2,2),strides=1,padding='same',activation='relu'))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(120,activation='softmax'))
model.add(Dense(64,activation='softmax'))
model.add(Dense(4,activation='softmax'))
model.summary()
model.compile('sgd',loss='categorical_crossentropy',metrics=['accuracy'])
model.fit(x_train_new,y_train,batch_size=1,epochs=30,validation_data=[x_test_new,y_test])
model.save('model_256.h5')
| [
"keras.layers.Conv2D",
"numpy.reshape",
"keras.layers.Flatten",
"keras.models.Sequential",
"keras.utils.to_categorical",
"numpy.zeros",
"numpy.concatenate",
"keras.layers.Dense",
"numpy.load",
"keras.layers.Dropout"
] | [((136, 152), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (144, 152), True, 'import numpy as np\n'), ((163, 186), 'numpy.load', 'np.load', (['"""256train.npy"""'], {}), "('256train.npy')\n", (170, 186), True, 'import numpy as np\n'), ((237, 269), 'numpy.reshape', 'np.reshape', (['data_train', '(-1, 17)'], {}), '(data_train, (-1, 17))\n', (247, 269), True, 'import numpy as np\n'), ((349, 380), 'numpy.reshape', 'np.reshape', (['x_train', '(-1, 4, 4)'], {}), '(x_train, (-1, 4, 4))\n', (359, 380), True, 'import numpy as np\n'), ((668, 706), 'numpy.reshape', 'np.reshape', (['x_train_new', '(-1, 8, 8, 1)'], {}), '(x_train_new, (-1, 8, 8, 1))\n', (678, 706), True, 'import numpy as np\n'), ((714, 732), 'numpy.load', 'np.load', (['"""256.npy"""'], {}), "('256.npy')\n", (721, 732), True, 'import numpy as np\n'), ((779, 810), 'numpy.reshape', 'np.reshape', (['data_test', '(-1, 17)'], {}), '(data_test, (-1, 17))\n', (789, 810), True, 'import numpy as np\n'), ((864, 894), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 4, 4)'], {}), '(x_test, (-1, 4, 4))\n', (874, 894), True, 'import numpy as np\n'), ((1176, 1213), 'numpy.reshape', 'np.reshape', (['x_test_new', '(-1, 8, 8, 1)'], {}), '(x_test_new, (-1, 8, 8, 1))\n', (1186, 1213), True, 'import numpy as np\n'), ((1250, 1285), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train'], {}), '(y_train)\n', (1276, 1285), False, 'import keras\n'), ((1322, 1356), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test'], {}), '(y_test)\n', (1348, 1356), False, 'import keras\n'), ((1364, 1376), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1374, 1376), False, 'from keras.models import Sequential\n'), ((1387, 1492), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(4, 1)', 'strides': '(1)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': '(8, 8, 1)'}), "(64, kernel_size=(4, 1), strides=1, padding='valid', activation=\n 'relu', input_shape=(8, 
8, 1))\n", (1393, 1492), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1491, 1569), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(1, 4)', 'strides': '(1)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(128, kernel_size=(1, 4), strides=1, padding='valid', activation='relu')\n", (1497, 1569), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1576, 1653), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(2, 2)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(128, kernel_size=(2, 2), strides=1, padding='same', activation='relu')\n", (1582, 1653), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1660, 1737), 'keras.layers.Conv2D', 'Conv2D', (['(200)'], {'kernel_size': '(2, 2)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(200, kernel_size=(2, 2), strides=1, padding='same', activation='relu')\n", (1666, 1737), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1744, 1756), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1751, 1756), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1768, 1777), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1775, 1777), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1789, 1821), 'keras.layers.Dense', 'Dense', (['(120)'], {'activation': '"""softmax"""'}), "(120, activation='softmax')\n", (1794, 1821), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1832, 1863), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""softmax"""'}), "(64, activation='softmax')\n", (1837, 1863), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((1874, 1904), 'keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (1879, 1904), False, 'from keras.layers import Conv2D, Dense, Flatten, Dropout\n'), ((579, 
611), 'numpy.concatenate', 'np.concatenate', (['(m1, m2)'], {'axis': '(0)'}), '((m1, m2), axis=0)\n', (593, 611), True, 'import numpy as np\n'), ((613, 645), 'numpy.concatenate', 'np.concatenate', (['(m4, m3)'], {'axis': '(0)'}), '((m4, m3), axis=0)\n', (627, 645), True, 'import numpy as np\n'), ((1088, 1120), 'numpy.concatenate', 'np.concatenate', (['(m1, m2)'], {'axis': '(0)'}), '((m1, m2), axis=0)\n', (1102, 1120), True, 'import numpy as np\n'), ((1122, 1154), 'numpy.concatenate', 'np.concatenate', (['(m4, m3)'], {'axis': '(0)'}), '((m4, m3), axis=0)\n', (1136, 1154), True, 'import numpy as np\n')] |
# coding: utf-8
__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
import numpy as np
def nms_standard(dets, thresh):
    """Classic greedy non-maximum suppression.

    dets: array whose rows are [score, x1, y1, x2, y2].
    thresh: IoU above which a lower-scored box is suppressed.
    Returns the list of kept row indices, best score first.
    """
    conf = dets[:, 0]
    left, top = dets[:, 1], dets[:, 2]
    right, bottom = dets[:, 3], dets[:, 4]
    # +1 follows the pixel-inclusive box convention.
    box_area = (right - left + 1) * (bottom - top + 1)
    remaining = conf.argsort()[::-1]
    kept = []
    while remaining.size > 0:
        best = remaining[0]
        kept.append(best)
        rest = remaining[1:]
        # Intersection of the best box with every remaining box.
        ix1 = np.maximum(left[best], left[rest])
        iy1 = np.maximum(top[best], top[rest])
        ix2 = np.minimum(right[best], right[rest])
        iy2 = np.minimum(bottom[best], bottom[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih
        iou = inter / (box_area[best] + box_area[rest] - inter)
        # Keep only boxes that overlap the winner no more than thresh.
        remaining = rest[np.where(iou <= thresh)[0]]
    return kept
def bb_intersection_over_union(boxA, boxB):
    """Intersection-over-union of two axis-aligned boxes [x1, y1, x2, y2]."""
    # Corners of the overlap rectangle.
    ix1 = max(boxA[0], boxB[0])
    iy1 = max(boxA[1], boxB[1])
    ix2 = min(boxA[2], boxB[2])
    iy2 = min(boxA[3], boxB[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    if inter == 0:
        # Disjoint boxes: avoid dividing when the union could be 0 too.
        return 0.0
    area_a = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    area_b = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    # union = areaA + areaB - intersection
    return inter / float(area_a + area_b - inter)
def filter_boxes(boxes, scores, labels, thr):
    """Threshold detections and undo horizontal mirroring.

    boxes/scores/labels are (runs, dets, ...) arrays sorted by score per
    run. Detections below ``thr`` end that run (scores are assumed
    descending, so iteration breaks at the first miss). Run 0 is the
    original image; every later run is treated as a horizontally flipped
    prediction, so its x coordinates are un-mirrored (x -> 1 - x with
    x1/x2 swapped).
    Returns one list of [label, score, x1, y1, x2, y2] per run.
    """
    filtered_runs = []
    for run in range(boxes.shape[0]):
        kept = []
        for det in range(boxes.shape[1]):
            score = float(scores[run, det])
            if score < thr:
                break
            cls = int(labels[run, det].astype(np.int64))
            x1 = float(boxes[run, det, 0])
            y1 = float(boxes[run, det, 1])
            x2 = float(boxes[run, det, 2])
            y2 = float(boxes[run, det, 3])
            if run == 0:
                kept.append([cls, score, x1, y1, x2, y2])
            else:
                # Mirror fix: reflect x around 1 and swap x1/x2.
                kept.append([cls, score, 1 - x2, y1, 1 - x1, y2])
        filtered_runs.append(kept)
    return filtered_runs
def filter_boxes_v2(boxes, scores, labels, thr):
    """Threshold detections for nested per-model, per-run predictions.

    boxes/scores/labels are lists (models) of lists (runs) of arrays.
    As in ``filter_boxes``, run index 0 is the original image and any
    other run is un-mirrored (x -> 1 - x with x1/x2 swapped); iteration
    per run stops at the first score below ``thr``.
    Returns a flat list (model-major) of per-run box lists, each box
    being [label, score, x1, y1, x2, y2].
    """
    flattened = []
    for model_idx in range(len(boxes)):
        for run in range(len(boxes[model_idx])):
            kept = []
            for det in range(boxes[model_idx][run].shape[0]):
                score = float(scores[model_idx][run][det])
                if score < thr:
                    break
                cls = int(labels[model_idx][run][det].astype(np.int64))
                x1 = float(boxes[model_idx][run][det, 0])
                y1 = float(boxes[model_idx][run][det, 1])
                x2 = float(boxes[model_idx][run][det, 2])
                y2 = float(boxes[model_idx][run][det, 3])
                if run == 0:
                    kept.append([cls, score, x1, y1, x2, y2])
                else:
                    # Mirror fix: reflect x around 1 and swap x1/x2.
                    kept.append([cls, score, 1 - x2, y1, 1 - x1, y2])
            flattened.append(kept)
    return flattened
def find_matching_box(boxes_list, new_box, match_iou=0.55):
    """Locate the best same-label match for ``new_box`` in ``boxes_list``.

    Boxes are [label, score, x1, y1, x2, y2]. Only boxes with the same
    label are compared; a candidate must beat ``match_iou`` to count.
    Returns (index, iou); index is -1 when nothing qualifies, in which
    case the returned iou is the ``match_iou`` floor.
    """
    best_index, best_iou = -1, match_iou
    for idx, candidate in enumerate(boxes_list):
        if candidate[0] != new_box[0]:
            continue  # different class, never mergeable
        overlap = bb_intersection_over_union(candidate[2:], new_box[2:])
        if overlap > best_iou:
            best_index, best_iou = idx, overlap
    return best_index, best_iou
def merge_boxes_weighted(box1, box2, w1, w2, type):
    """Fuse two boxes [label, score, x1, y1, x2, y2] with weights w1/w2.

    The label is taken from ``box1``; coordinates are always the
    weighted average. The score is combined per ``type``: 'avg'
    (weighted mean), 'max', or 'mul' (geometric mean). Any other type
    aborts the process, matching the original behaviour.
    """
    merged = [-1, -1, -1, -1, -1, -1]
    merged[0] = box1[0]
    if type == 'avg':
        merged[1] = (w1 * box1[1] + w2 * box2[1]) / (w1 + w2)
    elif type == 'max':
        merged[1] = max(box1[1], box2[1])
    elif type == 'mul':
        merged[1] = np.sqrt(box1[1] * box2[1])
    else:
        exit()
    # Weighted average of the four coordinates.
    total = w1 + w2
    for coord in range(2, 6):
        merged[coord] = (w1 * box1[coord] + w2 * box2[coord]) / total
    return merged
def merge_all_boxes_for_image(boxes, intersection_thr=0.55, type='avg'):
    """Fuse the box lists of several models into a single consensus list.

    boxes: list (one entry per model) of box lists; each box is
    [label, score, x1, y1, x2, y2]. Starting from the first model's
    boxes, each later box is merged (via ``merge_boxes_weighted``) into
    the best same-label match above ``intersection_thr``, or appended
    when no match exists. Finally each score is scaled by the fraction
    of models that contributed to it, so boxes confirmed by more models
    score higher. Returns the merged boxes as a numpy array.
    """
    # Shallow copy: the inner box lists are still shared with boxes[0].
    new_boxes = boxes[0].copy()
    init_weight = 1/len(boxes)  # each model contributes an equal share
    weights = [init_weight] * len(new_boxes)
    for j in range(1, len(boxes)):
        for k in range(len(boxes[j])):
            index, best_iou = find_matching_box(new_boxes, boxes[j][k], intersection_thr)
            if index != -1:
                # Matched: weighted fuse, and grow the slot's weight by one share.
                new_boxes[index] = merge_boxes_weighted(new_boxes[index], boxes[j][k], weights[index], init_weight, type)
                weights[index] += init_weight
            else:
                new_boxes.append(boxes[j][k])
                weights.append(init_weight)
    for i in range(len(new_boxes)):
        # NOTE(review): boxes that were never merged are still references
        # into the caller's input lists, so this in-place score scaling
        # also mutates the caller's `boxes` — confirm that is intended.
        new_boxes[i][1] *= weights[i]
    return np.array(new_boxes)
| [
"numpy.sqrt",
"numpy.minimum",
"numpy.where",
"numpy.array",
"numpy.maximum"
] | [((5046, 5065), 'numpy.array', 'np.array', (['new_boxes'], {}), '(new_boxes)\n', (5054, 5065), True, 'import numpy as np\n'), ((402, 434), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (412, 434), True, 'import numpy as np\n'), ((449, 481), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (459, 481), True, 'import numpy as np\n'), ((496, 528), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (506, 528), True, 'import numpy as np\n'), ((543, 575), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (553, 575), True, 'import numpy as np\n'), ((589, 619), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1 + 1)'], {}), '(0.0, xx2 - xx1 + 1)\n', (599, 619), True, 'import numpy as np\n'), ((632, 662), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1 + 1)'], {}), '(0.0, yy2 - yy1 + 1)\n', (642, 662), True, 'import numpy as np\n'), ((761, 784), 'numpy.where', 'np.where', (['(ovr <= thresh)'], {}), '(ovr <= thresh)\n', (769, 784), True, 'import numpy as np\n'), ((4038, 4064), 'numpy.sqrt', 'np.sqrt', (['(box1[1] * box2[1])'], {}), '(box1[1] * box2[1])\n', (4045, 4064), True, 'import numpy as np\n')] |
import tensorflow as tf
from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample
from tensorbayes.distributions import log_bernoulli_with_logits, log_normal
from tensorbayes.tbutils import cross_entropy_with_logits
import numpy as np
import sys
# vae subgraphs
def qy_graph(x, k=10):
    """Inference network q(y|x): two 512-unit ReLU layers, then a k-way softmax.

    Variables live under the 'qy' scope and are reused on every call
    after the first (detected by the scope already holding variables).
    Returns (logits, softmax probabilities).
    """
    reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='qy')) > 0
    with tf.variable_scope('qy'):
        hidden = Dense(x, 512, 'layer1', tf.nn.relu, reuse=reuse)
        hidden = Dense(hidden, 512, 'layer2', tf.nn.relu, reuse=reuse)
        qy_logit = Dense(hidden, k, 'logit', reuse=reuse)
        qy = tf.nn.softmax(qy_logit, name='prob')
    return qy_logit, qy
def qz_graph(x, y):
    """Inference network q(z|x, y): encode the concatenated pair into a
    64-dim Gaussian via two 512-unit ReLU layers.

    Variables live under the 'qz' scope and are reused once they exist.
    Returns (sampled z, mean zm, softplus-positive scale zv).
    """
    reuse = len(tf.get_collection(tf.GraphKeys.VARIABLES, scope='qz')) > 0
    with tf.variable_scope('qz'):
        joint = tf.concat(1, (x, y), name='xy/concat')
        hidden = Dense(joint, 512, 'layer1', tf.nn.relu, reuse=reuse)
        hidden = Dense(hidden, 512, 'layer2', tf.nn.relu, reuse=reuse)
        zm = Dense(hidden, 64, 'zm', reuse=reuse)
        zv = Dense(hidden, 64, 'zv', tf.nn.softplus, reuse=reuse)
        z = GaussianSample(zm, zv, 'z')
    return z, zm, zv
def labeled_loss(x, px_logit, z, zm, zv, zm_prior, zv_prior):
    """Per-example loss for a fixed label y.

    Reconstruction term -log p(x|z) plus the KL-style difference
    log q(z|x,y) - log p(z|y), minus log(0.1) — presumably the log of a
    uniform prior p(y) over 10 classes (confirm against the model setup).
    """
    reconstruction = -log_bernoulli_with_logits(x, px_logit)
    kl_term = log_normal(z, zm, zv) - log_normal(z, zm_prior, zv_prior)
    return reconstruction + kl_term - np.log(0.1)
| [
"tensorflow.variable_scope",
"tensorbayes.distributions.log_bernoulli_with_logits",
"numpy.log",
"tensorflow.get_collection",
"tensorbayes.layers.GaussianSample",
"tensorflow.concat",
"tensorflow.nn.softmax",
"tensorbayes.layers.Dense",
"tensorbayes.distributions.log_normal"
] | [((440, 463), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""qy"""'], {}), "('qy')\n", (457, 463), True, 'import tensorflow as tf\n'), ((478, 526), 'tensorbayes.layers.Dense', 'Dense', (['x', '(512)', '"""layer1"""', 'tf.nn.relu'], {'reuse': 'reuse'}), "(x, 512, 'layer1', tf.nn.relu, reuse=reuse)\n", (483, 526), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((540, 589), 'tensorbayes.layers.Dense', 'Dense', (['h1', '(512)', '"""layer2"""', 'tf.nn.relu'], {'reuse': 'reuse'}), "(h1, 512, 'layer2', tf.nn.relu, reuse=reuse)\n", (545, 589), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((609, 643), 'tensorbayes.layers.Dense', 'Dense', (['h2', 'k', '"""logit"""'], {'reuse': 'reuse'}), "(h2, k, 'logit', reuse=reuse)\n", (614, 643), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((657, 693), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['qy_logit'], {'name': '"""prob"""'}), "(qy_logit, name='prob')\n", (670, 693), True, 'import tensorflow as tf\n'), ((877, 900), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""qz"""'], {}), "('qz')\n", (894, 900), True, 'import tensorflow as tf\n'), ((915, 953), 'tensorflow.concat', 'tf.concat', (['(1)', '(x, y)'], {'name': '"""xy/concat"""'}), "(1, (x, y), name='xy/concat')\n", (924, 953), True, 'import tensorflow as tf\n'), ((967, 1016), 'tensorbayes.layers.Dense', 'Dense', (['xy', '(512)', '"""layer1"""', 'tf.nn.relu'], {'reuse': 'reuse'}), "(xy, 512, 'layer1', tf.nn.relu, reuse=reuse)\n", (972, 1016), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((1030, 1079), 'tensorbayes.layers.Dense', 'Dense', (['h1', '(512)', '"""layer2"""', 'tf.nn.relu'], {'reuse': 'reuse'}), "(h1, 512, 'layer2', tf.nn.relu, reuse=reuse)\n", (1035, 1079), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((1093, 1125), 
'tensorbayes.layers.Dense', 'Dense', (['h2', '(64)', '"""zm"""'], {'reuse': 'reuse'}), "(h2, 64, 'zm', reuse=reuse)\n", (1098, 1125), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((1139, 1187), 'tensorbayes.layers.Dense', 'Dense', (['h2', '(64)', '"""zv"""', 'tf.nn.softplus'], {'reuse': 'reuse'}), "(h2, 64, 'zv', tf.nn.softplus, reuse=reuse)\n", (1144, 1187), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((1200, 1227), 'tensorbayes.layers.GaussianSample', 'GaussianSample', (['zm', 'zv', '"""z"""'], {}), "(zm, zv, 'z')\n", (1214, 1227), False, 'from tensorbayes.layers import Constant, Placeholder, Dense, GaussianSample\n'), ((1327, 1365), 'tensorbayes.distributions.log_bernoulli_with_logits', 'log_bernoulli_with_logits', (['x', 'px_logit'], {}), '(x, px_logit)\n', (1352, 1365), False, 'from tensorbayes.distributions import log_bernoulli_with_logits, log_normal\n'), ((1381, 1402), 'tensorbayes.distributions.log_normal', 'log_normal', (['z', 'zm', 'zv'], {}), '(z, zm, zv)\n', (1391, 1402), False, 'from tensorbayes.distributions import log_bernoulli_with_logits, log_normal\n'), ((1405, 1438), 'tensorbayes.distributions.log_normal', 'log_normal', (['z', 'zm_prior', 'zv_prior'], {}), '(z, zm_prior, zv_prior)\n', (1415, 1438), False, 'from tensorbayes.distributions import log_bernoulli_with_logits, log_normal\n'), ((1460, 1471), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (1466, 1471), True, 'import numpy as np\n'), ((320, 373), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""qy"""'}), "(tf.GraphKeys.VARIABLES, scope='qy')\n", (337, 373), True, 'import tensorflow as tf\n'), ((755, 808), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""qz"""'}), "(tf.GraphKeys.VARIABLES, scope='qz')\n", (772, 808), True, 'import tensorflow as tf\n')] |
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator
import numpy as np
class Regressor(BaseEstimator):
    """Per-molecule regressor.

    One PCA + gradient-boosting pipeline is kept per molecule type
    ('A', 'B', 'Q', 'R'). The molecule of each sample is read from the
    argmax of the last four columns of X. Targets are fitted in log
    space and predictions exponentiated back.
    """

    def __init__(self):
        self.n_components = 10
        self.n_estimators = 40
        self.learning_rate = 0.2
        self.list_molecule = ['A', 'B', 'Q', 'R']
        # One independent pipeline per molecule type.
        self.dict_reg = {
            mol: Pipeline([
                ('pca', PCA(n_components=self.n_components)),
                ('reg', GradientBoostingRegressor(
                    n_estimators=self.n_estimators,
                    learning_rate=self.learning_rate,
                    random_state=42)),
            ])
            for mol in self.list_molecule
        }

    def fit(self, X, y):
        for idx, mol in enumerate(self.list_molecule):
            # Rows whose molecule indicator (argmax of last 4 cols) is this type.
            rows = np.where(np.argmax(X[:, -4:], axis=1) == idx)[0]
            self.dict_reg[mol].fit(X[rows], np.log(y[rows]))

    def predict(self, X):
        y_pred = np.zeros(X.shape[0])
        for idx, mol in enumerate(self.list_molecule):
            rows = np.where(np.argmax(X[:, -4:], axis=1) == idx)[0]
            # Undo the log transform applied in fit().
            y_pred[rows] = np.exp(self.dict_reg[mol].predict(X[rows]))
        return y_pred
| [
"sklearn.decomposition.PCA",
"numpy.log",
"numpy.argmax",
"numpy.zeros",
"sklearn.ensemble.GradientBoostingRegressor"
] | [((1085, 1105), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (1093, 1105), True, 'import numpy as np\n'), ((1026, 1039), 'numpy.log', 'np.log', (['y_mol'], {}), '(y_mol)\n', (1032, 1039), True, 'import numpy as np\n'), ((526, 561), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'self.n_components'}), '(n_components=self.n_components)\n', (529, 561), False, 'from sklearn.decomposition import PCA\n'), ((588, 701), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'n_estimators': 'self.n_estimators', 'learning_rate': 'self.learning_rate', 'random_state': '(42)'}), '(n_estimators=self.n_estimators, learning_rate=\n self.learning_rate, random_state=42)\n', (613, 701), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((884, 912), 'numpy.argmax', 'np.argmax', (['X[:, -4:]'], {'axis': '(1)'}), '(X[:, -4:], axis=1)\n', (893, 912), True, 'import numpy as np\n'), ((1190, 1218), 'numpy.argmax', 'np.argmax', (['X[:, -4:]'], {'axis': '(1)'}), '(X[:, -4:], axis=1)\n', (1199, 1218), True, 'import numpy as np\n')] |
import pickle
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
def main():
    """Re-serialise every DEAP `.dat` pickle into `data/<name>.txt`.

    Reads each `.dat` file from the fixed DEAP preprocessed-data folder
    (latin1-encoded Python-2 pickles) and dumps it again as a pickle
    under `data/` with a `.txt` extension.
    """
    source_dir = os.path.expanduser('~/Desktop/DEAP/data_preprocessed_python')
    os.makedirs('data', exist_ok=True)
    print("Importing data...")
    for entry in tqdm(sorted(os.listdir(source_dir))):
        name = os.fsdecode(entry)
        if not name.endswith(".dat"):
            continue
        # latin1 is required to load Python-2 era pickles under Python 3.
        with open(os.path.join(source_dir, name), 'rb') as src:
            payload = pickle.load(src, encoding='latin1')
        out_path = os.path.join('data/', os.path.splitext(name)[0]) + ".txt"
        with open(out_path, 'wb') as dst:
            pickle.dump(payload, dst)
def change_label_values_to_calss(all_labels):
    """Convert numeric ratings into low/high class strings per emotion.

    Each rating <= 5 becomes 'L' and anything above becomes 'H ' (the
    trailing space is preserved for backward compatibility with the
    original output). Each cell is then suffixed with its column's
    emotion code: V(alence), A(rousal), D(ominance), L(iking).

    Generalized: works for any (n_rows, 4) array instead of the
    previously hard-coded 40 rows.

    Args:
        all_labels: 2-D array-like of numeric ratings, 4 columns.

    Returns:
        Object array of the same shape with entries like 'LV' or 'H A'.
    """
    n_rows, n_cols = np.shape(all_labels)
    temp_labels = np.empty((n_rows, n_cols), dtype=object)
    for i in range(n_rows):
        for j in range(n_cols):
            # 5 is counted as low, matching the original <= comparison.
            temp_labels[i][j] = 'L' if all_labels[i][j] <= 5 else 'H '
    emotions_label = np.array([['V', 'A', 'D', 'L']] * n_rows)
    # Object + str arrays concatenate element-wise: 'L' + 'V' -> 'LV'.
    return temp_labels + emotions_label
if __name__ == "__main__":
main() | [
"os.listdir",
"pickle.dump",
"os.makedirs",
"numpy.size",
"os.path.join",
"pickle.load",
"os.path.splitext",
"numpy.array",
"numpy.empty",
"os.fsdecode",
"os.path.expanduser"
] | [((199, 235), 'os.path.expanduser', 'os.path.expanduser', (['directory_string'], {}), '(directory_string)\n', (217, 235), False, 'import os\n'), ((240, 274), 'os.makedirs', 'os.makedirs', (['"""data"""'], {'exist_ok': '(True)'}), "('data', exist_ok=True)\n", (251, 274), False, 'import os\n'), ((875, 906), 'numpy.empty', 'np.empty', (['(40, 4)'], {'dtype': 'object'}), '((40, 4), dtype=object)\n', (883, 906), True, 'import numpy as np\n'), ((1152, 1189), 'numpy.array', 'np.array', (["([['V', 'A', 'D', 'L']] * 40)"], {}), "([['V', 'A', 'D', 'L']] * 40)\n", (1160, 1189), True, 'import numpy as np\n'), ((379, 396), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (390, 396), False, 'import os\n'), ((421, 454), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (433, 454), False, 'import os\n'), ((336, 357), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (346, 357), False, 'import os\n'), ((565, 606), 'pickle.load', 'pickle.load', (['data_file'], {'encoding': '"""latin1"""'}), "(data_file, encoding='latin1')\n", (576, 606), False, 'import pickle\n'), ((713, 748), 'pickle.dump', 'pickle.dump', (['pickle_file', 'text_file'], {}), '(pickle_file, text_file)\n', (724, 748), False, 'import pickle\n'), ((973, 995), 'numpy.size', 'np.size', (['all_labels', '(1)'], {}), '(all_labels, 1)\n', (980, 995), True, 'import numpy as np\n'), ((656, 682), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (672, 682), False, 'import os\n')] |
import time, datetime
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets
from torch.autograd import Variable
from learning.utils_learn import *
from learning.dataloader import SegList, SegListMS, get_loader, get_info
import logging
from learning.validate import validate
import data_transforms as transforms
from dataloaders.utils import decode_segmap
from torch.utils.tensorboard import SummaryWriter
import logging
# Module-wide logging: every record carries a timestamp, the source
# file/line and the function name; this module logs at DEBUG level.
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def mtask_forone_grad(val_loader, model, criterion, task_name, args, test_vis=False):
    """Measure input-gradient vulnerability for the tasks in ``task_name``.

    For every batch the losses of the selected tasks are summed and
    backpropagated to the *input*; the L2 norm of that input gradient is
    accumulated and its batch average returned (larger = more vulnerable).

    Args:
        val_loader: yields (input, target_dict) batches.
        model: multi-task network returning a dict of per-task outputs.
        criterion: dict of loss functions; the first entry is called with
            the whole output/target dicts, later ones per task key.
        task_name: task keys whose summed loss is differentiated.
        args: only ``args.debug`` is read (truncates the loop after 200+ batches).
        test_vis: when True, also track clean segmentation accuracy.

    Returns:
        Average input-gradient L2 norm over the evaluated batches.
    """
    grad_sum = 0
    cnt = 0
    model.eval()
    score = AverageMeter()
    print('task to be calculated gradients', task_name)
    for i, (input, target) in enumerate(val_loader):
        if torch.cuda.is_available():
            input = input.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()
        # Gradients are taken w.r.t. the input image, not the weights.
        input_var = torch.autograd.Variable(input, requires_grad=True)
        output = model(input_var)
        first_loss = None
        loss_dict = {}
        for c_name, criterion_fun in criterion.items():
            if first_loss is None:
                first_loss = c_name
                # First criterion receives the whole output/target dicts.
                loss_dict[c_name] = criterion_fun(output, target)
            else:
                loss_dict[c_name] = criterion_fun(output[c_name], target[c_name])
        # Sum only the losses of the tasks under evaluation, then backprop.
        grad_total_loss = None
        for each in task_name:
            if grad_total_loss is None:
                grad_total_loss = loss_dict[each]
            else:
                grad_total_loss = grad_total_loss + loss_dict[each]
        grad_total_loss.backward()
        if test_vis:
            from learning.utils_learn import accuracy
            score.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
        # NOTE: a commented-out variant that summed the losses inside the
        # criterion loop was removed here; per the original author's note it
        # failed to populate input_var.grad, so this two-pass form is kept.
        data_grad = input_var.grad
        np_data_grad = data_grad.cpu().numpy()
        L2_grad_norm = np.linalg.norm(np_data_grad)
        grad_sum += L2_grad_norm
        # increment the batch # counter
        cnt += 1
        if args.debug:
            if cnt>200:
                break
    if test_vis:
        print('Clean Acc for Seg: {}'.format(score.avg))
    print('Vulnerability in Grad Norm')
    print("average grad for task {} :".format(task_name), grad_sum * 1.0 /cnt)
    return grad_sum * 1.0 /cnt
from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city
from learning.utils_learn import accuracy
def mtask_forone_advacc(val_loader, model, criterion, task_name, args, info, epoch=0, writer=None,
                        comet=None, test_flag=False, test_vis=False, norm='Linf'):
    """Evaluate the model on PGD-attacked inputs, attacking ``task_name``.

    The attack (Linf or l2 PGD) is generated against the losses of the
    tasks in ``task_name``; every criterion in ``criterion`` is then
    evaluated on the resulting adversarial images.

    Args:
        val_loader: yields (input, target_dict, mask_dict) batches.
        model: multi-task network returning a dict of per-task outputs.
        criterion: dict task-name -> loss(output, target, mask).
        task_name: non-empty list of task keys to attack.
        args: namespace with epsilon/steps/step_size/dataset/classes/debug.
        info: dataset normalization info used by the attack and plotting.
        epoch: epoch index for tensorboard scalars.
        writer / comet: optional loggers (only used when test_flag is False
            / when comet is not None).
        test_flag: when True, skip the loggers and return a dict of average
            losses (used when sweeping over multiple models for analysis).
        test_vis: when True, also report clean segmentation accuracy.
        norm: 'Linf' or 'l2' attack norm.

    Returns:
        When ``test_flag`` is True, a dict of per-task average losses (the
        'segmentsemantic' entry also carries iou/seg_acc); otherwise None.
    """
    assert len(task_name) > 0
    avg_losses = {}
    num_classes = args.classes
    hist = np.zeros((num_classes, num_classes))  # confusion counts for mIoU
    for c_name, criterion_fun in criterion.items():
        avg_losses[c_name] = AverageMeter()
    seg_accuracy = AverageMeter()
    seg_clean_accuracy = AverageMeter()
    model.eval()  # essential so batchnorm (and dropout) run in inference mode
    print("using norm type", norm)
    for i, (input, target, mask) in enumerate(val_loader):
        if test_vis:
            clean_output = model(Variable(input.cuda(), requires_grad=False))
            seg_clean_accuracy.update(accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
                                      input.size(0))
        # A zero-step or zero-step-size attack degenerates to no perturbation.
        if args.steps == 0 or args.step_size == 0:
            args.epsilon = 0
        if norm == 'Linf':
            if args.dataset == 'taskonomy':
                adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon,
                                            args.steps, args.dataset, args.step_size, info, args,
                                            using_noise=True)
            elif args.dataset == 'cityscape':
                adv_img = PGD_attack_mtask_city(input, target, mask, model, criterion, task_name, args.epsilon,
                                                 args.steps, args.dataset, args.step_size, info, args,
                                                 using_noise=True)
        elif norm == 'l2':
            adv_img = PGD_attack_mtask_L2(input, target, mask, model, criterion, task_name, args.epsilon,
                                           args.steps, args.dataset, args.step_size)
        image_var = adv_img.data
        if torch.cuda.is_available():
            image_var = image_var.cuda()
            for keys, m in mask.items():
                mask[keys] = m.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()
        with torch.no_grad():
            output = model(image_var)
            sum_loss = None
            loss_dict = {}
            for c_name, criterion_fun in criterion.items():
                this_loss = criterion_fun(output[c_name].float(), target[c_name], mask[c_name])
                if sum_loss is None:
                    sum_loss = this_loss
                else:
                    sum_loss = sum_loss + this_loss
                loss_dict[c_name] = this_loss
                avg_losses[c_name].update(loss_dict[c_name].data.item(), input.size(0))
            if 'segmentsemantic' in criterion.keys():
                # Pixel accuracy and confusion histogram for mIoU.
                # (Fix: this block previously ran a second time per batch
                # outside the no_grad scope, redundantly doubling the
                # accuracy/hist updates.)
                seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()), input.size(0))
                class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
                target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target['segmentsemantic'].data.numpy()
                class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
                hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
            # Periodically visualize the clean/adversarial image pair and
            # the segmentation prediction on the adversarial input.
            # (Guarded on 'segmentsemantic' so non-segmentation runs don't crash.)
            if i % 500 == 0 and 'segmentsemantic' in criterion.keys():
                class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
                decoded_target = decode_segmap(
                    target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available() else
                    target['segmentsemantic'][0][0].data.numpy(),
                    args.dataset)
                decoded_target = np.moveaxis(decoded_target, 2, 0)
                decoded_class_prediction = decode_segmap(
                    class_prediction[0].cpu().data.numpy() if torch.cuda.is_available() else class_prediction[
                        0].data.numpy(), args.dataset)
                decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
                if not test_flag:
                    writer.add_image('Val/image clean ', back_transform(input, info)[0])
                    writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
                    writer.add_image('Val/image gt for adv ', decoded_target)
                    writer.add_image('Val/image adv prediction ', decoded_class_prediction)
        if args.debug:
            if i > 1:
                break
    if test_vis:
        print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))
    str_attack_result = ''
    str_not_attacked_task_result = ''
    for keys, loss_term in criterion.items():
        if keys in task_name:
            str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
        else:
            str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys, loss=avg_losses[keys])
    # Tensorboard / comet logging (skipped entirely in test mode).
    if not test_flag:
        for keys, _ in criterion.items():
            if keys in task_name:
                writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
                if comet is not None: comet.log_metric('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
            else:
                writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
                if comet is not None: comet.log_metric('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg)
    if 'segmentsemantic' in criterion.keys():
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        mIoU = round(np.nanmean(ious), 2)
        str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
        str_attack_result += ' Segment ===> mAP {}\n'.format(mIoU)
        if comet is not None: comet.log_metric('segmentsemantic Attacked IOU', mIoU)
        if comet is not None: comet.log_metric('segmentsemantic Attacked Score', seg_accuracy)
    print('clean task')
    print(str_not_attacked_task_result)
    if test_flag:
        dict_losses = {}
        for key, loss_term in criterion.items():
            dict_losses[key] = avg_losses[key].avg
        if 'segmentsemantic' in criterion.keys():
            dict_losses['segmentsemantic'] = {'iou' : mIoU,
                                              'loss' : avg_losses['segmentsemantic'].avg,
                                              'seg_acc': seg_accuracy.avg}
        print("These losses are returned", dict_losses)
        # Compute the dictionary of losses that we want.
        # Desired: {'segmentsemantic': [mIoU, cel], 'keypoints2d': acc, ...}
        return dict_losses
def mtask_test_all(val_loader, model, criterion, task_name, all_task_name_list, args, info, writer=None, epoch=0,
                   test_flag=False, test_vis=False):
    """Evaluate under a PGD attack on ``task_name`` while collecting
    cross-task input-gradient statistics over ``all_task_name_list``.

    ``task_name`` keeps its caller-given order; by convention the task
    under attack is placed first in ``all_task_name_list`` so it maps to
    row/column 0 of the cosine matrix.

    Per batch, each task's loss gradient w.r.t. the input is computed and
    L2-normalized. The strict upper triangle of the cosine matrix holds
    pairwise dot products between tasks (cosine similarities, since the
    gradients are normalized); the diagonal holds each task's dot product
    with the normalized joint (summed-loss) gradient.

    Returns (only when ``test_flag`` is True):
        (dict_losses, matrix_cos_all, grad_norm_joint_all,
         grad_norm_list_all) — the matrices/norms are unnormalized sums
        over all evaluated batches.
    """
    assert len(task_name) > 0
    avg_losses = {}
    num_classes = args.classes
    hist = np.zeros((num_classes, num_classes))
    num_of_tasks = len(all_task_name_list)
    for c_name, criterion_fun in criterion.items():
        avg_losses[c_name] = AverageMeter()
    seg_accuracy = AverageMeter()
    seg_clean_accuracy = AverageMeter()
    matrix_cos_all = np.zeros((num_of_tasks, num_of_tasks))
    matrix_cos = np.zeros((num_of_tasks, num_of_tasks))
    grad_norm_list_all = np.zeros((num_of_tasks))
    grad_norm_list = np.zeros((num_of_tasks))
    grad_norm_joint_all = 0
    model.eval()  # this is super important for correct including the batchnorm
    for i, (input, target, mask) in enumerate(val_loader):
        if test_vis:
            clean_output = model(Variable(input.cuda(), requires_grad=False))
            seg_clean_accuracy.update(
                accuracy(clean_output['segmentsemantic'], target['segmentsemantic'].long().cuda()),
                input.size(0))
        adv_img = PGD_attack_mtask(input, target, mask, model, criterion, task_name, args.epsilon, args.steps,
                                    args.dataset,
                                    args.step_size, info, args, using_noise=True)
        image_var = adv_img.data
        grad_list = []
        if torch.cuda.is_available():
            for keys, tar in mask.items():
                mask[keys] = tar.cuda()
            input = input.cuda()
            for keys, tar in target.items():
                target[keys] = tar.cuda()
        total_grad = None
        # One backward pass per task to get each task's input gradient.
        for jj, each in enumerate(all_task_name_list):
            input_var = torch.autograd.Variable(input, requires_grad=True)
            output = model(input_var)
            loss_task = criterion[each](output[each], target[each], mask[each])
            loss_task.backward()
            grad = input_var.grad.cpu().numpy()
            grad_norm_list[jj] = np.linalg.norm(grad)
            grad_normalized = grad / np.linalg.norm(grad)
            grad_list.append(grad_normalized)
        # One more pass with all task losses summed: the joint gradient.
        input_var = torch.autograd.Variable(input, requires_grad=True)
        output = model(input_var)
        total_loss = 0
        for jj, each in enumerate(all_task_name_list):
            total_loss = total_loss + criterion[each](output[each], target[each], mask[each])
        total_loss.backward()
        total_grad = input_var.grad.cpu().numpy()
        grad_norm_joint_all += np.linalg.norm(total_grad)
        total_grad = total_grad / np.linalg.norm(
            total_grad)  # TODO: this is crucial for preventing GPU memory leak,
        # Upper triangle: task-vs-task cosine; diagonal: task-vs-joint cosine.
        for row in range(num_of_tasks):
            for column in range(num_of_tasks):
                if row < column:
                    matrix_cos[row, column] = np.sum(np.multiply(grad_list[row], grad_list[column]))
                elif row == column:
                    matrix_cos[row, row] = np.sum(np.multiply(grad_list[row], total_grad))
        matrix_cos_all = matrix_cos_all + matrix_cos
        grad_norm_list_all = grad_norm_list_all + grad_norm_list
        # Evaluate every criterion on the adversarial image.
        with torch.no_grad():
            output = model(image_var)
            for c_name, criterion_fun in criterion.items():
                avg_losses[c_name].update(criterion_fun(output[c_name], target[c_name], mask[c_name]).data.item(), input.size(0))
            if 'segmentsemantic' in criterion.keys():
                # Pixel accuracy plus confusion histogram for mIoU.
                seg_accuracy.update(accuracy(output['segmentsemantic'], target['segmentsemantic'].long()),
                                    input.size(0))
                class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
                target_seg = target['segmentsemantic'].cpu().data.numpy() if torch.cuda.is_available() else target[
                    'segmentsemantic'].data.numpy()
                class_prediction = class_prediction.cpu().data.numpy() if torch.cuda.is_available() else class_prediction.data.numpy()
                hist += fast_hist(class_prediction.flatten(), target_seg.flatten(), num_classes)
            # Periodic visualization of the adversarial batch.
            if i % 500 == 0:
                class_prediction = torch.argmax(output['segmentsemantic'], dim=1)
                decoded_target = decode_segmap(
                    target['segmentsemantic'][0][0].cpu().data.numpy() if torch.cuda.is_available() else
                    target['segmentsemantic'][0][0].data.numpy(),
                    args.dataset)
                decoded_target = np.moveaxis(decoded_target, 2, 0)
                decoded_class_prediction = decode_segmap(
                    class_prediction[0].cpu().data.numpy() if torch.cuda.is_available() else class_prediction[
                        0].data.numpy(), args.dataset)
                decoded_class_prediction = np.moveaxis(decoded_class_prediction, 2, 0)
                if not test_flag:
                    writer.add_image('Val/image clean ', back_transform(input, info)[0])
                    writer.add_image('Val/image adv ', back_transform(adv_img, info)[0])
                    writer.add_image('Val/image gt for adv ', decoded_target)
                    writer.add_image('Val/image adv prediction ', decoded_class_prediction)
        if args.debug:
            if i > 1:
                break
    if test_vis:
        print("clean seg accuracy: {}".format(seg_clean_accuracy.avg))
    str_attack_result = ''
    str_not_attacked_task_result = ''
    for keys, loss_term in criterion.items():
        if keys in task_name:
            str_attack_result += 'Attacked Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys,
                                                                                              loss=avg_losses[keys])
        else:
            str_not_attacked_task_result += 'Not att Task Loss: {} {loss.val:.4f} ({loss.avg:.4f})\t'.format(keys,
                                                                                                             loss=
                                                                                                             avg_losses[
                                                                                                                 keys])
    # Tensorboard logger (skipped in test mode).
    if not test_flag:
        for keys, _ in criterion.items():
            if keys in task_name:
                writer.add_scalar('Val Adv Attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg, epoch)
            else:
                writer.add_scalar('Val Adv not attacked Task/ Avg Loss {}'.format(keys), avg_losses[keys].avg,
                                  epoch)
    if 'segmentsemantic' in criterion.keys():
        ious = per_class_iu(hist) * 100
        logger.info(' '.join('{:.03f}'.format(i) for i in ious))
        mIoU = round(np.nanmean(ious), 2)
        str_attack_result += '\n Segment Score ({score.avg:.3f}) \t'.format(score=seg_accuracy)
        str_attack_result += ' Segment ===> mAP {}\n'.format(mIoU)
    print('clean task')
    print(str_not_attacked_task_result)
    if test_flag:
        dict_losses = {}
        for key, loss_term in criterion.items():
            dict_losses[key] = avg_losses[key].avg
        if 'segmentsemantic' in criterion.keys():
            dict_losses['segmentsemantic'] = {'iou': mIoU,
                                              'loss': avg_losses['segmentsemantic'].avg,
                                              'seg_acc': seg_accuracy.avg}
        print("These losses are returned", dict_losses)
        return dict_losses, matrix_cos_all, grad_norm_joint_all, grad_norm_list_all
# the matrix, here, task under attack is the first
| [
"logging.basicConfig",
"logging.getLogger",
"learning.attack.PGD_attack_mtask_L2",
"numpy.multiply",
"learning.attack.PGD_attack_mtask",
"learning.attack.PGD_attack_mtask_city",
"numpy.nanmean",
"numpy.zeros",
"torch.cuda.is_available",
"numpy.linalg.norm",
"numpy.moveaxis",
"torch.no_grad",
... | [((647, 681), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT'}), '(format=FORMAT)\n', (666, 681), False, 'import logging\n'), ((691, 718), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (708, 718), False, 'import logging\n'), ((5163, 5199), 'numpy.zeros', 'np.zeros', (['(num_classes, num_classes)'], {}), '((num_classes, num_classes))\n', (5171, 5199), True, 'import numpy as np\n'), ((14106, 14142), 'numpy.zeros', 'np.zeros', (['(num_classes, num_classes)'], {}), '((num_classes, num_classes))\n', (14114, 14142), True, 'import numpy as np\n'), ((14380, 14418), 'numpy.zeros', 'np.zeros', (['(num_of_tasks, num_of_tasks)'], {}), '((num_of_tasks, num_of_tasks))\n', (14388, 14418), True, 'import numpy as np\n'), ((14436, 14474), 'numpy.zeros', 'np.zeros', (['(num_of_tasks, num_of_tasks)'], {}), '((num_of_tasks, num_of_tasks))\n', (14444, 14474), True, 'import numpy as np\n'), ((14501, 14523), 'numpy.zeros', 'np.zeros', (['num_of_tasks'], {}), '(num_of_tasks)\n', (14509, 14523), True, 'import numpy as np\n'), ((14547, 14569), 'numpy.zeros', 'np.zeros', (['num_of_tasks'], {}), '(num_of_tasks)\n', (14555, 14569), True, 'import numpy as np\n'), ((1035, 1060), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1058, 1060), False, 'import torch\n'), ((1237, 1287), 'torch.autograd.Variable', 'torch.autograd.Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (1260, 1287), False, 'import torch\n'), ((4191, 4219), 'numpy.linalg.norm', 'np.linalg.norm', (['np_data_grad'], {}), '(np_data_grad)\n', (4205, 4219), True, 'import numpy as np\n'), ((6894, 6919), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6917, 6919), False, 'import torch\n'), ((15030, 15191), 'learning.attack.PGD_attack_mtask', 'PGD_attack_mtask', (['input', 'target', 'mask', 'model', 'criterion', 'task_name', 'args.epsilon', 'args.steps', 'args.dataset', 'args.step_size', 
'info', 'args'], {'using_noise': '(True)'}), '(input, target, mask, model, criterion, task_name, args.\n epsilon, args.steps, args.dataset, args.step_size, info, args,\n using_noise=True)\n', (15046, 15191), False, 'from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city\n'), ((15455, 15480), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15478, 15480), False, 'import torch\n'), ((16284, 16334), 'torch.autograd.Variable', 'torch.autograd.Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (16307, 16334), False, 'import torch\n'), ((16654, 16680), 'numpy.linalg.norm', 'np.linalg.norm', (['total_grad'], {}), '(total_grad)\n', (16668, 16680), True, 'import numpy as np\n'), ((7209, 7224), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7222, 7224), False, 'import torch\n'), ((8045, 8091), 'torch.argmax', 'torch.argmax', (["output['segmentsemantic']"], {'dim': '(1)'}), "(output['segmentsemantic'], dim=1)\n", (8057, 8091), False, 'import torch\n'), ((10565, 10611), 'torch.argmax', 'torch.argmax', (["output['segmentsemantic']"], {'dim': '(1)'}), "(output['segmentsemantic'], dim=1)\n", (10577, 10611), False, 'import torch\n'), ((12410, 12426), 'numpy.nanmean', 'np.nanmean', (['ious'], {}), '(ious)\n', (12420, 12426), True, 'import numpy as np\n'), ((15791, 15841), 'torch.autograd.Variable', 'torch.autograd.Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (15814, 15841), False, 'import torch\n'), ((16136, 16156), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (16150, 16156), True, 'import numpy as np\n'), ((16715, 16741), 'numpy.linalg.norm', 'np.linalg.norm', (['total_grad'], {}), '(total_grad)\n', (16729, 16741), True, 'import numpy as np\n'), ((17294, 17309), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17307, 17309), False, 'import torch\n'), ((18333, 18379), 'torch.argmax', 'torch.argmax', 
(["output['segmentsemantic']"], {'dim': '(1)'}), "(output['segmentsemantic'], dim=1)\n", (18345, 18379), False, 'import torch\n'), ((21529, 21545), 'numpy.nanmean', 'np.nanmean', (['ious'], {}), '(ious)\n', (21539, 21545), True, 'import numpy as np\n'), ((6000, 6161), 'learning.attack.PGD_attack_mtask', 'PGD_attack_mtask', (['input', 'target', 'mask', 'model', 'criterion', 'task_name', 'args.epsilon', 'args.steps', 'args.dataset', 'args.step_size', 'info', 'args'], {'using_noise': '(True)'}), '(input, target, mask, model, criterion, task_name, args.\n epsilon, args.steps, args.dataset, args.step_size, info, args,\n using_noise=True)\n', (6016, 6161), False, 'from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city\n'), ((6552, 6682), 'learning.attack.PGD_attack_mtask_L2', 'PGD_attack_mtask_L2', (['input', 'target', 'mask', 'model', 'criterion', 'task_name', 'args.epsilon', 'args.steps', 'args.dataset', 'args.step_size'], {}), '(input, target, mask, model, criterion, task_name, args.\n epsilon, args.steps, args.dataset, args.step_size)\n', (6571, 6682), False, 'from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city\n'), ((8165, 8190), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8188, 8190), False, 'import torch\n'), ((8305, 8330), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8328, 8330), False, 'import torch\n'), ((8524, 8570), 'torch.argmax', 'torch.argmax', (["output['segmentsemantic']"], {'dim': '(1)'}), "(output['segmentsemantic'], dim=1)\n", (8536, 8570), False, 'import torch\n'), ((8914, 8947), 'numpy.moveaxis', 'np.moveaxis', (['decoded_target', '(2)', '(0)'], {}), '(decoded_target, 2, 0)\n', (8925, 8947), True, 'import numpy as np\n'), ((9215, 9258), 'numpy.moveaxis', 'np.moveaxis', (['decoded_class_prediction', '(2)', '(0)'], {}), '(decoded_class_prediction, 2, 0)\n', (9226, 9258), True, 'import numpy as np\n'), ((10685, 10710), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10708, 10710), False, 'import torch\n'), ((10825, 10850), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10848, 10850), False, 'import torch\n'), ((16195, 16215), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (16209, 16215), True, 'import numpy as np\n'), ((18453, 18478), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18476, 18478), False, 'import torch\n'), ((18610, 18635), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (18633, 18635), False, 'import torch\n'), ((18829, 18875), 'torch.argmax', 'torch.argmax', (["output['segmentsemantic']"], {'dim': '(1)'}), "(output['segmentsemantic'], dim=1)\n", (18841, 18875), False, 'import torch\n'), ((19219, 19252), 'numpy.moveaxis', 'np.moveaxis', (['decoded_target', '(2)', '(0)'], {}), '(decoded_target, 2, 0)\n', (19230, 19252), True, 'import numpy as np\n'), ((19520, 19563), 'numpy.moveaxis', 'np.moveaxis', (['decoded_class_prediction', '(2)', '(0)'], {}), '(decoded_class_prediction, 2, 0)\n', (19531, 19563), True, 'import numpy as np\n'), ((6258, 6423), 'learning.attack.PGD_attack_mtask_city', 'PGD_attack_mtask_city', (['input', 'target', 'mask', 'model', 'criterion', 'task_name', 'args.epsilon', 'args.steps', 'args.dataset', 'args.step_size', 'info', 'args'], {'using_noise': '(True)'}), '(input, target, mask, model, criterion, task_name,\n args.epsilon, args.steps, args.dataset, args.step_size, info, args,\n using_noise=True)\n', (6279, 6423), False, 'from learning.attack import PGD_attack_mtask, PGD_attack_mtask_L2, PGD_attack_mtask_city\n'), ((8750, 8775), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8773, 8775), False, 'import torch\n'), ((9068, 9093), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9091, 9093), False, 'import torch\n'), ((16986, 17032), 'numpy.multiply', 'np.multiply', 
(['grad_list[row]', 'grad_list[column]'], {}), '(grad_list[row], grad_list[column])\n', (16997, 17032), True, 'import numpy as np\n'), ((19055, 19080), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19078, 19080), False, 'import torch\n'), ((19373, 19398), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19396, 19398), False, 'import torch\n'), ((17120, 17159), 'numpy.multiply', 'np.multiply', (['grad_list[row]', 'total_grad'], {}), '(grad_list[row], total_grad)\n', (17131, 17159), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.nn import Module
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import torchnlp.nn as nlpnn
class Net(Module):
    """LSTM regressor with an optional attention layer over the LSTM outputs."""

    def __init__(self, config, attention_net=False):
        """Build the network from *config*.

        :param config: object exposing input_size, hidden_size, lstm_layers,
            dropout_rate, output_size and time_step.
        :param attention_net: if True, route LSTM outputs through an
            attention layer before the final linear projection.
        """
        super(Net, self).__init__()
        self.lstm = nn.LSTM(input_size=config.input_size, hidden_size=config.hidden_size,
                            num_layers=config.lstm_layers, batch_first=True,
                            dropout=config.dropout_rate)
        self.attention = nlpnn.Attention(config.hidden_size)
        self.attention_linear = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size)
        self.linear = nn.Linear(in_features=config.hidden_size, out_features=config.output_size)
        self.hidden_size = config.hidden_size  # e.g. 128
        self.time_step = config.time_step      # e.g. 20
        self.attention_net = attention_net     # True: use the attention branch in forward()

    def forward(self, x, hidden=None):
        """Run a forward pass.

        :param x: input batch, shape (batch, time_step, input_size).
        :param hidden: optional (h, c) LSTM state.
        :return: (prediction, final LSTM hidden state)
        """
        lstm_out, hidden = self.lstm(x, hidden)
        if self.attention_net:
            # Bug fix: create the query on the same device/dtype as the input
            # so the attention branch also works when the model is on GPU.
            query = self.attention_linear(
                torch.ones(x.shape[0], self.time_step, self.hidden_size,
                           device=x.device, dtype=x.dtype))
            attention_out, _ = self.attention(query, lstm_out)
            y = self.linear(attention_out)
        else:
            y = self.linear(lstm_out)
        return y, hidden
def train(x_train, y_train, config, attention_net=False):
    """Train a Net on the given arrays and persist its weights.

    :param x_train: training inputs, shape (samples, time_step, features).
    :param y_train: training targets (same leading dimension as x_train).
    :param config: configuration object (batch size, learning rate, epoch
        counts, model save path, ...).
    :param attention_net: forwarded to Net; also selects the epoch budget.
    :return: (per-iteration losses, per-epoch mean losses)
    """
    print("Start training ...")
    device = torch.device("cuda:0" if config.use_cuda and torch.cuda.is_available() else "cpu")
    net = Net(config, attention_net).to(device)
    train_x = torch.from_numpy(x_train).float()
    train_y = torch.from_numpy(y_train).float()
    train_loader = DataLoader(TensorDataset(train_x, train_y), batch_size=config.batch_size)
    # Attention runs use their own epoch budget.
    epoches = config.epoch_attention if attention_net else config.epoch
    optimizer = optim.Adam(net.parameters(), lr=config.learning_rate)
    criterion = nn.MSELoss()
    losses = []    # loss of every iteration
    epc_mean = []  # mean loss of every epoch
    for epoch in range(epoches):
        epoch_loss = []
        hidden = None
        for batch_x, labels in train_loader:
            batch_x, labels = batch_x.to(device), labels.to(device)
            optimizer.zero_grad()
            y_pred, hidden = net(batch_x, hidden)
            # Batches are treated as independent sequences, so the hidden
            # state is not carried across iterations.
            hidden = None
            loss = criterion(y_pred, labels)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            epoch_loss.append(loss.item())
        cur_loss = float(np.mean(epoch_loss))
        if cur_loss < 0.00017:  # early-stopping threshold on the epoch mean
            break
        # Bug fix: report the actual epoch budget (epoches), which differs
        # from config.epoch when attention_net is True.
        print("Epoch {}/{}".format(epoch + 1, epoches), " Train Loss :{}".format(cur_loss))
        epc_mean.append(cur_loss)
    torch.save(net.state_dict(), config.model_save_path + config.model_name)
    print('Finished Training Trainset')
    print('Net parameters are saved at {}'.format(config.model_save_path + config.model_name))
    return losses, epc_mean
def loss_plot(losses):
    """Render the training-loss history as a simple line chart."""
    plt.plot(losses)
    plt.title('Training Loss history')
    plt.xlabel('epoch')
    plt.ylabel('training loss')
    plt.show()
def predict(x_test, config, attention_net=False):
    """Load the saved Net and run inference on x_test.

    :param x_test: test inputs, shape (samples, time_step, features).
    :param config: configuration object (model path, label_columns, ...).
    :param attention_net: forwarded to Net (must match the trained model).
    :return: numpy array of shape (samples, len(config.label_columns)).
    """
    device = torch.device("cuda:0" if config.use_cuda and torch.cuda.is_available() else "cpu")
    y_hat = []
    test_X = torch.from_numpy(x_test).float()
    test_set = TensorDataset(test_X)
    test_loader = DataLoader(test_set, batch_size=1)
    net = Net(config, attention_net).to(device)
    net.load_state_dict(torch.load(config.model_save_path + config.model_name))
    net.eval()
    # Generalized: emit one value per configured label column instead of
    # hard-coding two outputs.
    n_outputs = len(config.label_columns)
    hidden = None
    with torch.no_grad():  # inference only; no gradients needed
        for data in test_loader:
            x = data[0].to(device)
            y, hidden = net(x, hidden)
            hidden = None  # sequences are independent; reset the state
            # Keep the last time step of the single batch element.
            y_hat.append([y[0][-1][j].item() for j in range(n_outputs)])
    return np.array(y_hat)
def up_down_accuracy(y_true, y_pred):
    """Fraction of steps where the predicted price-movement direction
    (up / down / flat) matches the real one.

    :param y_true: sequence of real prices.
    :param y_pred: sequence of predicted prices (same length).
    :return: match ratio in [0, 1]; 0.0 when fewer than two points.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    # Day-over-day changes; the sign encodes the movement direction.
    true_moves = np.sign(np.diff(y_true))
    pred_moves = np.sign(np.diff(y_pred))
    matches = true_moves == pred_moves
    if matches.size == 0:
        # Guard: with <2 points there is no movement to compare.
        return 0.0
    return np.sum(matches) / len(matches)
def evaluate(y_pred, y_test, data_gainer, days=100):
    """Report RMSE/MAPE/MBE and directional accuracy for open and close
    price predictions, then plot predicted vs. real prices.

    :param y_pred: normalized predictions, shape (samples, 2) [open, close].
    :param y_test: ground-truth prices, shape (samples, 2).
    :param data_gainer: provides .mean / .std used to de-normalize y_pred.
    :param days: number of leading days to plot.
    """
    labels_open = [y_test[i][0] for i in range(y_test.shape[0])]
    labels_close = [y_test[i][1] for i in range(y_test.shape[0])]

    def _metrics(labels, preds):
        # Root Mean Square Error, Mean Absolute Percentage Error, Mean Bias Error.
        labels = np.array(labels)
        rmse = np.sqrt(np.sum((labels - preds) ** 2) / len(labels))
        mape = np.sum((labels - preds) / labels) / len(labels) * 100
        mbe = np.sum(labels - preds) / len(labels)
        return rmse, mape, mbe

    print("###############################################################")
    print("Evaluation of open price predction on test set:")
    # De-normalize the open-price predictions.
    y_pred_0 = y_pred[:, 0] * data_gainer.std[0] + data_gainer.mean[0]
    RMSE, MAPE, MBE = _metrics(labels_open, y_pred_0)
    print("RMSE on validation set is {}".format(RMSE))
    print("MAPE on validation set is {}".format(MAPE))
    print("MBE on validation set is {}".format(MBE))
    up_down_accu = up_down_accuracy(labels_open, y_pred_0)
    # Bug fix: round to 2 decimals — the literal 2 was previously passed to
    # format() (which ignored it) instead of to round().
    print("Up and down accuracy on validation set is {}%".format(round(up_down_accu * 100, 2)))
    plt.subplot(2, 1, 1)
    plt.xlabel('Days')
    plt.ylabel('Price')
    plt.title('Evaluation of Open prices on test set for 100 days')
    plt.plot(y_pred_0.tolist()[:days], 'r', label="predict")
    plt.plot(labels_open[:days], 'b', label="real")
    plt.legend(loc="upper right")

    print("###############################################################")
    print("Evaluation of close price predction on valid set:")
    # De-normalize the close-price predictions.
    y_pred_1 = y_pred[:, 1] * data_gainer.std[1] + data_gainer.mean[1]
    RMSE, MAPE, MBE = _metrics(labels_close, y_pred_1)
    print("RMSE on validation set is {}".format(RMSE))
    print("MAPE on validation set is {}%".format(MAPE))
    print("MBE on validation set is {}".format(MBE))
    up_down_accu = up_down_accuracy(labels_close, y_pred_1)
    print("Up and down accuracy on validation set is {}%".format(round(up_down_accu * 100, 2)))
    plt.subplot(2, 1, 2)
    plt.xlabel('Days')
    plt.ylabel('Price')
    plt.title('Evaluation of Close prices on test set for 100 days')
    plt.plot(y_pred_1.tolist()[:days], 'r', label="predict close")
    plt.plot(labels_close[:days], 'b', label="real close")
    plt.legend(loc="upper right")
plt.show() | [
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"torchnlp.nn.Attention",
"torch.nn.LSTM",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.utils.data.TensorDataset",
"numpy.sign",
"matplotlib.pyplot.title",
"matplotlib... | [((2094, 2106), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2104, 2106), True, 'import torch.nn as nn\n'), ((3352, 3368), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (3360, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3392), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3383, 3392), True, 'import matplotlib.pyplot as plt\n'), ((3397, 3424), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""training loss"""'], {}), "('training loss')\n", (3407, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3429, 3463), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss history"""'], {}), "('Training Loss history')\n", (3438, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3478), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3476, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3771, 3792), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_X'], {}), '(test_X)\n', (3784, 3792), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3811, 3845), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': '(1)'}), '(test_set, batch_size=1)\n', (3821, 3845), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((4323, 4338), 'numpy.array', 'np.array', (['y_hat'], {}), '(y_hat)\n', (4331, 4338), True, 'import numpy as np\n'), ((4391, 4407), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (4399, 4407), True, 'import numpy as np\n'), ((4421, 4437), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4429, 4437), True, 'import numpy as np\n'), ((5915, 5935), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5926, 5935), True, 'import matplotlib.pyplot as plt\n'), ((5938, 5956), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (5948, 5956), True, 'import matplotlib.pyplot as plt\n'), ((5961, 5980), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (5971, 5980), True, 'import matplotlib.pyplot as plt\n'), ((5985, 6048), 'matplotlib.pyplot.title', 'plt.title', (['"""Evaluation of Open prices on test set for 100 days"""'], {}), "('Evaluation of Open prices on test set for 100 days')\n", (5994, 6048), True, 'import matplotlib.pyplot as plt\n'), ((6114, 6161), 'matplotlib.pyplot.plot', 'plt.plot', (['labels_open[:days]', '"""b"""'], {'label': '"""real"""'}), "(labels_open[:days], 'b', label='real')\n", (6122, 6161), True, 'import matplotlib.pyplot as plt\n'), ((6166, 6195), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6176, 6195), True, 'import matplotlib.pyplot as plt\n'), ((7185, 7205), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (7196, 7205), True, 'import matplotlib.pyplot as plt\n'), ((7210, 7228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (7220, 7228), True, 'import matplotlib.pyplot as plt\n'), ((7233, 7252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (7243, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7321), 'matplotlib.pyplot.title', 'plt.title', (['"""Evaluation of Close prices on test set for 100 days"""'], {}), "('Evaluation of Close prices on test set for 100 days')\n", (7266, 7321), True, 'import matplotlib.pyplot as plt\n'), ((7394, 7448), 'matplotlib.pyplot.plot', 'plt.plot', (['labels_close[:days]', '"""b"""'], {'label': '"""real close"""'}), "(labels_close[:days], 'b', label='real close')\n", (7402, 7448), True, 'import matplotlib.pyplot as plt\n'), ((7453, 7482), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (7463, 7482), True, 'import matplotlib.pyplot as plt\n'), ((7487, 7497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7495, 7497), True, 'import 
matplotlib.pyplot as plt\n'), ((356, 512), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'config.input_size', 'hidden_size': 'config.hidden_size', 'num_layers': 'config.lstm_layers', 'batch_first': '(True)', 'dropout': 'config.dropout_rate'}), '(input_size=config.input_size, hidden_size=config.hidden_size,\n num_layers=config.lstm_layers, batch_first=True, dropout=config.\n dropout_rate)\n', (363, 512), True, 'import torch.nn as nn\n'), ((585, 620), 'torchnlp.nn.Attention', 'nlpnn.Attention', (['config.hidden_size'], {}), '(config.hidden_size)\n', (600, 620), True, 'import torchnlp.nn as nlpnn\n'), ((653, 727), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'config.hidden_size', 'out_features': 'config.hidden_size'}), '(in_features=config.hidden_size, out_features=config.hidden_size)\n', (662, 727), True, 'import torch.nn as nn\n'), ((750, 824), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'config.hidden_size', 'out_features': 'config.output_size'}), '(in_features=config.hidden_size, out_features=config.output_size)\n', (759, 824), True, 'import torch.nn as nn\n'), ((1776, 1807), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (1789, 1807), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3919, 3973), 'torch.load', 'torch.load', (['(config.model_save_path + config.model_name)'], {}), '(config.model_save_path + config.model_name)\n', (3929, 3973), False, 'import torch\n'), ((2880, 2900), 'numpy.array', 'np.array', (['epoch_loss'], {}), '(epoch_loss)\n', (2888, 2900), True, 'import numpy as np\n'), ((3723, 3747), 'torch.from_numpy', 'torch.from_numpy', (['x_test'], {}), '(x_test)\n', (3739, 3747), False, 'import torch\n'), ((4632, 4654), 'numpy.sign', 'np.sign', (['y_var_test[i]'], {}), '(y_var_test[i])\n', (4639, 4654), True, 'import numpy as np\n'), ((4656, 4681), 'numpy.sign', 'np.sign', (['y_var_predict[i]'], {}), '(y_var_predict[i])\n', (4663, 4681), True, 'import numpy 
as np\n'), ((1176, 1232), 'torch.ones', 'torch.ones', (['x.shape[0]', 'self.time_step', 'self.hidden_size'], {}), '(x.shape[0], self.time_step, self.hidden_size)\n', (1186, 1232), False, 'import torch\n'), ((1567, 1592), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1590, 1592), False, 'import torch\n'), ((1677, 1702), 'torch.from_numpy', 'torch.from_numpy', (['x_train'], {}), '(x_train)\n', (1693, 1702), False, 'import torch\n'), ((1712, 1737), 'torch.from_numpy', 'torch.from_numpy', (['y_train'], {}), '(y_train)\n', (1728, 1737), False, 'import torch\n'), ((3589, 3614), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3612, 3614), False, 'import torch\n'), ((5538, 5559), 'numpy.array', 'np.array', (['labels_open'], {}), '(labels_open)\n', (5546, 5559), True, 'import numpy as np\n'), ((6804, 6826), 'numpy.array', 'np.array', (['labels_close'], {}), '(labels_close)\n', (6812, 6826), True, 'import numpy as np\n'), ((5450, 5471), 'numpy.array', 'np.array', (['labels_open'], {}), '(labels_open)\n', (5458, 5471), True, 'import numpy as np\n'), ((6714, 6736), 'numpy.array', 'np.array', (['labels_close'], {}), '(labels_close)\n', (6722, 6736), True, 'import numpy as np\n'), ((5298, 5319), 'numpy.array', 'np.array', (['labels_open'], {}), '(labels_open)\n', (5306, 5319), True, 'import numpy as np\n'), ((5414, 5435), 'numpy.array', 'np.array', (['labels_open'], {}), '(labels_open)\n', (5422, 5435), True, 'import numpy as np\n'), ((6559, 6581), 'numpy.array', 'np.array', (['labels_close'], {}), '(labels_close)\n', (6567, 6581), True, 'import numpy as np\n'), ((6677, 6699), 'numpy.array', 'np.array', (['labels_close'], {}), '(labels_close)\n', (6685, 6699), True, 'import numpy as np\n')] |
"""Acyclic Graph Generator.
Generates a dataset out of an acyclic FCM.
Author : <NAME> and <NAME>
.. MIT License
..
.. Copyright (c) 2018 <NAME>
..
.. Permission is hereby granted, free of charge, to any person obtaining a copy
.. of this software and associated documentation files (the "Software"), to deal
.. in the Software without restriction, including without limitation the rights
.. to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
.. copies of the Software, and to permit persons to whom the Software is
.. furnished to do so, subject to the following conditions:
..
.. The above copyright notice and this permission notice shall be included in all
.. copies or substantial portions of the Software.
..
.. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
.. IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
.. FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
.. AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
.. LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
.. OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
.. SOFTWARE.
"""
from sklearn.preprocessing import scale
import numpy as np
import pandas as pd
import networkx as nx
from .causal_mechanisms import (LinearMechanism,
Polynomial_Mechanism,
SigmoidAM_Mechanism,
SigmoidMix_Mechanism,
GaussianProcessAdd_Mechanism,
GaussianProcessMix_Mechanism,
NN_Mechanism,
gmm_cause, normal_noise)
class AcyclicGraphGenerator(object):
    """Generates a cross-sectional dataset out of an acyclic FCM."""

    def __init__(self, causal_mechanism, noise=normal_noise,
                 noise_coeff=.4,
                 initial_variable_generator=gmm_cause,
                 points=500, nodes=20, parents_max=5):
        """Generate an acyclic graph, given a causal mechanism.

        :param causal_mechanism: generating causes in the graph; one of
            ['linear', 'polynomial', 'sigmoid_add', 'sigmoid_mix',
            'gp_add', 'gp_mix', 'NN']
        :param noise: callable producing the noise of each mechanism
        :param noise_coeff: amplitude of the noise
        :param initial_variable_generator: init variables of the graph
        :param points: number of data points to generate
        :param nodes: number of variables in the graph
        :param parents_max: maximum number of parents per node
        """
        super(AcyclicGraphGenerator, self).__init__()
        self.mechanism = {'linear': LinearMechanism,
                          'polynomial': Polynomial_Mechanism,
                          'sigmoid_add': SigmoidAM_Mechanism,
                          'sigmoid_mix': SigmoidMix_Mechanism,
                          'gp_add': GaussianProcessAdd_Mechanism,
                          'gp_mix': GaussianProcessMix_Mechanism,
                          'NN': NN_Mechanism}[causal_mechanism]
        self.data = pd.DataFrame(None, columns=["V{}".format(i) for i in range(nodes)])
        self.nodes = nodes
        self.points = points
        # Bug fix: honor the user-supplied noise generator instead of
        # always using normal_noise.
        self.noise = noise
        self.noise_coeff = noise_coeff
        self.adjacency_matrix = np.zeros((nodes, nodes))
        self.parents_max = parents_max
        self.initial_generator = initial_variable_generator
        self.cfunctions = None
        self.g = None

    def init_variables(self, verbose=False):
        """Redefine the causes of the graph."""
        # Bug fix: start from an empty graph so a regeneration pass does not
        # accumulate edges on top of the previous (invalid) attempt.
        self.adjacency_matrix = np.zeros((self.nodes, self.nodes))
        # Only allow edges i -> j with i < j: the matrix stays upper
        # triangular, which already guarantees acyclicity.
        for j in range(1, self.nodes):
            nb_parents = np.random.randint(0, min([self.parents_max, j]) + 1)
            for i in np.random.choice(range(0, j), nb_parents, replace=False):
                self.adjacency_matrix[i, j] = 1

        try:
            self.g = nx.DiGraph(self.adjacency_matrix)
            assert not list(nx.simple_cycles(self.g))
        except AssertionError:
            if verbose:
                print("Regenerating, graph non valid...")
            self.init_variables()
            return  # the recursive call already built the mechanisms

        # One causal mechanism per node with parents, otherwise the
        # root-cause generator.
        self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])),
                                          self.points, self.noise, noise_coeff=self.noise_coeff)
                           if sum(self.adjacency_matrix[:, i])
                           else self.initial_generator for i in range(self.nodes)]

    def generate(self, rescale=True):
        """Generate data from the acyclic FCM.

        :param rescale: if True, standardize each generated column.
        :return: (networkx DiGraph, pandas DataFrame of generated data)
        """
        if self.cfunctions is None:
            self.init_variables()
        for i in nx.topological_sort(self.g):
            if not sum(self.adjacency_matrix[:, i]):
                # Root cause: sample directly from the initial generator.
                self.data['V{}'.format(i)] = self.cfunctions[i](self.points)
            else:
                # Generate the node from the values of its parents.
                self.data['V{}'.format(i)] = self.cfunctions[i](
                    self.data.iloc[:, self.adjacency_matrix[:, i].nonzero()[0]].values)
            if rescale:
                self.data['V{}'.format(i)] = scale(self.data['V{}'.format(i)].values)
        return self.g, self.data

    def to_csv(self, fname_radical, **kwargs):
        """
        Save the data and the target adjacency matrix to two CSV files.

        Optional keyword arguments can be passed to pandas.
        """
        if self.data is not None:
            self.data.to_csv(fname_radical + '_data.csv', index=False, **kwargs)
            pd.DataFrame(self.adjacency_matrix).to_csv(fname_radical + '_target.csv',
                                                   index=False, **kwargs)
        else:
            # Bug fix: the old message embedded the source indentation via a
            # backslash continuation inside the string literal.
            raise ValueError("Graph has not yet been generated. "
                             "Use self.generate() to do so.")
| [
"networkx.topological_sort",
"networkx.DiGraph",
"networkx.simple_cycles",
"numpy.zeros",
"pandas.DataFrame"
] | [((3146, 3170), 'numpy.zeros', 'np.zeros', (['(nodes, nodes)'], {}), '((nodes, nodes))\n', (3154, 3170), True, 'import numpy as np\n'), ((4462, 4489), 'networkx.topological_sort', 'nx.topological_sort', (['self.g'], {}), '(self.g)\n', (4481, 4489), True, 'import networkx as nx\n'), ((3694, 3727), 'networkx.DiGraph', 'nx.DiGraph', (['self.adjacency_matrix'], {}), '(self.adjacency_matrix)\n', (3704, 3727), True, 'import networkx as nx\n'), ((3756, 3780), 'networkx.simple_cycles', 'nx.simple_cycles', (['self.g'], {}), '(self.g)\n', (3772, 3780), True, 'import networkx as nx\n'), ((5302, 5337), 'pandas.DataFrame', 'pd.DataFrame', (['self.adjacency_matrix'], {}), '(self.adjacency_matrix)\n', (5314, 5337), True, 'import pandas as pd\n')] |
"""Difference from running average
with multiprocessing."""
import datetime
import sys
import cv2
import numpy as np
import mpipe
import coils
import util
# Command-line arguments: capture device, frame size and run time.
DEVICE = int(sys.argv[1])      # Capture device index for cv2.VideoCapture.
WIDTH = int(sys.argv[2])       # Requested frame width, in pixels.
HEIGHT = int(sys.argv[3])      # Requested frame height, in pixels.
DURATION = float(sys.argv[4])  # In seconds.
class Step1(mpipe.OrderedWorker):
    """Pipeline worker: maintains a running average of frames and emits the
    per-frame difference from that average."""

    def __init__(self):
        self.image_acc = None    # Running accumulation of frames.
        self.tstamp_prev = None  # Previous iteration's timestamp (drives alpha).

    def doTask(self, image):
        """Compute difference between given image and accumulation,
        then accumulate and return the difference. Initialize accumulation
        if needed (if opacity is 100%.)"""
        # Compute the blending weight from the elapsed time.
        alpha, self.tstamp_prev = util.getAlpha(self.tstamp_prev)

        # Initialize the accumulation buffer on the first frame.
        if self.image_acc is None:
            self.image_acc = np.empty(np.shape(image))

        # Difference between the current frame and the running average.
        image_diff = cv2.absdiff(
            self.image_acc.astype(image.dtype),
            image,
            )

        # Blend the new frame into the running average in place.
        # (Fix: the return value was previously bound to an unused variable.)
        cv2.accumulateWeighted(
            image,
            self.image_acc,
            alpha,
            )
        return image_diff
# Track framerates over sliding windows of the last 1, 5 and 10 seconds.
framerate = coils.RateTicker((1,5,10))

# Create the (resizable) output window.
cv2.namedWindow('diff average 2', cv2.cv.CV_WINDOW_NORMAL)
def step2(image):
"""Display the image, stamped with framerate."""
fps_text = '{:.2f}, {:.2f}, {:.2f} fps'.format(*framerate.tick())
util.writeOSD(image, (fps_text,))
cv2.imshow('diff average 2', image)
cv2.waitKey(1) # Allow HighGUI to process event.
# Assemble the pipeline: Step1 (diff + accumulate) feeds step2 (display).
stage1 = mpipe.Stage(Step1)
stage2 = mpipe.OrderedStage(step2)
stage1.link(stage2)
pipe = mpipe.Pipeline(stage1)

# Create the OpenCV video capture object and request the frame size.
cap = cv2.VideoCapture(DEVICE)
cap.set(3, WIDTH)   # 3 == CV_CAP_PROP_FRAME_WIDTH
cap.set(4, HEIGHT)  # 4 == CV_CAP_PROP_FRAME_HEIGHT

# Run the video capture loop, feeding the image processing pipeline
# until the requested duration elapses.
end = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)
while end > datetime.datetime.now():
    grabbed, image = cap.read()
    if not grabbed:
        # Fix: the success flag was previously ignored, so a failed read
        # would feed a bad frame (None) into the pipeline.
        break
    pipe.put(image)

# Signal processing pipeline to stop.
pipe.put(None)
| [
"coils.RateTicker",
"numpy.shape",
"cv2.accumulateWeighted",
"util.getAlpha",
"util.writeOSD",
"mpipe.Stage",
"cv2.imshow",
"datetime.timedelta",
"datetime.datetime.now",
"cv2.VideoCapture",
"mpipe.OrderedStage",
"mpipe.Pipeline",
"cv2.waitKey",
"cv2.namedWindow"
] | [((1355, 1383), 'coils.RateTicker', 'coils.RateTicker', (['(1, 5, 10)'], {}), '((1, 5, 10))\n', (1371, 1383), False, 'import coils\n'), ((1411, 1469), 'cv2.namedWindow', 'cv2.namedWindow', (['"""diff average 2"""', 'cv2.cv.CV_WINDOW_NORMAL'], {}), "('diff average 2', cv2.cv.CV_WINDOW_NORMAL)\n", (1426, 1469), False, 'import cv2\n'), ((1779, 1797), 'mpipe.Stage', 'mpipe.Stage', (['Step1'], {}), '(Step1)\n', (1790, 1797), False, 'import mpipe\n'), ((1807, 1832), 'mpipe.OrderedStage', 'mpipe.OrderedStage', (['step2'], {}), '(step2)\n', (1825, 1832), False, 'import mpipe\n'), ((1860, 1882), 'mpipe.Pipeline', 'mpipe.Pipeline', (['stage1'], {}), '(stage1)\n', (1874, 1882), False, 'import mpipe\n'), ((1932, 1956), 'cv2.VideoCapture', 'cv2.VideoCapture', (['DEVICE'], {}), '(DEVICE)\n', (1948, 1956), False, 'import cv2\n'), ((1616, 1649), 'util.writeOSD', 'util.writeOSD', (['image', '(fps_text,)'], {}), '(image, (fps_text,))\n', (1629, 1649), False, 'import util\n'), ((1654, 1689), 'cv2.imshow', 'cv2.imshow', (['"""diff average 2"""', 'image'], {}), "('diff average 2', image)\n", (1664, 1689), False, 'import cv2\n'), ((1694, 1708), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1705, 1708), False, 'import cv2\n'), ((2070, 2093), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2091, 2093), False, 'import datetime\n'), ((2096, 2132), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'DURATION'}), '(seconds=DURATION)\n', (2114, 2132), False, 'import datetime\n'), ((2145, 2168), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2166, 2168), False, 'import datetime\n'), ((804, 835), 'util.getAlpha', 'util.getAlpha', (['self.tstamp_prev'], {}), '(self.tstamp_prev)\n', (817, 835), False, 'import util\n'), ((1162, 1214), 'cv2.accumulateWeighted', 'cv2.accumulateWeighted', (['image', 'self.image_acc', 'alpha'], {}), '(image, self.image_acc, alpha)\n', (1184, 1214), False, 'import cv2\n'), ((960, 975), 'numpy.shape', 
'np.shape', (['image'], {}), '(image)\n', (968, 975), True, 'import numpy as np\n')] |
import numpy
import pyaudio
import threading
class SwhRecorder:
    """Simple, cross-platform class to record from the microphone."""

    def __init__(self):
        """minimal garb is executed when class is loaded."""
        self.RATE = 48100           # Sample rate in Hz.
        self.BUFFERSIZE = 4024 * 2  # 1024 is a good buffer size
        self.secToRecord = .2       # Seconds of audio captured per pass.
        self.threadsDieNow = False  # Set True to stop the background thread.
        self.newAudio = False       # Raised when a fresh buffer is available.

    def setup(self):
        """initialize sound card."""
        # TODO - windows detection vs. alsa or something for linux
        # TODO - try/except for sound card selection/initiation
        self.buffersToRecord = int(self.RATE * self.secToRecord / self.BUFFERSIZE)
        if self.buffersToRecord == 0:
            self.buffersToRecord = 1
        self.samplesToRecord = int(self.BUFFERSIZE * self.buffersToRecord)
        self.chunksToRecord = int(self.samplesToRecord / self.BUFFERSIZE)
        self.secPerPoint = 1.0 / self.RATE

        self.p = pyaudio.PyAudio()
        self.inStream = self.p.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=self.RATE,
            input=True,
            frames_per_buffer=self.BUFFERSIZE,
            input_device_index=0)

        # Time axes (seconds) for a single buffer and for the whole recording.
        self.xsBuffer = numpy.arange(self.BUFFERSIZE) * self.secPerPoint
        self.xs = numpy.arange(self.chunksToRecord * self.BUFFERSIZE) * self.secPerPoint
        self.audio = numpy.empty((self.chunksToRecord * self.BUFFERSIZE), dtype=numpy.int16)

    def close(self):
        """cleanly back out and release sound card."""
        self.continuousEnd()
        self.p.close(self.inStream)

    ### RECORDING AUDIO ###

    def getAudio(self):
        """get a single buffer size worth of audio."""
        audioString = self.inStream.read(self.BUFFERSIZE)
        # Fix: numpy.fromstring is deprecated (removed for binary input);
        # frombuffer is the supported equivalent for the bytes PyAudio returns.
        return numpy.frombuffer(audioString, dtype=numpy.int16)

    def record(self, forever=True):
        """record secToRecord seconds of audio."""
        while True:
            if self.threadsDieNow:
                break
            for i in range(self.chunksToRecord):
                self.audio[i * self.BUFFERSIZE:(i + 1) * self.BUFFERSIZE] = self.getAudio()
            self.newAudio = True
            if forever is False:
                break

    def continuousStart(self):
        """CALL THIS to start running forever."""
        self.t = threading.Thread(target=self.record)
        self.t.start()

    def continuousEnd(self):
        """shut down continuous recording."""
        self.threadsDieNow = True
        if hasattr(self, 't') and self.t:
            self.t.join()

    ### MATH ###

    def downsample(self, data, mult):
        """Given 1D data, return the binned average (trailing overhang dropped)."""
        overhang = len(data) % mult
        if overhang:
            data = data[:-overhang]
        # Fix: use floor division -- len(data) / mult is a float on Python 3
        # and numpy.reshape rejects non-integer dimensions.
        data = numpy.reshape(data, (len(data) // mult, mult))
        data = numpy.average(data, 1)
        return data

    def fft(self, data=None, trimBy=10, logScale=False, divBy=100):
        """Return (xs, ys): frequency axis and folded FFT magnitude of `data`
        (defaults to the last recorded audio)."""
        if data is None:
            data = self.audio.flatten()
        # Fold the mirrored half of the spectrum onto the first half.
        left, right = numpy.split(numpy.abs(numpy.fft.fft(data)), 2)
        ys = numpy.add(left, right[::-1])
        if logScale:
            ys = numpy.multiply(20, numpy.log10(ys))
        xs = numpy.arange(self.BUFFERSIZE / 2, dtype=float)
        if trimBy:
            i = int((self.BUFFERSIZE / 2) / trimBy)
            ys = ys[:i]
            xs = xs[:i] * self.RATE / self.BUFFERSIZE
        if divBy:
            ys = ys / float(divBy)
        return xs, ys
| [
"numpy.log10",
"numpy.add",
"numpy.average",
"numpy.fft.fft",
"numpy.fromstring",
"numpy.empty",
"threading.Thread",
"pyaudio.PyAudio",
"numpy.arange"
] | [((967, 984), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (982, 984), False, 'import pyaudio\n'), ((1399, 1468), 'numpy.empty', 'numpy.empty', (['(self.chunksToRecord * self.BUFFERSIZE)'], {'dtype': 'numpy.int16'}), '(self.chunksToRecord * self.BUFFERSIZE, dtype=numpy.int16)\n', (1410, 1468), False, 'import numpy\n'), ((1795, 1843), 'numpy.fromstring', 'numpy.fromstring', (['audioString'], {'dtype': 'numpy.int16'}), '(audioString, dtype=numpy.int16)\n', (1811, 1843), False, 'import numpy\n'), ((2337, 2373), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.record'}), '(target=self.record)\n', (2353, 2373), False, 'import threading\n'), ((2857, 2879), 'numpy.average', 'numpy.average', (['data', '(1)'], {}), '(data, 1)\n', (2870, 2879), False, 'import numpy\n'), ((3116, 3144), 'numpy.add', 'numpy.add', (['left', 'right[::-1]'], {}), '(left, right[::-1])\n', (3125, 3144), False, 'import numpy\n'), ((3232, 3278), 'numpy.arange', 'numpy.arange', (['(self.BUFFERSIZE / 2)'], {'dtype': 'float'}), '(self.BUFFERSIZE / 2, dtype=float)\n', (3244, 3278), False, 'import numpy\n'), ((1240, 1269), 'numpy.arange', 'numpy.arange', (['self.BUFFERSIZE'], {}), '(self.BUFFERSIZE)\n', (1252, 1269), False, 'import numpy\n'), ((1307, 1358), 'numpy.arange', 'numpy.arange', (['(self.chunksToRecord * self.BUFFERSIZE)'], {}), '(self.chunksToRecord * self.BUFFERSIZE)\n', (1319, 1358), False, 'import numpy\n'), ((3078, 3097), 'numpy.fft.fft', 'numpy.fft.fft', (['data'], {}), '(data)\n', (3091, 3097), False, 'import numpy\n'), ((3202, 3217), 'numpy.log10', 'numpy.log10', (['ys'], {}), '(ys)\n', (3213, 3217), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
# @Author: <NAME>, <NAME>
# @Date: 2019-11-20 10:08:49
# @Last Modified by: Daniel
# @Last Modified time: 2020-08-12 13:18:57
import numpy as np
from scipy.spatial.transform import Rotation
from scipy.sparse import csr_matrix
from scipy.spatial import HalfspaceIntersection, ConvexHull
from scipy.spatial.qhull import QhullError
import logging
import os
from sklearn.metrics import average_precision_score, precision_recall_curve
from .base import *
from enum import Enum
import itertools
import scipy
from packaging import version
from ummon.utils.average_utils import OnlineAverage
import itertools
# Compatibility shim: scipy < 1.4 exposes as_dcm/from_dcm instead of
# as_matrix/from_matrix; alias them so the rest of this module can use
# the modern names unconditionally.
if version.parse(scipy.__version__) < version.parse("1.4.0"):
    Rotation.as_matrix = Rotation.as_dcm
    Rotation.from_matrix = Rotation.from_dcm

# Public API of this module.
__all__ = ['MeanIoU', 'MeanDistanceError', 'BinaryAccuracy', 'BinaryIoU', 'BinaryF1', 'BinaryRecall', 'BinaryPrecision',
           'AveragePrecision']

# Module logger; DEBUG output is written to ./logfile/geometric_metrics.log.
__log = logging.getLogger('geometric_metrics_log')
os.makedirs("./logfile/", exist_ok=True)
# create file handler which logs even debug messages
fh = logging.FileHandler("./logfile/" + 'geometric_metrics.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
__log.addHandler(fh)
__log.propagate = True
#######
# activate logging with "logging.getLogger().setLevel(0)"
#######
def halfspace_representation(cuboid: dict) -> np.array:
    """Convert a (bounding) cuboid into its half-space representation.

    Args:
        cuboid (dict): cuboid parameters.
            Format: 'c' -> center of cuboid array[x, y, ... n]
                    'd' -> dimension of cuboid array[length, width, ... n]
                    'r' -> rotation as 3x3 matrix or quaternion

    Returns:
        np.array: stacked half-spaces [n . x + offset <= 0], shape [2n x n+1]
    """
    # Accept either a quaternion (1-D) or an already-built rotation matrix.
    rot = cuboid['r']
    if len(rot.shape) == 1:
        rot = Rotation.from_quat(rot).as_matrix()
    ndim = cuboid['d'].shape[0]

    # Axis-aligned outward normals: first the -e_i rows, then the +e_i rows,
    # assembled via a sparse matrix.
    rows = np.arange(2 * ndim)
    cols = np.array(list(np.arange(ndim)) * 2)
    signs = np.array([-1, 1]).repeat(ndim)
    normals = csr_matrix((signs, (rows, cols)), shape=(2 * ndim, ndim)).toarray()

    # Rotate the normals into the cuboid's orientation.
    normals = np.matmul(normals, rot.T)

    # Two opposite corners spanning the cuboid volume.
    lo_corner = cuboid['c'] + np.matmul(-(cuboid['d'] / 2), rot.T)
    hi_corner = cuboid['c'] + np.matmul(cuboid['d'] / 2, rot.T)

    # Plane offsets; the normal ordering above makes this row split valid
    # (first ndim rows belong to lo_corner, the rest to hi_corner).
    off_lo = -np.matmul(normals[0:ndim, :], lo_corner.reshape(-1, 1))
    off_hi = -np.matmul(normals[ndim:ndim * 2, :], hi_corner.reshape(-1, 1))
    offsets = np.concatenate((off_lo, off_hi), axis=0)

    return np.concatenate((normals, offsets), axis=1)
def intersection(cuboid1: dict, cuboid2: dict) -> float:
    """Compute the volume of the intersection of two arbitrary cuboids.

    Args:
        cuboid1 (dict): keys 'c' (center), 'd' (dimensions), 'r' (3x3 rotation).
        cuboid2 (dict): same structure.

    Returns:
        float: intersection volume; 0 when the cuboids do not intersect.
    """
    # Stack both cuboids' half-space constraints into one system.
    stacked = np.concatenate(
        (halfspace_representation(cuboid1), halfspace_representation(cuboid2)),
        axis=0)

    # Heuristic interior point: blend the two centers, weighted by the
    # relative dimensions, so it likely lies inside both cuboids.
    weight = cuboid1['d'] / (cuboid1['d'] + cuboid2['d'])
    interior = (cuboid2['c'] - cuboid1['c']) * weight + cuboid1['c']

    try:
        region = HalfspaceIntersection(stacked, interior)
        return ConvexHull(region.intersections).volume
    except QhullError as e:
        __log.debug("no intersection found. ERROR msg: {}".format(e))
        return 0
def iou(cuboid1: dict, cuboid2: dict) -> float:
    """Intersection over union of two cuboids (volumes from their 'd' entries)."""
    inter = intersection(cuboid1, cuboid2)
    union = np.prod(cuboid1['d']) + np.prod(cuboid2['d']) - inter
    return inter / union
class Sort(Enum):
    """Strategy used by find_correspondences to order candidate matches."""
    IOU = 1  # Sort by IOU
    CONFIDENCE_SCORE = 2  # Sort by output confidence score
    DISTANCE = 3  # Sort by center distance (ascending; see find_correspondences)
def find_correspondences(output: list, target: list, threshold: float, sort: Sort) -> tuple:
    """
    Finds correspondences between output and target cuboids by greedily matching
    pairs whose similarity exceeds the given threshold.

    Args:
        output (list): list of predicted cuboids (dict)
        target (list): list of target cuboids (dict)
        threshold (float): similarity threshold to count as correct detection
        sort (Sort enum): matching order; if set to Sort.CONFIDENCE_SCORE, output
            cuboids must contain 'confidence_score'.

    Returns:
        tuple (2,): (output_to_target, target_to_output). Each is a np array and
            maps indices from output to target and the other way round for each
            correspondence. Array contains 'inf' if there is no match.
    """
    num_output = len(output)
    num_target = len(target)

    # DISTANCE uses center-distance; IOU and CONFIDENCE_SCORE both score by IoU.
    if sort == Sort.DISTANCE:
        similarity_measure = MeanDistanceError().mean_distance_error
    else:
        similarity_measure = iou

    # calculate all pairwise similarities (rows: targets, cols: outputs)
    sim_m = np.zeros((num_target, num_output))
    for i_target, cuboid_target in enumerate(target):
        for i_output, cuboid_output in enumerate(output):
            sim_m[i_target, i_output] = similarity_measure(
                cuboid_output, cuboid_target)

    # calculate sorting, which to match first
    if sort == Sort.CONFIDENCE_SCORE:
        confidence_scores = np.array(
            [cuboid['confidence_score'] for cuboid in output])
        confidence_scores_argsort = np.argsort(-confidence_scores)  # high score first
    elif sort == Sort.IOU:
        iou_argsort = np.argsort(-sim_m.reshape(-1))  # high IoU first
    elif sort == Sort.DISTANCE:
        iou_argsort = np.argsort(sim_m.reshape(-1))  # low distance first
    else:
        # NOTE(review): should be `raise NotImplementedError` --
        # NotImplemented is a constant, not an exception.
        raise NotImplemented

    # maps indices from output to target and the other way round for each correspondence
    output_to_target = np.full((num_output,), float('inf'))
    target_to_output = np.full((num_target,), float('inf'))
    for i, (output_i, target_i) in enumerate(itertools.product(list(range(num_output)), list(range(num_target)))):
        # Get target t and output o indices
        if sort == Sort.CONFIDENCE_SCORE:
            t = target_i
            o = confidence_scores_argsort[output_i]
        elif sort == Sort.IOU or sort == Sort.DISTANCE:
            # flat argsort index -> (target row, output column)
            t = int(iou_argsort[i] / num_output)
            o = iou_argsort[i] % num_output
        # to count as correct detection the similarity must exceed the threshold;
        # each output and each target is matched at most once (greedy matching)
        if sim_m[t, o] > threshold and output_to_target[o] == float('inf') and target_to_output[t] == float('inf'):
            output_to_target[o] = t
            target_to_output[t] = o
    return output_to_target, target_to_output
def calc_binary_confusion_matrix(output: list, target: list):
    """
    Calculates TP, FP, FN_0, FN_1, TN for output and target cuboid lists of lists
    (multiple scenes) by finding correspondences between output and target cuboids
    with IOU > 0.5. Correspondences with high IOU are matched first.
    FN_0 are false negatives where a ground truth cuboid matched, but the
    prediction has class id 0.
    FN_1 are false negatives where no ground truth cuboid matched.

    Args:
        output (list): list of predicted cuboids (dict)
        target (list): list of target cuboids (dict)

    Returns:
        tuple: number of TP, FP, FN_0, FN_1, TN
    """
    TP = FP = FN_0 = FN_1 = TN = 0
    for o, t in zip(output, target):  # iter over scenes
        output_to_target, target_to_output = find_correspondences(
            o, t, 0.5, Sort.IOU)
        # Predicted class (0/1) per output cuboid, as a boolean mask.
        output_class_ids = np.array([cuboid['class_id']
                                     for cuboid in o], dtype=bool)
        # Sanity: matching is one-to-one, so both sides have equally many matches.
        assert (~np.isinf(output_to_target)).sum() == (
            ~np.isinf(target_to_output)).sum()
        # TP: matched to a target AND predicted class 1.
        TP += np.logical_and(~(np.isinf(output_to_target)),
                             output_class_ids).sum()
        # FP: unmatched but predicted class 1.
        FP += np.logical_and(np.isinf(output_to_target),
                             output_class_ids).sum()
        # FN_0: matched to a target but predicted class 0.
        FN_0 += np.logical_and(~(np.isinf(output_to_target)),
                               np.logical_not(output_class_ids)).sum()
        # FN_1: ground-truth cuboids with no matching prediction at all.
        FN_1 += (np.isinf(target_to_output)).sum()
        # TN: unmatched and predicted class 0.
        TN += np.logical_and(np.isinf(output_to_target),
                             np.logical_not(output_class_ids)).sum()
    return TP, FP, FN_0, FN_1, TN
class ObjectDetectionMetric(OfflineMetric):
    """Base class for offline object-detection metrics; delegates to self.func."""

    def __call__(self, output: list, target: list):
        # Subclasses assign the concrete metric function to self.func in __init__.
        return self.func(output, target)

    @classmethod
    def __repr__(cls):
        return cls.__name__
class BinaryIoU(ObjectDetectionMetric):
    """IoU from the confusion matrix, TP / (TP + FP + FN), for a binary
    object detection task.

    Usage:
        metric = BinaryIoU()
        metric(cuboids_output, cuboids_target)  # lists of scenes (lists of cuboid dicts)

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix,
    'class_id' either 0 or 1.
    """

    def __init__(self):
        self.func = self.accuracy

    @staticmethod
    def accuracy(output: list, target: list):
        """TP / (TP + FP + FN_0 + FN_1) over all scenes."""
        tp, fp, fn_matched, fn_missed, _tn = calc_binary_confusion_matrix(output, target)
        return tp / (tp + fp + fn_matched + fn_missed)
class BinaryAccuracy(ObjectDetectionMetric):
    """Accuracy from the confusion matrix, (TP + TN) / (TP + TN + FP + FN),
    for a binary object detection task.

    Usage:
        metric = BinaryAccuracy()
        metric(cuboids_output, cuboids_target)  # lists of scenes (lists of cuboid dicts)

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix,
    'class_id' either 0 or 1.
    """

    def __init__(self):
        self.func = self.accuracy

    @staticmethod
    def accuracy(output: list, target: list):
        """(TP + TN) / (TP + TN + FP + FN_0 + FN_1) over all scenes."""
        tp, fp, fn_matched, fn_missed, tn = calc_binary_confusion_matrix(output, target)
        correct = tp + tn
        return correct / (correct + fp + fn_matched + fn_missed)
class BinaryPrecision(ObjectDetectionMetric):
    """Precision from the confusion matrix, TP / (TP + FP), for a binary
    object detection task.

    Usage:
        metric = BinaryPrecision()
        metric(cuboids_output, cuboids_target)  # lists of scenes (lists of cuboid dicts)

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix,
    'class_id' either 0 or 1.
    """

    def __init__(self):
        self.func = self.precision

    @staticmethod
    def precision(output: list, target: list):
        """TP / (TP + FP) over all scenes."""
        tp, fp, _fn_matched, _fn_missed, _tn = calc_binary_confusion_matrix(output, target)
        return tp / (tp + fp)
class BinaryRecall(ObjectDetectionMetric):
    """Recall from the confusion matrix, TP / (TP + FN), for a binary
    object detection task.

    Usage:
        metric = BinaryRecall()
        metric(cuboids_output, cuboids_target)  # lists of scenes (lists of cuboid dicts)

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix,
    'class_id' either 0 or 1.
    """

    def __init__(self):
        self.func = self.recall

    @staticmethod
    def recall(output: list, target: list):
        """TP / (TP + FN_0 + FN_1) over all scenes."""
        TP, FP, FN_0, FN_1, TN = calc_binary_confusion_matrix(output, target)
        return TP / (TP + FN_0 + FN_1)

    # Backward-compatible alias: this method was historically (mis)named
    # 'precision' although it computes recall.
    precision = recall
class BinaryF1(ObjectDetectionMetric):
    """F1 score from the confusion matrix,
    2 * (precision * recall) / (precision + recall), for a binary object
    detection task.

    Usage:
        metric = BinaryF1()
        metric(cuboids_output, cuboids_target)  # lists of scenes (lists of cuboid dicts)

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix,
    'class_id' either 0 or 1.
    """

    def __init__(self):
        self.func = self.f1

    @staticmethod
    def f1(output: list, target: list):
        """Harmonic mean of precision and recall over all scenes."""
        TP, FP, FN_0, FN_1, TN = calc_binary_confusion_matrix(output, target)
        precision = TP / (TP + FP)
        recall = TP / (TP + FN_0 + FN_1)
        return 2 * (precision * recall) / (precision + recall)

    # Backward-compatible alias: this method was historically (mis)named
    # 'precision' although it computes the F1 score.
    precision = f1
class GeometricMetrics(OnlineMetric):
    """Base class for online geometric metrics averaged over matched pairs.

    Matches predicted to target cuboids via find_correspondences, applies
    self.func (set by subclasses) to each matched pair, and maintains a
    running mean and variance across calls.
    """

    def __init__(self,
                 find_correspondences__th=-1.0,
                 find_correspondences__sort: Sort=Sort.DISTANCE):
        self.threshold_ = find_correspondences__th   # matching threshold
        self.sort_ = find_correspondences__sort      # matching strategy
        self.avg_f = OnlineAverage()    # running mean of the metric
        self.avg_f2 = OnlineAverage()  # running mean of the squared metric
        self.avg_f.reset()
        self.avg_f2.reset()

    def __call__(self, output: list, target: list):
        """Update running statistics with a minibatch; return (mean, var)."""
        results = [self.__metric_per_item(c1, c2)
                   for c1, c2 in zip(output, target)]
        mean = self.avg_f(list(itertools.chain(*results)))
        mean2 = self.avg_f2(np.power(list(itertools.chain(*results)), 2))
        var = mean2 - mean**2  # Var[X] = E[X^2] - E[X]^2
        return mean, var

    def reset(self):
        self.avg_f.reset()
        self.avg_f2.reset()

    def __metric_per_item(self, output_i, target_i):  # item out of minibatch
        o_to_t, t_to_o = find_correspondences(output_i,
                                             target_i,
                                             threshold=self.threshold_,
                                             sort=self.sort_)
        # ignore targets without a corresponding prediction
        mask = ~np.isinf(t_to_o)
        # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        sort = t_to_o[mask].astype(int)
        output_i = np.array(output_i)[sort]
        target_i = np.array(target_i)[mask]
        # compute metric per matched pair
        return [self.func(o, t) for o, t in zip(output_i, target_i)]

    @classmethod
    def __repr__(cls):
        return cls.__name__
class MeanIoU(GeometricMetrics):
    """Mean Intersection over Union (IoU) of matched 2d/3d cuboid pairs.

    Usage:
        metric = MeanIoU()
        metric(cuboids_1, cuboids_2)  # cuboids wrapped in list

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix.
    """

    def __init__(self, **kwargs):
        self.func = self.intersection_over_union
        super().__init__(**kwargs)

    @staticmethod
    def intersection_over_union(output: dict, target: dict):
        """Delegate to the module-level iou helper."""
        return iou(output, target)
class MeanDistanceError(GeometricMetrics):
    """Mean Euclidean distance between the centers of matched cuboid pairs.

    Usage:
        metric = MeanDistanceError()
        metric(cuboids_1, cuboids_2)  # cuboids wrapped in list

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix.
    """

    def __init__(self, **kwargs):
        self.func = self.mean_distance_error
        super().__init__(**kwargs)

    @staticmethod
    def mean_distance_error(output: dict, target: dict):
        """Euclidean distance between the two cuboid centers."""
        delta = output['c'] - target['c']
        return np.sqrt(np.dot(delta, delta))
class MeanDimensionError(GeometricMetrics):
    """Absolute error of one cuboid dimension between output and target.

    Cuboid dict format: 'c' center, 'd' dimensions, 'r' 3x3 rotation matrix.

    Args:
        dimension: index into the 'd' array to compare.
        target_dimension_order: permutation applied to the target's 'd'
            before indexing; defaults to the identity order [0, 1, 2].
    """

    def __init__(self,
                 dimension,
                 target_dimension_order=None,
                 **kwargs):
        self.func = self.dimension_error
        self.dimension_ = dimension
        # Fix: a np.array used as a default argument is a single shared
        # mutable object across all instances; build a fresh identity order
        # per instance instead.
        if target_dimension_order is None:
            target_dimension_order = np.array([0, 1, 2])
        self.td_order_ = target_dimension_order
        super().__init__(**kwargs)

    def dimension_error(self, output: dict, target: dict):
        """|output_d - reordered target_d| for the configured dimension."""
        error = output['d'][self.dimension_] - \
            target['d'][self.td_order_][self.dimension_]
        return np.abs(error)

    def __repr__(self):
        return self.__class__.__name__ + "_dim_{}".format(self.dimension_)
class AveragePrecision(ObjectDetectionMetric):
    """Compute the Average Precision of two lists of arbitrary 2d-/3d cuboids.

    Usage: ap = AveragePrecision()
           ap(cuboids_1: list, cuboids_2: list)  # cuboids wrapped in list
    Cuboid parameters (dict): 'c' -> center, 'd' -> dimension,
                              'r' -> rotation as 3x3 matrix,
                              'confidence_score' -> confidence score of bbox

    Attributes:
        func (TYPE): function used in parent class
    """

    def __init__(self, return_prec_rec_curve=False, iou_th=0.5):
        self.func = self.calc_score
        self.return_prec_rec_curve = return_prec_rec_curve  # also return the PR curve?
        self.iou_th_ = iou_th  # IoU threshold for a detection to count as a match

    def calc_score(self, output_list, targets_list):
        """AP over all scenes; optionally also (precision, recall, threshold)."""
        y_true_list = []
        scores_list = []
        for o, t in zip(output_list, targets_list):  # iter over single item
            output_to_target, target_to_output = find_correspondences(
                o, t, self.iou_th_, Sort.IOU)
            n_pred = len(output_to_target)
            # Unmatched ground-truth boxes become false negatives with score 0.
            n_fn = np.isinf(target_to_output).sum()
            y_true = np.ones(n_pred + n_fn)
            # Unmatched predictions are false positives (label 0).
            y_true[:n_pred][np.isinf(output_to_target)] = 0
            scores = np.zeros_like(y_true)
            scores[:n_pred] = [cuboid['confidence_score'] for cuboid in o]
            y_true_list.extend(y_true)
            scores_list.extend(scores)
        precision, recall, threshold = precision_recall_curve(
            y_true_list, scores_list)
        # AP = sum of (recall step) * precision over thresholds; when the
        # lowest threshold is 0 the first PR point is skipped.
        if threshold[0] == 0:
            average_precision = - \
                np.sum(np.diff(recall[1:]) * np.array(precision)[1:-1])
        else:
            average_precision = - \
                np.sum(np.diff(recall) * np.array(precision)[:-1])
        if self.return_prec_rec_curve:
            return average_precision, (precision, recall, threshold)
        return average_precision

    def __repr__(self):
        return self.__class__.__name__ + "_{}".format(self.iou_th_)
class DataCollector(OnlineMetric):
    """Accumulates outputs and targets across calls in class-level lists."""

    # Shared, class-level state: all instances append to the same lists.
    output_list = []
    target_list = []

    @classmethod
    def __call__(cls, output, target):
        """Append the minibatch to the shared lists; return the class itself."""
        cls.output_list.extend(output)
        cls.target_list.extend(target)
        return cls

    @classmethod
    def reset(cls):
        """Drop everything collected so far."""
        cls.output_list = []
        cls.target_list = []
if __name__ == '__main__':
    # usage example: build two demo scenes of cuboids and evaluate metrics.
    out = [dict(c=np.array([0., 1., 2.]),
                d=np.array([4., 8., 10.]),
                r=Rotation.from_euler(
                    'xyz', [45, 10, 30], degrees=True).as_matrix(),
                class_id=1),
           dict(c=np.array([0., 1., 2.]),
                d=np.array([4., 8., 10.]),
                r=Rotation.from_euler(
                    'xyz', [45, 10, 30], degrees=True).as_matrix(),
                class_id=1)
           ]
    target = [dict(c=np.array([0., 1., 2.]),
                   d=np.array([4., 8., 10.]),
                   r=Rotation.from_euler(
                       'xyz', [45, 10, 30], degrees=True).as_matrix(),
                   class_id=1),
              dict(c=np.array([0., 1.5, 2.]),
                   d=np.array([8., 8., 10.]),
                   r=Rotation.from_euler(
                       'xyz', [45, 20, 30], degrees=True).as_matrix(),
                   class_id=1)
              ]

    # usage example.  Fix: the original referenced the undefined name `IoU`
    # (the class is MeanIoU) and set a stray debug attribute on one metric.
    metrics = [MeanDistanceError(), MeanDistanceError(), MeanIoU()]
    result = {repr(m): m(out, target) for m in metrics}
    print(result)

    metrics = [BinaryIoU(), BinaryAccuracy()]
    result = {repr(m): m([out], [target]) for m in metrics}
    print(result)
| [
"logging.getLogger",
"numpy.prod",
"itertools.chain",
"numpy.logical_not",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"scipy.spatial.transform.Rotation.from_euler",
"ummon.utils.average_utils.OnlineAverage",
"numpy.diff",
"numpy.dot",
"logging.FileHandler",
"numpy.matmul",
"numpy.conc... | [((940, 982), 'logging.getLogger', 'logging.getLogger', (['"""geometric_metrics_log"""'], {}), "('geometric_metrics_log')\n", (957, 982), False, 'import logging\n'), ((983, 1023), 'os.makedirs', 'os.makedirs', (['"""./logfile/"""'], {'exist_ok': '(True)'}), "('./logfile/', exist_ok=True)\n", (994, 1023), False, 'import os\n'), ((1082, 1141), 'logging.FileHandler', 'logging.FileHandler', (["('./logfile/' + 'geometric_metrics.log')"], {}), "('./logfile/' + 'geometric_metrics.log')\n", (1101, 1141), False, 'import logging\n'), ((1227, 1300), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1244, 1300), False, 'import logging\n'), ((633, 665), 'packaging.version.parse', 'version.parse', (['scipy.__version__'], {}), '(scipy.__version__)\n', (646, 665), False, 'from packaging import version\n'), ((668, 690), 'packaging.version.parse', 'version.parse', (['"""1.4.0"""'], {}), "('1.4.0')\n", (681, 690), False, 'from packaging import version\n'), ((2245, 2265), 'numpy.arange', 'np.arange', (['(2 * p_dim)'], {}), '(2 * p_dim)\n', (2254, 2265), True, 'import numpy as np\n'), ((2526, 2558), 'numpy.matmul', 'np.matmul', (['norm_vec', 'cub_rot_m.T'], {}), '(norm_vec, cub_rot_m.T)\n', (2535, 2558), True, 'import numpy as np\n'), ((3038, 3070), 'numpy.concatenate', 'np.concatenate', (['(d1, d2)'], {'axis': '(0)'}), '((d1, d2), axis=0)\n', (3052, 3070), True, 'import numpy as np\n'), ((3088, 3129), 'numpy.concatenate', 'np.concatenate', (['(norm_vec_rot, d)'], {'axis': '(1)'}), '((norm_vec_rot, d), axis=1)\n', (3102, 3129), True, 'import numpy as np\n'), ((3802, 3852), 'numpy.concatenate', 'np.concatenate', (['(halfspaces1, halfspaces2)'], {'axis': '(0)'}), '((halfspaces1, halfspaces2), axis=0)\n', (3816, 3852), True, 'import numpy as np\n'), ((5760, 5794), 'numpy.zeros', 'np.zeros', (['(num_target, num_output)'], {}), '((num_target, 
num_output))\n', (5768, 5794), True, 'import numpy as np\n'), ((2720, 2762), 'numpy.matmul', 'np.matmul', (["(-(cuboid['d'] / 2))", 'cub_rot_m.T'], {}), "(-(cuboid['d'] / 2), cub_rot_m.T)\n", (2729, 2762), True, 'import numpy as np\n'), ((2786, 2825), 'numpy.matmul', 'np.matmul', (["(cuboid['d'] / 2)", 'cub_rot_m.T'], {}), "(cuboid['d'] / 2, cub_rot_m.T)\n", (2795, 2825), True, 'import numpy as np\n'), ((4112, 4161), 'scipy.spatial.HalfspaceIntersection', 'HalfspaceIntersection', (['halfspaces', 'feasible_point'], {}), '(halfspaces, feasible_point)\n', (4133, 4161), False, 'from scipy.spatial import HalfspaceIntersection, ConvexHull\n'), ((4177, 4205), 'scipy.spatial.ConvexHull', 'ConvexHull', (['hs.intersections'], {}), '(hs.intersections)\n', (4187, 4205), False, 'from scipy.spatial import HalfspaceIntersection, ConvexHull\n'), ((6126, 6185), 'numpy.array', 'np.array', (["[cuboid['confidence_score'] for cuboid in output]"], {}), "([cuboid['confidence_score'] for cuboid in output])\n", (6134, 6185), True, 'import numpy as np\n'), ((6235, 6265), 'numpy.argsort', 'np.argsort', (['(-confidence_scores)'], {}), '(-confidence_scores)\n', (6245, 6265), True, 'import numpy as np\n'), ((8301, 8359), 'numpy.array', 'np.array', (["[cuboid['class_id'] for cuboid in o]"], {'dtype': 'bool'}), "([cuboid['class_id'] for cuboid in o], dtype=bool)\n", (8309, 8359), True, 'import numpy as np\n'), ((14492, 14507), 'ummon.utils.average_utils.OnlineAverage', 'OnlineAverage', ([], {}), '()\n', (14505, 14507), False, 'from ummon.utils.average_utils import OnlineAverage\n'), ((14530, 14545), 'ummon.utils.average_utils.OnlineAverage', 'OnlineAverage', ([], {}), '()\n', (14543, 14545), False, 'from ummon.utils.average_utils import OnlineAverage\n'), ((18000, 18019), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (18008, 18019), True, 'import numpy as np\n'), ((18390, 18403), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (18396, 18403), True, 'import numpy as np\n'), 
((20100, 20148), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_true_list', 'scores_list'], {}), '(y_true_list, scores_list)\n', (20122, 20148), False, 'from sklearn.metrics import average_precision_score, precision_recall_curve\n'), ((2324, 2341), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (2332, 2341), True, 'import numpy as np\n'), ((2371, 2427), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, col))'], {'shape': '(p_dim * 2, p_dim)'}), '((data, (row, col)), shape=(p_dim * 2, p_dim))\n', (2381, 2427), False, 'from scipy.sparse import csr_matrix\n'), ((4453, 4474), 'numpy.prod', 'np.prod', (["cuboid1['d']"], {}), "(cuboid1['d'])\n", (4460, 4474), True, 'import numpy as np\n'), ((4477, 4498), 'numpy.prod', 'np.prod', (["cuboid2['d']"], {}), "(cuboid2['d'])\n", (4484, 4498), True, 'import numpy as np\n'), ((15426, 15442), 'numpy.isinf', 'np.isinf', (['t_to_o'], {}), '(t_to_o)\n', (15434, 15442), True, 'import numpy as np\n'), ((15505, 15523), 'numpy.array', 'np.array', (['output_i'], {}), '(output_i)\n', (15513, 15523), True, 'import numpy as np\n'), ((15549, 15567), 'numpy.array', 'np.array', (['target_i'], {}), '(target_i)\n', (15557, 15567), True, 'import numpy as np\n'), ((17319, 17347), 'numpy.dot', 'np.dot', (['error_vec', 'error_vec'], {}), '(error_vec, error_vec)\n', (17325, 17347), True, 'import numpy as np\n'), ((19779, 19801), 'numpy.ones', 'np.ones', (['(n_pred + n_fn)'], {}), '(n_pred + n_fn)\n', (19786, 19801), True, 'import numpy as np\n'), ((19884, 19905), 'numpy.zeros_like', 'np.zeros_like', (['y_true'], {}), '(y_true)\n', (19897, 19905), True, 'import numpy as np\n'), ((2116, 2147), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (["cuboid['r']"], {}), "(cuboid['r'])\n", (2134, 2147), False, 'from scipy.spatial.transform import Rotation\n'), ((2290, 2306), 'numpy.arange', 'np.arange', (['p_dim'], {}), '(p_dim)\n', (2299, 2306), True, 'import numpy as np\n'), ((8874, 8900), 
'numpy.isinf', 'np.isinf', (['target_to_output'], {}), '(target_to_output)\n', (8882, 8900), True, 'import numpy as np\n'), ((14789, 14814), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (14804, 14814), False, 'import itertools\n'), ((19830, 19856), 'numpy.isinf', 'np.isinf', (['output_to_target'], {}), '(output_to_target)\n', (19838, 19856), True, 'import numpy as np\n'), ((21109, 21134), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (21117, 21134), True, 'import numpy as np\n'), ((21151, 21177), 'numpy.array', 'np.array', (['[4.0, 8.0, 10.0]'], {}), '([4.0, 8.0, 10.0])\n', (21159, 21177), True, 'import numpy as np\n'), ((21330, 21355), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (21338, 21355), True, 'import numpy as np\n'), ((21372, 21398), 'numpy.array', 'np.array', (['[4.0, 8.0, 10.0]'], {}), '([4.0, 8.0, 10.0])\n', (21380, 21398), True, 'import numpy as np\n'), ((21566, 21591), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {}), '([0.0, 1.0, 2.0])\n', (21574, 21591), True, 'import numpy as np\n'), ((21611, 21637), 'numpy.array', 'np.array', (['[4.0, 8.0, 10.0]'], {}), '([4.0, 8.0, 10.0])\n', (21619, 21637), True, 'import numpy as np\n'), ((21802, 21827), 'numpy.array', 'np.array', (['[0.0, 1.5, 2.0]'], {}), '([0.0, 1.5, 2.0])\n', (21810, 21827), True, 'import numpy as np\n'), ((21848, 21874), 'numpy.array', 'np.array', (['[8.0, 8.0, 10.0]'], {}), '([8.0, 8.0, 10.0])\n', (21856, 21874), True, 'import numpy as np\n'), ((8643, 8669), 'numpy.isinf', 'np.isinf', (['output_to_target'], {}), '(output_to_target)\n', (8651, 8669), True, 'import numpy as np\n'), ((8817, 8849), 'numpy.logical_not', 'np.logical_not', (['output_class_ids'], {}), '(output_class_ids)\n', (8831, 8849), True, 'import numpy as np\n'), ((8937, 8963), 'numpy.isinf', 'np.isinf', (['output_to_target'], {}), '(output_to_target)\n', (8945, 8963), True, 'import numpy as np\n'), ((8994, 9026), 
'numpy.logical_not', 'np.logical_not', (['output_class_ids'], {}), '(output_class_ids)\n', (9008, 9026), True, 'import numpy as np\n'), ((14859, 14884), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (14874, 14884), False, 'import itertools\n'), ((19725, 19751), 'numpy.isinf', 'np.isinf', (['target_to_output'], {}), '(target_to_output)\n', (19733, 19751), True, 'import numpy as np\n'), ((8415, 8441), 'numpy.isinf', 'np.isinf', (['output_to_target'], {}), '(output_to_target)\n', (8423, 8441), True, 'import numpy as np\n'), ((8467, 8493), 'numpy.isinf', 'np.isinf', (['target_to_output'], {}), '(target_to_output)\n', (8475, 8493), True, 'import numpy as np\n'), ((8532, 8558), 'numpy.isinf', 'np.isinf', (['output_to_target'], {}), '(output_to_target)\n', (8540, 8558), True, 'import numpy as np\n'), ((8757, 8783), 'numpy.isinf', 'np.isinf', (['output_to_target'], {}), '(output_to_target)\n', (8765, 8783), True, 'import numpy as np\n'), ((20251, 20270), 'numpy.diff', 'np.diff', (['recall[1:]'], {}), '(recall[1:])\n', (20258, 20270), True, 'import numpy as np\n'), ((20373, 20388), 'numpy.diff', 'np.diff', (['recall'], {}), '(recall)\n', (20380, 20388), True, 'import numpy as np\n'), ((21194, 21248), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[45, 10, 30]'], {'degrees': '(True)'}), "('xyz', [45, 10, 30], degrees=True)\n", (21213, 21248), False, 'from scipy.spatial.transform import Rotation\n'), ((21415, 21469), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[45, 10, 30]'], {'degrees': '(True)'}), "('xyz', [45, 10, 30], degrees=True)\n", (21434, 21469), False, 'from scipy.spatial.transform import Rotation\n'), ((21657, 21711), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[45, 10, 30]'], {'degrees': '(True)'}), "('xyz', [45, 10, 30], degrees=True)\n", (21676, 21711), False, 'from scipy.spatial.transform import Rotation\n'), 
((21894, 21948), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[45, 20, 30]'], {'degrees': '(True)'}), "('xyz', [45, 20, 30], degrees=True)\n", (21913, 21948), False, 'from scipy.spatial.transform import Rotation\n'), ((20273, 20292), 'numpy.array', 'np.array', (['precision'], {}), '(precision)\n', (20281, 20292), True, 'import numpy as np\n'), ((20391, 20410), 'numpy.array', 'np.array', (['precision'], {}), '(precision)\n', (20399, 20410), True, 'import numpy as np\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This example shows how to fit a model and evaluate its predictions.
"""
import pprint
from functools import partial
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import sys
sys.path.append('/scratch/project_2002244/benchmarks/packages') #
# %matplotlib inline
import mxnet as mx
from mxnet import gluon
import matplotlib.pyplot as plt
import json
import os
from tqdm.autonotebook import tqdm
from pathlib import Path
from gluonts.evaluation import Evaluator
from gluonts.evaluation.backtest import make_evaluation_predictions
from gluonts.model.deepar import DeepAREstimator
# from gluonts.model.seq2seq import MQCNNEstimator
from gluonts.model.canonical import CanonicalRNNEstimator
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.model.gp_forecaster import GaussianProcessEstimator
from gluonts.model.lstnet import LSTNetEstimator
from gluonts.distribution.gaussian import GaussianOutput
from gluonts.distribution.student_t import StudentTOutput
from gluonts.trainer import Trainer
from gluonts.dataset.common import ListDataset
from gluonts.dataset.field_names import FieldName
from gluonts.model.forecast import Config, OutputType
# Fix RNG seeds so MXNet training and NumPy sampling are reproducible across runs.
mx.random.seed(0)
np.random.seed(0)
def plot_prob_forecasts(ts_entry, forecast_entry, sample_id, prediction_length, plot_length, inline=True):
    """
    Plot the tail of an observed series together with its probabilistic forecast.

    :param ts_entry: pandas series of observed values (conditioning + target range)
    :param forecast_entry: GluonTS Forecast object with a .plot() method
    :param sample_id: identifier of the plotted series (kept for caller compatibility; unused here)
    :param prediction_length: forecast horizon; a red vertical line marks its start
    :param plot_length: number of trailing observations to show
    :param inline: when True, render the figure and clear it afterwards
    """
    intervals = (50, 67, 95, 99)
    labels = ["observations", "median prediction"]
    labels += [f"{k}% prediction interval" for k in intervals][::-1]
    _, axis = plt.subplots(1, 1, figsize=(10, 7))
    ts_entry[-plot_length:].plot(ax=axis)
    forecast_entry.plot(prediction_intervals=intervals, color='g')
    # mark where the forecast range begins
    axis.axvline(ts_entry.index[-prediction_length], color='r')
    plt.legend(labels, loc="upper left")
    if inline:
        plt.show()
        plt.clf()
def get_custom_dataset(name, horizon):
    """
    Load one of the hourly electricity CSVs and build GluonTS train/test ListDatasets.

    The test dataset is a rolling evaluation set: for each cut-off date (stepping
    through the test range in steps of `horizon` hours) every series truncated at
    that date is added once, so len(test_ds) == n_windows * n_series.

    :param name: dataset key, either "electricity" or "europe_power_system"
    :param horizon: forecast horizon in hours (also the rolling-window step)
    :return: tuple (train_ds, test_ds) of gluonts ListDataset objects
    :raise ValueError: if `name` is not a known dataset key
    """
    if name == "electricity":
        csv_path = r'/scratch/project_2002244/DeepAR/data/elect/electricity.csv'
        df = pd.read_csv(csv_path, sep=",", index_col=0, parse_dates=True, decimal='.').astype(float)
        df.fillna(0, inplace=True)
        train_start = '2012-01-01 00:00:00'
        train_end = '2014-05-26 23:00:00'
        test_start = '2014-05-27 00:00:00'
        test_end = '2014-12-31 23:00:00'
    elif name == "europe_power_system":
        csv_path = r'/scratch/project_2002244/DeepAR/data/elect/europe_power_system.csv'
        df = pd.read_csv(csv_path, sep=",", index_col=0, parse_dates=True, decimal='.').astype(float)
        df.fillna(0, inplace=True)
        train_start = '2015-01-01 00:00:00'
        train_end = '2017-06-23 23:00:00'
        test_start = '2017-06-24 00:00:00'
        test_end = '2017-11-30 23:00:00'
    else:
        # BUGFIX: an unknown name previously fell through and crashed later
        # with a NameError on the undefined `df`.
        raise ValueError(
            "Unknown dataset name: {!r}; expected 'electricity' or 'europe_power_system'".format(name)
        )
    # one target array per series, covering the training range only
    train_target_values = df[:train_end].T.values
    # all series share the same hourly start timestamp
    start_dates = np.array([pd.Timestamp(df.index[0], freq='1H') for _ in range(train_target_values.shape[0])])
    train_ds = ListDataset([
        {
            FieldName.TARGET: target,
            FieldName.START: start
        }
        for (target, start) in zip(train_target_values, start_dates)
    ], freq="1H")
    # rolling test set: one entry per (cut-off window, series) pair
    test_ds = ListDataset([
        {
            FieldName.TARGET: target,
            FieldName.START: start
        }
        for index in pd.date_range(start=(pd.Timestamp(test_start) - timedelta(hours=1) + timedelta(hours=horizon)),
                                  end=pd.Timestamp(test_end), freq='{}H'.format(horizon))
        for (target, start) in zip(df[:index].T.values, start_dates)
    ], freq="1H")
    return train_ds, test_ds
# --- benchmark configuration -------------------------------------------------
datasets = [
    "electricity",
    "europe_power_system"
]
plot = False  # set True to render a few example forecast plots per run
save = True  # set True to dump quantile forecasts and the metrics CSV under ./save
ctx = mx.Context("cpu")  # switch to mx.Context("gpu") to train on GPU
n_samples = 100  # sample paths drawn per probabilistic forecast
epochs = 500
num_batches_per_epoch = 50
learning_rate = 1e-3
freq = "H"  # hourly data
context_length = 168  # one week of hourly history fed to the models
batch_size = 64
horizons = [3, 6, 12, 24, 36]  # forecast horizons (hours) to benchmark
patience = 25  # early-stopping patience passed to the Trainer
# Estimator factories to benchmark. Each entry is a functools.partial that
# still expects prediction_length / freq / context_length, supplied per run
# by evaluate(). Commented-out keyword arguments are previously tried
# settings kept for reference.
estimators = [
    # plain feed-forward network with a Gaussian output distribution
    partial(
        SimpleFeedForwardEstimator,
        distr_output=GaussianOutput(),
        trainer=Trainer(
            ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,
            batch_size=batch_size, learning_rate=learning_rate, patience=patience
        ),
    ),
    # autoregressive RNN (DeepAR) with 3 stacked layers
    partial(
        DeepAREstimator,
        #distr_output = GaussianOutput(),
        # use_feat_static_cat=True,
        # cardinality=1,
        num_layers=3,
        trainer=Trainer(
            ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,
            batch_size=batch_size, learning_rate=learning_rate, patience=patience
        ),
    ),
    # LSTNet -- NOTE(review): num_series=321 matches the electricity dataset;
    # europe_power_system may have a different series count, verify before use
    partial(
        LSTNetEstimator,
        skip_size=24,
        channels=24*6,
        ar_window = 24,
        num_series=321,
        trainer=Trainer(
            ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,
            batch_size=batch_size, learning_rate=learning_rate, patience=patience
        ),
    ),
    # canonical RNN baseline
    partial(
        CanonicalRNNEstimator,
        #distr_output=GaussianOutput(), #StudentTOutput(),
        #num_layers=2,
        #cell_type="lstm",
        #num_cells=100,
        cardinality = [1],
        trainer=Trainer(
            ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,
            batch_size=batch_size, learning_rate=learning_rate, patience=patience
        ),
    ),
    # Gaussian process forecaster
    partial(
        GaussianProcessEstimator,
        # cardinality = 1,
        trainer=Trainer(
            ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,
            batch_size=batch_size, learning_rate=learning_rate, patience=patience
        ),
    ),
]
def evaluate(dataset_name, estimator, horizon):
    """
    Train one estimator on one dataset, forecast the rolling test windows,
    optionally plot/save quantile forecasts, and return aggregate metrics.

    :param dataset_name: key understood by get_custom_dataset
    :param estimator: partial estimator factory still missing
        prediction_length / freq / context_length
    :param horizon: forecast horizon in hours
    :return: dict of aggregate GluonTS metrics, extended with
        "dataset", "estimator" and "horizon" keys
    """
    train_ds, test_ds = get_custom_dataset(dataset_name, horizon)
    # complete the factory with the run-specific hyperparameters
    estimator = estimator(
        prediction_length=horizon,
        freq=freq,
        context_length = context_length,
        #cardinality=len(train_ds)
    )
    print(f"evaluating {estimator} on {dataset_name} dataset for {horizon} horizon")
    predictor = estimator.train(train_ds)
    forecast_it, ts_it = make_evaluation_predictions(
        test_ds, predictor=predictor, num_samples=n_samples
    )
    print("Obtaining time series conditioning values ...")
    tss = list(tqdm(ts_it, total=len(test_ds)))
    print("Obtaining time series predictions ...")
    forecasts = list(tqdm(forecast_it, total=len(test_ds)))
    if plot:
        print("Plotting time series predictions ...")
        for i in tqdm(range(0, 361, 90)):
            ts_entry = tss[i]
            forecast_entry = forecasts[i]
            plot_prob_forecasts(ts_entry, forecast_entry, i, horizon, context_length)
    print("Saving time series predictions ...")
    # number of rolling test windows per series: test_ds repeats each of the
    # len(train_ds) series once per cut-off window (see get_custom_dataset)
    series = int(len(forecasts)/len(train_ds))
    # accumulators: one row per series, horizon*windows quantile values per row
    # (NOTE: "sesies_q" is a historical typo for "series_q", kept as-is)
    sesies_q = np.empty((0,horizon*series), float)
    q10_, q50_, q90_, indexes_ = sesies_q, sesies_q, sesies_q, np.empty((0,horizon*series),'datetime64[s]')
    for i in range(len(train_ds)):
        q10, q50, q90, indexes = np.array([]), np.array([]), np.array([]), np.array([])
        for z in range(series):
            # forecasts are ordered window-major: entry z*len(train_ds)+i is
            # series i in window z; extract its 10/50/90% quantile paths
            f_dict = forecasts[z*len(train_ds)+i].as_json_dict(Config(output_types={OutputType.quantiles}))['quantiles']
            q10 = np.append(q10, np.array(f_dict['0.1']))
            q50 = np.append(q50, np.array(f_dict['0.5']))
            q90 = np.append(q90, np.array(f_dict['0.9']))
            indexes = np.append(indexes, np.array(list(forecasts[z*len(train_ds)+i].index)))
        q10_ = np.vstack((q10_, q10))
        q50_ = np.vstack((q50_, q50))
        q90_ = np.vstack((q90_, q90))
        indexes_ = np.vstack((indexes_, indexes))
    if save:
        # one text file per quantile plus the forecast timestamps
        save_file = r"./save/{}_{}_{}".format(type(estimator).__name__, dataset_name, str(horizon))
        np.savetxt('{}_q10.txt'.format(save_file), q10_)
        np.savetxt('{}_q50.txt'.format(save_file), q50_)
        np.savetxt('{}_q90.txt'.format(save_file), q90_)
        np.savetxt('{}_index.txt'.format(save_file), indexes_, fmt='%s')
    print("Calculating time series prediction metrics ...")
    agg_metrics, item_metrics = Evaluator()(
        iter(tss), iter(forecasts), num_series=len(test_ds)
    )
    pprint.pprint(agg_metrics)
    # eval_dict aliases agg_metrics (no copy); the run identifiers are added
    # so the caller can concatenate results into one dataframe
    eval_dict = agg_metrics
    eval_dict["dataset"] = dataset_name
    eval_dict["estimator"] = type(estimator).__name__
    eval_dict["horizon"] = str(horizon)
    return eval_dict
if __name__ == "__main__":
    import gluonts
    # log library versions so the benchmark run is reproducible
    print(gluonts.__version__)
    print(mx.__version__)
    results = []
    # full grid: every horizon x dataset x estimator combination
    for horizon in horizons:
        for dataset_name in datasets:
            for estimator in estimators:
                # catch exceptions that are happening during training to avoid failing the whole evaluation
                #try:
                evals = evaluate(dataset_name, estimator, horizon)
                results.append(evals)
                #except Exception as e:
                #    print(str(e))
    df = pd.DataFrame(results)
    # keep only the headline accuracy / calibration columns for the report
    sub_df = df[
        [
            "dataset",
            "estimator",
            "horizon",
            "ND",
            "NRMSE",
            "wQuantileLoss[0.1]",
            "wQuantileLoss[0.5]",
            "wQuantileLoss[0.9]",
            "Coverage[0.1]",
            "Coverage[0.5]",
            "Coverage[0.9]",
        ]
    ]
    print(sub_df.to_string())
    if save:
        sub_df.to_csv(r"./save/metrics_benchmarks_deepar_el.csv",index=False)
| [
"mxnet.Context",
"pandas.read_csv",
"gluonts.model.forecast.Config",
"gluonts.evaluation.Evaluator",
"numpy.array",
"gluonts.evaluation.backtest.make_evaluation_predictions",
"gluonts.distribution.gaussian.GaussianOutput",
"datetime.timedelta",
"sys.path.append",
"pprint.pprint",
"numpy.empty",
... | [((787, 850), 'sys.path.append', 'sys.path.append', (['"""/scratch/project_2002244/benchmarks/packages"""'], {}), "('/scratch/project_2002244/benchmarks/packages')\n", (802, 850), False, 'import sys\n'), ((1791, 1808), 'mxnet.random.seed', 'mx.random.seed', (['(0)'], {}), '(0)\n', (1805, 1808), True, 'import mxnet as mx\n'), ((1809, 1826), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1823, 1826), True, 'import numpy as np\n'), ((4381, 4398), 'mxnet.Context', 'mx.Context', (['"""cpu"""'], {}), "('cpu')\n", (4391, 4398), True, 'import mxnet as mx\n'), ((2110, 2145), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 7)'}), '(1, 1, figsize=(10, 7))\n', (2122, 2145), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2366), 'matplotlib.pyplot.legend', 'plt.legend', (['legend'], {'loc': '"""upper left"""'}), "(legend, loc='upper left')\n", (2340, 2366), True, 'import matplotlib.pyplot as plt\n'), ((6717, 6802), 'gluonts.evaluation.backtest.make_evaluation_predictions', 'make_evaluation_predictions', (['test_ds'], {'predictor': 'predictor', 'num_samples': 'n_samples'}), '(test_ds, predictor=predictor, num_samples=n_samples\n )\n', (6744, 6802), False, 'from gluonts.evaluation.backtest import make_evaluation_predictions\n'), ((7410, 7448), 'numpy.empty', 'np.empty', (['(0, horizon * series)', 'float'], {}), '((0, horizon * series), float)\n', (7418, 7448), True, 'import numpy as np\n'), ((8797, 8823), 'pprint.pprint', 'pprint.pprint', (['agg_metrics'], {}), '(agg_metrics)\n', (8810, 8823), False, 'import pprint\n'), ((2390, 2400), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2398, 2400), True, 'import matplotlib.pyplot as plt\n'), ((2409, 2418), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2416, 2418), True, 'import matplotlib.pyplot as plt\n'), ((7509, 7557), 'numpy.empty', 'np.empty', (['(0, horizon * series)', '"""datetime64[s]"""'], {}), "((0, horizon * series), 'datetime64[s]')\n", (7517, 
7557), True, 'import numpy as np\n'), ((8112, 8134), 'numpy.vstack', 'np.vstack', (['(q10_, q10)'], {}), '((q10_, q10))\n', (8121, 8134), True, 'import numpy as np\n'), ((8150, 8172), 'numpy.vstack', 'np.vstack', (['(q50_, q50)'], {}), '((q50_, q50))\n', (8159, 8172), True, 'import numpy as np\n'), ((8188, 8210), 'numpy.vstack', 'np.vstack', (['(q90_, q90)'], {}), '((q90_, q90))\n', (8197, 8210), True, 'import numpy as np\n'), ((8230, 8260), 'numpy.vstack', 'np.vstack', (['(indexes_, indexes)'], {}), '((indexes_, indexes))\n', (8239, 8260), True, 'import numpy as np\n'), ((8713, 8724), 'gluonts.evaluation.Evaluator', 'Evaluator', ([], {}), '()\n', (8722, 8724), False, 'from gluonts.evaluation import Evaluator\n'), ((3490, 3526), 'pandas.Timestamp', 'pd.Timestamp', (['df.index[0]'], {'freq': '"""1H"""'}), "(df.index[0], freq='1H')\n", (3502, 3526), True, 'import pandas as pd\n'), ((4661, 4677), 'gluonts.distribution.gaussian.GaussianOutput', 'GaussianOutput', ([], {}), '()\n', (4675, 4677), False, 'from gluonts.distribution.gaussian import GaussianOutput\n'), ((4695, 4846), 'gluonts.trainer.Trainer', 'Trainer', ([], {'ctx': 'ctx', 'epochs': 'epochs', 'num_batches_per_epoch': 'num_batches_per_epoch', 'batch_size': 'batch_size', 'learning_rate': 'learning_rate', 'patience': 'patience'}), '(ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,\n batch_size=batch_size, learning_rate=learning_rate, patience=patience)\n', (4702, 4846), False, 'from gluonts.trainer import Trainer\n'), ((5064, 5215), 'gluonts.trainer.Trainer', 'Trainer', ([], {'ctx': 'ctx', 'epochs': 'epochs', 'num_batches_per_epoch': 'num_batches_per_epoch', 'batch_size': 'batch_size', 'learning_rate': 'learning_rate', 'patience': 'patience'}), '(ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,\n batch_size=batch_size, learning_rate=learning_rate, patience=patience)\n', (5071, 5215), False, 'from gluonts.trainer import Trainer\n'), ((5401, 5552), 
'gluonts.trainer.Trainer', 'Trainer', ([], {'ctx': 'ctx', 'epochs': 'epochs', 'num_batches_per_epoch': 'num_batches_per_epoch', 'batch_size': 'batch_size', 'learning_rate': 'learning_rate', 'patience': 'patience'}), '(ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,\n batch_size=batch_size, learning_rate=learning_rate, patience=patience)\n', (5408, 5552), False, 'from gluonts.trainer import Trainer\n'), ((5811, 5962), 'gluonts.trainer.Trainer', 'Trainer', ([], {'ctx': 'ctx', 'epochs': 'epochs', 'num_batches_per_epoch': 'num_batches_per_epoch', 'batch_size': 'batch_size', 'learning_rate': 'learning_rate', 'patience': 'patience'}), '(ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,\n batch_size=batch_size, learning_rate=learning_rate, patience=patience)\n', (5818, 5962), False, 'from gluonts.trainer import Trainer\n'), ((6091, 6242), 'gluonts.trainer.Trainer', 'Trainer', ([], {'ctx': 'ctx', 'epochs': 'epochs', 'num_batches_per_epoch': 'num_batches_per_epoch', 'batch_size': 'batch_size', 'learning_rate': 'learning_rate', 'patience': 'patience'}), '(ctx=ctx, epochs=epochs, num_batches_per_epoch=num_batches_per_epoch,\n batch_size=batch_size, learning_rate=learning_rate, patience=patience)\n', (6098, 6242), False, 'from gluonts.trainer import Trainer\n'), ((7622, 7634), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7630, 7634), True, 'import numpy as np\n'), ((7636, 7648), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7644, 7648), True, 'import numpy as np\n'), ((7650, 7662), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7658, 7662), True, 'import numpy as np\n'), ((7664, 7676), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7672, 7676), True, 'import numpy as np\n'), ((2598, 2672), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'sep': '""","""', 'index_col': '(0)', 'parse_dates': '(True)', 'decimal': '"""."""'}), "(csv_path, sep=',', index_col=0, parse_dates=True, decimal='.')\n", (2609, 2672), True, 
'import pandas as pd\n'), ((7863, 7886), 'numpy.array', 'np.array', (["f_dict['0.1']"], {}), "(f_dict['0.1'])\n", (7871, 7886), True, 'import numpy as np\n'), ((7921, 7944), 'numpy.array', 'np.array', (["f_dict['0.5']"], {}), "(f_dict['0.5'])\n", (7929, 7944), True, 'import numpy as np\n'), ((7979, 8002), 'numpy.array', 'np.array', (["f_dict['0.9']"], {}), "(f_dict['0.9'])\n", (7987, 8002), True, 'import numpy as np\n'), ((9571, 9592), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (9583, 9592), True, 'import pandas as pd\n'), ((3032, 3106), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {'sep': '""","""', 'index_col': '(0)', 'parse_dates': '(True)', 'decimal': '"""."""'}), "(csv_path, sep=',', index_col=0, parse_dates=True, decimal='.')\n", (3043, 3106), True, 'import pandas as pd\n'), ((7772, 7815), 'gluonts.model.forecast.Config', 'Config', ([], {'output_types': '{OutputType.quantiles}'}), '(output_types={OutputType.quantiles})\n', (7778, 7815), False, 'from gluonts.model.forecast import Config, OutputType\n'), ((3407, 3431), 'pandas.Timestamp', 'pd.Timestamp', (['test_start'], {}), '(test_start)\n', (3419, 3431), True, 'import pandas as pd\n'), ((3432, 3450), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3441, 3450), False, 'from datetime import datetime, timedelta\n'), ((4106, 4128), 'pandas.Timestamp', 'pd.Timestamp', (['test_end'], {}), '(test_end)\n', (4118, 4128), True, 'import pandas as pd\n'), ((4036, 4060), 'datetime.timedelta', 'timedelta', ([], {'hours': 'horizon'}), '(hours=horizon)\n', (4045, 4060), False, 'from datetime import datetime, timedelta\n'), ((3992, 4016), 'pandas.Timestamp', 'pd.Timestamp', (['test_start'], {}), '(test_start)\n', (4004, 4016), True, 'import pandas as pd\n'), ((4017, 4035), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (4026, 4035), False, 'from datetime import datetime, timedelta\n')] |
# -*- coding: utf-8 -*-
"""
@modified: Jan 29 2021
@created: Dec 02 2020
@author: <NAME>
@reference: https://github.com/ImSoErgodic/py-upset
CentraleSupelec
MICS laboratory
9 rue <NAME>, Gif-Sur-Yvette, 91190 France
Update and fixes of the code from https://github.com/ImSoErgodic/py-upset.
"""
from itertools import chain, combinations
from functools import partial
from matplotlib.patches import Rectangle, Circle
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
from typing import Tuple
def prepare_data_dict_upset_plot(df, field_for_upset, fields_for_sets, fields2vals_keep=None, fields2vals_drop=None,
                                 add_key_to_set_names=True) -> dict:
    """
    Function for preparing the dict of dataframes expected as input to the function `plot_upset`. This function takes as
    input a dataframe, the name of the field from which you want to build an upset plot and optional fields to filter
    rows of the dataframe.

    Parameters
    ----------
    df: dataframe
        Input dataframe
    field_for_upset: str
        Name of the field in df used to build set comparisons
    fields_for_sets: str
        Name of the field(s) used to define sets of values of field_for_upset
    fields2vals_keep: dict, default=None
        If not None, subset the dataframe by keeping only rows with values in the vals list of each (field, vals) item.
    fields2vals_drop: dict, default=None
        If not None, subset the dataframe by dropping rows with values in the vals list of each (field, vals) item.
    add_key_to_set_names: bool, default=True
        If True, names of the fields used to create sets will be prepended to values for the set names in the plot.

    Returns
    -------
    dict
        Mapping set name -> one-column dataframe of the unique `field_for_upset` values in that set.
    """
    data_dict = {}
    # filter rows
    mask_keep = pd.Series(True, index=df.index)
    if fields2vals_keep is not None:
        for (field, vals) in fields2vals_keep.items():
            if not isinstance(vals, list):
                vals = [vals]
            # BUGFIX: was `self.df`, which raised a NameError in this
            # module-level function; filter the `df` argument instead.
            mask_keep = mask_keep & df[field].isin(vals)
    if fields2vals_drop is not None:
        for (field, vals) in fields2vals_drop.items():
            if not isinstance(vals, list):
                vals = [vals]
            # BUGFIX: same `self.df` -> `df` fix as above
            mask_keep = mask_keep & ~df[field].isin(vals)
    df_mask = df.loc[mask_keep]
    # define sets: one set per unique combination of the set-defining fields
    df_sets = df_mask[fields_for_sets].drop_duplicates()
    for i, set_row in df_sets.iterrows():
        # set mask: rows matching this combination of set-defining values
        set_mask = pd.Series(True, index=df_mask.index)
        for field_for_sets in fields_for_sets:
            set_mask = set_mask & (df_mask[field_for_sets] == set_row[field_for_sets])
        # set name: field values (optionally prefixed by field names), sorted by field name
        set_name = []
        for field_name, field_val in sorted(set_row.to_dict().items()):
            if add_key_to_set_names:
                set_name += [field_name, field_val]
            else:
                set_name += [field_val]
        set_name = "_".join(set_name)
        # store the unique upset values that belong to this set
        set_upset_values = df_mask.loc[set_mask, field_for_upset].unique()
        data_dict[set_name] = pd.DataFrame({field_for_upset: set_upset_values})
    return data_dict
def _check_convert_color(color):
if color is None:
return None
else:
if isinstance(color, list) or isinstance(color, np.ndarray):
for i in range(3):
if color[i] > 1:
color[i] = color[i]/255
return color
else:
if not color.startswith("#"):
raise Exception("A string color should start with a # symbol")
else:
color = color.lstrip("#")
color = list(int(color[i:i+2], 16) for i in (0, 2, 4))
return _check_convert_color(color)
def _get_all_common_columns(data_dict):
"""
Computes an array of (unique) common columns to the data frames in data_dict
:param data_dict: Dictionary of data frames
:return: array.
"""
common_columns = []
for i, k in enumerate(data_dict.keys()):
if i == 0:
common_columns = data_dict[k].columns
else:
common_columns = common_columns.intersection(data_dict[k].columns)
if len(common_columns.values) == 0:
raise ValueError('Data frames should have homogeneous columns with the same name to use for computing '
'intersections')
return common_columns.unique()
def plot_upset(data_dict,
               figsize,
               unique_keys=None,
               sort_by='size',
               inters_size_bounds=(0, np.inf),
               inters_degree_bounds=(1, np.inf),
               additional_plots=None,
               names_fontsize=14,
               query=None,
               colors_query=None,
               color_vbar=None,
               color_hbar=None,
               color_matr=None,
               vbar_rot=90,
               vbar_fmt=".2g",
               circle_size=300,
               height_ratio=4,
               width_setsize=3,
               width_names=2,
               hspace=0.2,
               wspace=0.1,
               grid_barplot=False,
               invert_barplot=False):
    """
    Plots a main set of graph showing intersection size, intersection matrix and the size of base sets. If given,
    additional plots are placed below the main graph.
    :param data_dict: dictionary like {data_frame_name: data_frame}
    :param figsize: tuple, size of the figure.
    :param unique_keys: list. Specifies the names of the columns that, together, can uniquely identify a row. If left
    empty, pyUpSet will try to use all common columns in the data frames and may possibly raise an exception (no
    common columns) or produce unexpected results (columns in different data frames with same name but different
    meanings/data).
    :param sort_by: 'size' or 'degree'. The order in which to sort the intersection bar chart and matrix in the main
    graph
    :param inters_size_bounds: tuple. Specifies the size limits of the intersections that will be displayed.
    Intersections (and relative data) whose size is outside the interval will not be plotted. Defaults to (0, np.inf).
    :param inters_degree_bounds: tuple. Specified the degree limits of the intersections that will be displayed.
    Intersections (and relative data) whose degree is outside the interval will not be plotted. Defaults to (1, np.inf).
    :param additional_plots: list of dictionaries. See below for details.
    :param names_fontsize: float giving the fontsize of names.
    :param query: list of tuples. See below for details.
    :param colors_query: list of colors; if None, a rainbow palette is generated automatically.
    :param color_vbar: 4-length array. If None, a gray is used.
    :param color_hbar: 4-length array or matplotlib color name. If None, a gray is used.
    :param color_matr: 4-length array. If None, a gray is used.
    :param vbar_rot: float, rotation of vertical bars annotations.
    :param vbar_fmt: str, print format of vertical bars annotations.
    :param circle_size: int, size of the circles in the matrix plot.
    :param height_ratio: int, ratio of the intersection barplot plot height to the matrix plot height.
    :param width_setsize: int or float, width of the set size plot
    :param width_names: int or float, width of the names plot
    :param hspace: float, hspace in GridSpec
    :param wspace: float, wspace in GridSpec
    :param invert_barplot: bool, set to True to invert orientation of intersection barplot plot.
    :param grid_barplot: bool, set to True to draw grid on barplot plot
    :return: dictionary of matplotlib objects, namely the figure and the axes.
    :raise ValueError: if no unique_keys are specified and the data frames have no common column names.
    The syntax to specify additional plots follows the signature of the corresponding matplotlib method in an Axes
    class. For each additional plot one specifies a dictionary with the kind of plot, the columns name to retrieve
    relevant data and the kwargs to pass to the plot function, as in `{'kind':'scatter', 'data':{'x':'col_1',
    'y':'col_2'}, 'kwargs':{'s':50}}`.
    Currently supported additional plots: scatter.
    It is also possible to highlight intersections. This is done through the `query` argument, where the
    intersections to highligh must be specified with the names used as keys in the data_dict.
    """
    # normalise user-supplied colors to 0-1 RGB lists (None is preserved)
    color_vbar = _check_convert_color(color_vbar)
    color_hbar = _check_convert_color(color_hbar)
    color_matr = _check_convert_color(color_matr)
    # BUGFIX: colors_query defaults to None; mapping over None raised a
    # TypeError before UpSetPlot's own None handling (rainbow palette) could
    # run. Leave None untouched and only convert an explicit color list.
    if colors_query is not None:
        colors_query = list(map(_check_convert_color, colors_query))
    query = [] if query is None else query
    ap = [] if additional_plots is None else additional_plots
    all_columns = unique_keys if unique_keys is not None else _get_all_common_columns(data_dict)
    all_columns = list(all_columns)
    plot_data = DataExtractor(data_dict, all_columns)
    ordered_inters_sizes, ordered_in_sets, ordered_out_sets = \
        plot_data.get_filtered_intersections(sort_by,inters_size_bounds,inters_degree_bounds)
    ordered_dfs, ordered_df_names = plot_data.ordered_dfs, plot_data.ordered_df_names
    upset = UpSetPlot(
        figsize = figsize,
        rows = len(ordered_dfs),
        cols = len(ordered_in_sets),
        additional_plots = additional_plots,
        names_fontsize = names_fontsize,
        query = query,
        colors_query = colors_query,
        color_vbar = color_vbar,
        color_hbar = color_hbar,
        color_matr = color_matr,
        vbar_rot = vbar_rot,
        vbar_fmt = vbar_fmt,
        circle_size = circle_size,
        height_ratio = height_ratio,
        width_setsize = width_setsize,
        width_names = width_names,
        invert_barplot = invert_barplot,
        grid_barplot = grid_barplot,
        hspace = hspace,
        wspace = wspace,
    )
    fig_dict = upset.main_plot(
        ordered_dfs,
        ordered_df_names,
        ordered_in_sets,
        ordered_out_sets,
        ordered_inters_sizes
    )
    fig_dict['additional'] = []
    # ap = [{kind:'', data:{x:'', y:''}, s:'', ..., kwargs:''}]
    for i, graph_settings in enumerate(ap):
        plot_kind = graph_settings.pop('kind')
        data_vars = graph_settings.pop('data_quantities')
        graph_properties = graph_settings.get('graph_properties', {})
        data_values = plot_data.extract_data_for(data_vars, query)
        ax = upset.additional_plot(i, plot_kind, data_values, graph_properties, labels=data_vars)
        fig_dict['additional'].append(ax)
    return fig_dict
class UpSetPlot():
    def __init__(self, figsize, rows, cols, additional_plots, names_fontsize, query, colors_query, color_vbar, color_hbar,
                 color_matr, vbar_rot, vbar_fmt, circle_size, height_ratio, width_setsize, width_names, invert_barplot,
                 grid_barplot, hspace, wspace):
        """
        Generates figures and axes.
        :param figsize: Size of the figure
        :param rows: The number of rows of the intersection matrix
        :param cols: The number of columns of the intersection matrix
        :param additional_plots: list of dictionaries as specified in plot_upset()
        :param names_fontsize: float as specified in plot_upset()
        :param query: list of tuples as specified in plot_upset()
        :param colors_query: list of colors as specified in plot_upset(); None triggers a rainbow palette
        :param color_vbar: 4-length array or color name; None falls back to a standard grey
        :param color_hbar: 4-length array; None falls back to a standard grey
        :param color_matr: 4-length array; None falls back to a standard grey
        :param vbar_rot: float
        :param vbar_fmt: str
        :param circle_size: float
        :param height_ratio: float
        :param width_setsize: int or float
        :param width_names: int
        :param invert_barplot: bool
        :param grid_barplot: bool
        :param hspace: float
        :param wspace: float
        """
        self.figsize = figsize
        self.vbar_rot = vbar_rot
        self.vbar_fmt = vbar_fmt
        self.names_fontsize = names_fontsize
        self.circle_size = circle_size
        self.height_ratio = height_ratio
        self.width_names = width_names
        self.width_setsize = width_setsize
        self.invert_barplot = invert_barplot
        self.grid_barplot = grid_barplot
        self.hspace = hspace
        self.wspace = wspace
        # set standard colors: a light and a dark grey from the Greys colormap
        self.greys = plt.cm.Greys([.22, .8])
        if color_hbar is None:
            self.color_hbar = self.greys[1]
        else:
            self.color_hbar = color_hbar
        if color_vbar is None:
            self.color_vbar = self.greys[1]
        else:
            self.color_vbar = color_vbar
        if color_matr is None:
            self.color_matr = self.greys[1]
        else:
            self.color_matr = color_matr
        # map queries to graphic properties: each highlighted intersection gets
        # its own color and a z-order above the default plot elements
        self.query = query
        if colors_query is None:
            colors_query = plt.cm.rainbow(np.linspace(.01, .99, len(self.query)))
        self.query2color = dict(zip([frozenset(q) for q in self.query], colors_query))
        self.query2zorder = dict(zip([frozenset(q) for q in self.query], np.arange(len(self.query)) + 1))
        # set figure properties: shared coordinates plus the axes grid
        self.rows = rows
        self.cols = cols
        self.x_values, self.y_values = self._create_coordinates(rows, cols)
        self.fig, self.ax_intbars, self.ax_intmatrix, \
        self.ax_setsize, self.ax_tablenames, self.additional_plots_axes = self._prepare_figure(additional_plots)
        # default kwargs applied to the supported kinds of additional plots
        self.standard_graph_settings = {
            'scatter': {
                'alpha': .3,
                'edgecolor': None
            },
            'hist': {
                'histtype': 'stepfilled',
                'alpha': .3,
                'lw': 0
            }
        }
        # single dictionary may be fragile - I leave it here as a future option
        # self.query2kwargs = dict(zip([frozenset(q) for q in self.query],
        #                              [dict(zip(['color', 'zorder'],
        #                                        [col, 1])) for col in qu_col]))
def _create_coordinates(self, rows, cols):
"""
Creates the x, y coordinates shared by the main plots.
:param rows: number of rows of intersection matrix
:param cols: number of columns of intersection matrix
:return: arrays with x and y coordinates
"""
x_values = (np.arange(cols) + 1)
y_values = (np.arange(rows) + 1)
return x_values, y_values
def _prepare_figure(self, additional_plots):
"""
Prepares the figure, axes (and their grid) taking into account the additional plots.
:param additional_plots: list of dictionaries as specified in plot_upset()
:return: references to the newly created figure and axes
"""
fig = plt.figure(figsize=self.figsize)
if additional_plots:
main_gs = gridspec.GridSpec(3, 1, hspace=.4)
topgs = main_gs[:2, 0]
botgs = main_gs[2, 0]
else:
topgs = gridspec.GridSpec(1, 1)[0, 0]
if type(self.width_setsize) == float:
self.width_setsize = np.int(np.rint(self.width_setsize*self.cols))
if type(self.width_names) == float:
self.width_names = np.int(np.rint(self.width_names*self.cols))
top_ncols = self.cols + self.width_names + self.width_setsize
top_nrows = self.rows + self.rows * self.height_ratio
if (top_ncols - self.cols) <= 1:
raise ValueError("Reduce width ratio so that 2 plots can fit on the left (set size plot and names).")
gs_top = gridspec.GridSpecFromSubplotSpec(
nrows = top_nrows,
ncols = top_ncols,
subplot_spec = topgs,
wspace = self.wspace,
hspace = self.hspace
)
setsize_w , setsize_h = self.width_setsize , self.rows
tablesize_w , tablesize_h = self.width_names , self.rows
intmatrix_w , intmatrix_h = self.cols , self.rows
intbars_w , intbars_h = self.cols , top_nrows - self.rows
if self.invert_barplot:
ax_setsize = plt.subplot(gs_top[:setsize_h, 0:setsize_w])
ax_tablenames = plt.subplot(gs_top[:tablesize_h, setsize_w:(setsize_w+tablesize_w)])
ax_intmatrix = plt.subplot(gs_top[:intmatrix_h, (setsize_w+tablesize_w):-1])
ax_intbars = plt.subplot(gs_top[-intbars_h:-1, (setsize_w+tablesize_w):-1])
else:
ax_setsize = plt.subplot(gs_top[-setsize_h:-1, 0:setsize_w])
ax_tablenames = plt.subplot(gs_top[-tablesize_h:-1, setsize_w:(setsize_w+tablesize_w)])
ax_intmatrix = plt.subplot(gs_top[-intmatrix_h:-1, (setsize_w+tablesize_w):-1])
ax_intbars = plt.subplot(gs_top[:intbars_h, (setsize_w+tablesize_w):-1])
add_ax = []
if additional_plots:
num_plots = len(additional_plots)
num_bot_rows, num_bot_cols = int(np.ceil(num_plots / 2)), 2
gs_bottom = gridspec.GridSpecFromSubplotSpec(
nrows = num_bot_rows,
ncols = num_bot_cols,
subplot_spec = botgs,
wspace = self.wspace,
hspace = self.hspace
)
from itertools import product
for r, c in product(range(num_bot_rows), range(num_bot_cols)):
if r+c+1>num_plots: break
new_plotL = plt.subplot(gs_bottom[r, c])
add_ax.append(new_plotL)
return fig, ax_intbars, ax_intmatrix, ax_setsize, ax_tablenames, tuple(add_ax)
def _color_for_query(self, query, mode):
"""
Helper function that returns the standard dark grey for non-queried intersections, and the color assigned to
a query when the class was instantiated otherwise
:param query: frozenset.
:param mode: str.
:return: color as length 4 array.
"""
# query_color = self.query2color.get(query, self.greys[1])
if mode == "matr":
query_color = self.query2color.get(query, self.color_matr)
elif mode == "vbar":
query_color = self.query2color.get(query, self.color_vbar)
return query_color
def _zorder_for_query(self, query):
"""
Helper function that returns 0 for non-queried intersections, and the zorder assigned to
a query when the class was instantiated otherwise
:param query: frozenset.
:return: zorder as int.
"""
query_zorder = self.query2zorder.get(query, 0)
return query_zorder
def main_plot(self, ordered_dfs, ordered_df_names, ordered_in_sets, ordered_out_sets, ordered_inters_sizes):
"""
Creates the main graph comprising bar plot of base set sizes, bar plot of intersection sizes and intersection
matrix.
:param ordered_dfs: array of input data frames, sorted w.r.t. the sorting parameters provided by the user (if
any)
:param ordered_df_names: array of names of input data frames, sorted (as above)
:param ordered_in_sets: list of tuples. Each tuple represents an intersection. The list must be sorted as the
other parameters.
:param ordered_out_sets: list of tuples. Each tuple represents the sets excluded from the corresponding
intersection described by ordered_in_sets.
:param ordered_inters_sizes: array of ints. Contains the intersection sizes, sorted as the other arguments.
:return: dictionary containing figure and axes references.
"""
ylim = self._base_sets_plot(ordered_dfs, ordered_df_names)
self._table_names_plot(ordered_df_names, ylim)
xlim = self._inters_sizes_plot(ordered_in_sets, ordered_inters_sizes)
set_row_map = dict(zip(ordered_df_names, self.y_values))
self._inters_matrix(ordered_in_sets, ordered_out_sets, xlim, ylim, set_row_map)
return {'figure': self.fig,
'intersection_bars': self.ax_intbars,
'intersection_matrix': self.ax_intmatrix,
'base_setsize': self.ax_setsize,
'tablenames': self.ax_tablenames}
def _table_names_plot(self, sorted_set_names, ylim):
ax = self.ax_tablenames
ax.set_ylim(ylim)
xlim = ax.get_xlim()
tr = ax.transData.transform
for i, name in enumerate(sorted_set_names):
ax.text(
x = (xlim[1]-xlim[0])/2,
y = self.y_values[i],
s = name,
fontsize = self.names_fontsize,
clip_on = True,
va = 'center',
ha = 'center',
transform = ax.transData,
family = 'monospace'
)
if len(self.x_values) > 1:
row_width = self.x_values[1] - self.x_values[0]
else:
row_width = self.x_values[0]
background = plt.cm.Greys([.09])[0]
for r, y in enumerate(self.y_values):
if r % 2 == 0:
ax.add_patch(
Rectangle((xlim[0], y - row_width / 2),
height=row_width,
width=xlim[1],
color=background, zorder=0)
)
ax.axis('off')
    def _base_sets_plot(self, sorted_sets, sorted_set_names):
        """
        Plots horizontal bar plot for base set sizes.
        :param sorted_sets: list of data frames, sorted according to user's directives.
        :param sorted_set_names: list of names for the data frames (currently unused here;
            the names column is drawn by a separate method).
        :return: tuple. The y limits of the axes, reused by the other panels for alignment.
        """
        ax = self.ax_setsize
        # Invert the x axis so the bars extend in the opposite of the default direction.
        ax.invert_xaxis()
        height = .7
        bar_center = self.y_values
        # One bar per base set; its length is the number of rows of the data frame.
        ax.barh(
            y = bar_center,
            width = [len(x) for x in sorted_sets],
            height = height,
            color = self.color_hbar,
            align = "center"
        )
        ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 4))
        self._strip_axes(ax, keep_spines=['bottom'], keep_ticklabels=['bottom'])
        ax.set_ylim((height / 2, ax.get_ylim()[1] + height / 2))
        # Extend the x range (and the visible spine) 4% past the data limits.
        xlim = ax.get_xlim()
        ax.set_xlim(xlim[0], xlim[1] + 0.04 * (xlim[1]-xlim[0]))
        ax.spines['bottom'].set_bounds(xlim[0], xlim[1] + 0.04 * (xlim[1]-xlim[0]))
        ax.set_xlabel("Set size", fontweight='bold', fontsize=13)
        return ax.get_ylim()
def _strip_axes(self, ax, keep_spines=None, keep_ticklabels=None):
"""
Removes spines and tick labels from ax, except those specified by the user.
:param ax: Axes on which to operate.
:param keep_spines: Names of spines to keep.
:param keep_ticklabels: Names of tick labels to keep.
Possible names are 'left'|'right'|'top'|'bottom'.
"""
tick_params_dict = {
'which' : 'both',
'bottom' : False,
'top' : False,
'left' : False,
'right' : False,
'labelbottom' : False,
'labeltop' : False,
'labelleft' : False,
'labelright' : False
}
if keep_ticklabels is None:
keep_ticklabels = []
if keep_spines is None:
keep_spines = []
lab_keys = [(k, "".join(["label", k])) for k in keep_ticklabels]
for k in lab_keys:
tick_params_dict[k[0]] = True
tick_params_dict[k[1]] = True
ax.tick_params(**tick_params_dict)
for sname, spine in ax.spines.items():
if sname not in keep_spines:
spine.set_visible(False)
    def _inters_sizes_plot(self, ordered_in_sets, inters_sizes):
        """
        Plots bar plot for intersection sizes.
        :param ordered_in_sets: array of tuples. Each tuple represents an intersection. The array is sorted according
        to the user's directives
        :param inters_sizes: array of ints. Sorted, likewise.
        :return: tuple. The x limits of the axes, reused to align the intersection matrix.
        """
        ax = self.ax_intbars
        width = .7
        bar_center = self.x_values
        # Queried intersections get their query color, the rest the standard bar color.
        bar_colors = [self._color_for_query(frozenset(inter), mode="vbar") for inter in ordered_in_sets]
        ax.bar(
            x = bar_center,
            height = inters_sizes,
            width = width,
            color = bar_colors,
            linewidth = 0,
            align = "center"
        )
        # Vertical gap used to place the formatted size labels just past the bar ends.
        ylim = ax.get_ylim()
        hgap = (ylim[1] - ylim[0]) / 60
        if self.invert_barplot:
            # Inverted layout: flip both axes and move tick labels/spine to the right side.
            for x, y in zip(self.x_values, inters_sizes):
                ax.text(
                    x,
                    y + 3 * hgap,
                    ("{:%s}" % self.vbar_fmt).format(y),
                    rotation = self.vbar_rot,
                    ha = 'center',
                    va = 'bottom'
                )
            ax.invert_xaxis()
            ax.invert_yaxis()
            ax.yaxis.set_label_position("right")
            ax.yaxis.tick_right()
            self._strip_axes(ax, keep_spines=['right'], keep_ticklabels=['right'])
        else:
            for x, y in zip(self.x_values, inters_sizes):
                ax.text(
                    x,
                    y + hgap,
                    ("{:%s}" % self.vbar_fmt).format(y),
                    rotation = self.vbar_rot,
                    ha = 'center',
                    va = 'bottom'
                )
            self._strip_axes(ax, keep_spines=['left'], keep_ticklabels=['left'])
        ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 4))
        # Extend the visible spine 4% past the data limits, matching the other panels.
        ylim = ax.get_ylim()
        if self.invert_barplot:
            ax.spines['right'].set_bounds(ylim[1], ylim[0] + 0.04 * (ylim[0]-ylim[1]))
        else:
            ax.spines['left'].set_bounds(ylim[0], ylim[1] + 0.04 * (ylim[1]-ylim[0]))
        if self.grid_barplot:
            ax.yaxis.grid(True, lw=.25, color='grey', ls=':')
            ax.set_axisbelow(True)
        ax.set_ylabel("Intersection size", labelpad=6, fontweight='bold', fontsize=13)
        return ax.get_xlim()
    def _inters_matrix(self, ordered_in_sets, ordered_out_sets, xlims, ylims, set_row_map):
        """
        Plots intersection matrix.
        :param ordered_in_sets: Array of tuples representing sets included in an intersection. Sorted according to
        the user's directives.
        :param ordered_out_sets: Array of tuples representing sets excluded from an intersection. Sorted likewise.
        :param xlims: tuple. x limits for the intersection matrix plot.
        :param ylims: tuple. y limits for the intersection matrix plot.
        :param set_row_map: dict. Maps data frames (base sets) names to a row of the intersection matrix
        :return: None. The matrix is drawn on self.ax_intmatrix.
        """
        ax = self.ax_intmatrix
        # Share the limits with the bar plots so columns and rows line up.
        ax.set_xlim(xlims)
        ax.set_ylim(ylims)
        # Height of a matrix row, taken from the spacing of the matrix columns.
        if len(self.x_values) > 1:
            row_width = self.x_values[1] - self.x_values[0]
        else:
            row_width = self.x_values[0]
        self._strip_axes(ax)
        background = plt.cm.Greys([.09])[0]
        # Shade every other row, anchored to the x limit matching the bar plot orientation.
        for r, y in enumerate(self.y_values):
            if r % 2 == 0:
                if self.invert_barplot:
                    ax.add_patch(
                        Rectangle(
                            (xlims[1], y - row_width / 2),
                            height = row_width,
                            width = xlims[0],
                            color = background,
                            zorder = 0
                        )
                    )
                else:
                    ax.add_patch(
                        Rectangle(
                            (xlims[0], y - row_width / 2),
                            height = row_width,
                            width = xlims[1],
                            color = background,
                            zorder = 0
                        )
                    )
        for col_num, (in_sets, out_sets) in enumerate(zip(ordered_in_sets, ordered_out_sets)):
            in_y = [set_row_map[s] for s in in_sets]
            out_y = [set_row_map[s] for s in out_sets]
            # in_circles = [Circle((self.x_values[col_num], y), radius=dot_size, color=self.greys[1]) for y in in_y]
            # out_circles = [Circle((self.x_values[col_num], y), radius=dot_size, color=self.greys[0]) for y in out_y]
            # for c in chain.from_iterable([in_circles, out_circles]):
            # ax.add_patch(c)
            # Included sets: one marker per row, in the (possibly query-specific) matrix color.
            ax.scatter(
                np.repeat(self.x_values[col_num], len(in_y)),
                in_y,
                color = np.tile(self._color_for_query(frozenset(in_sets), mode = "matr"), (len(in_y), 1)),
                s = self.circle_size,
            )
            # Excluded sets: markers in the lighter grey (self.greys[0]).
            ax.scatter(
                np.repeat(self.x_values[col_num], len(out_y)),
                out_y,
                color = self.greys[0],
                s = self.circle_size
            )
            # Vertical line connecting the included rows of this column.
            ax.vlines(
                self.x_values[col_num],
                min(in_y), max(in_y),
                lw = 3.5,
                color = self._color_for_query(frozenset(in_sets), mode="matr")
            )
def additional_plot(self, ax_index, kind, data_values, graph_args, *, labels=None):
"""
Scatter plot (for additional plots).
:param ax_index: int. Index for the relevant axes (additional plots' axes are stored in a list)
:param data_values: list of dictionary. Each dictionary is like {'x':data_for_x, 'y':data_for_y,
'in_sets':tuple}, where the tuple represents the intersection the data for x and y belongs to.
:param plot_kwargs: kwargs accepted by matplotlib scatter
:param labels: dictionary. {'x':'x_label', 'y':'y_label'}
:return: Axes
"""
ax = self.additional_plots_axes[ax_index]
plot_method = getattr(ax, kind)
for k, v in self.standard_graph_settings.get(kind, {}).items():
graph_args.setdefault(k, v)
plot_method = partial(plot_method, **graph_args)
# data_values = [{query:{relevant data}}]
ylim, xlim = [np.inf, -np.inf], [np.inf, -np.inf]
for query, data_item in data_values.items():
clr = self._color_for_query(frozenset(query))
plot_method(color=self._color_for_query(frozenset(query)),
zorder=self._zorder_for_query(frozenset(query)),
**data_item
)
new_xlim, new_ylim = ax.get_xlim(), ax.get_ylim()
for old, new in zip([xlim, ylim], [new_xlim, new_ylim]):
old[0] = new[0] if old[0] > new[0] else old[0]
old[1] = new[1] if old[1] < new[1] else old[1]
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 4))
self._strip_axes(ax, keep_spines=['bottom', 'left'], keep_ticklabels=['bottom', 'left'])
# ylim, xlim = ax.get_ylim(), ax.get_xlim()
gap_y, gap_x = max(ylim) / 500.0 * 20, max(xlim) / 500.0 * 20
ax.set_ylim(ylim[0] - gap_y, ylim[1] + gap_y)
ax.set_xlim(xlim[0] - gap_x, xlim[1] + gap_x)
ylim, xlim = ax.get_ylim(), ax.get_xlim()
ax.spines['left'].set_bounds(ylim[0], ylim[1])
ax.spines['bottom'].set_bounds(xlim[0], xlim[1])
for l, text in labels.items():
getattr(ax, 'set_%slabel' % l)(text, labelpad=3,
fontweight='bold', fontsize=13) if l in ['x', 'y'] else None
return ax
class DataExtractor:
    """
    Packages input data frames so that they can be consumed by the plot methods in UpSetPlot.
    """
    def __init__(self, data_dict, unique_keys):
        """
        :param data_dict: dict. {'name': pandas DataFrame}
        :param unique_keys: list of names of columns that uniquely identify a row in the data frames.
        """
        # A single key is unwrapped to a scalar so that df[self.unique_keys] selects a
        # Series (hashable values) instead of a one-column DataFrame.
        self.unique_keys = unique_keys if len(unique_keys) > 1 else unique_keys[0]
        self.ordered_dfs, self.ordered_df_names, self.df_dict = self.extract_base_sets_data(data_dict,
                                                                                            unique_keys)
        self.in_sets_list, self.inters_degrees, \
        self.out_sets_list, self.inters_df_dict = self.extract_intersection_data()

    def extract_base_sets_data(self, data_dict, unique_keys):
        """
        Extracts data for the bar graph of the base sets sizes.
        :param data_dict: dict. {'name': data frame}
        :param unique_keys: list of column names to uniquely identify rows.
        :return: list of data frames sorted by shape[0] (descending), list of names sorted
            accordingly, dictionary zipping the two.
        """
        dfs = []
        df_names = []
        # Keep only the identifying columns; they are used just to measure the set sizes.
        for name, df in data_dict.items():
            df_names.append(name)
            dfs.append(df[unique_keys])
        df_names = np.array(df_names)
        # Sort the base sets by decreasing number of rows.
        base_sets_order = np.argsort([x.shape[0] for x in dfs])[::-1]
        ordered_base_set_names = df_names[base_sets_order]
        ordered_base_sets = [data_dict[name] for name in ordered_base_set_names]
        set_dict = dict(zip(ordered_base_set_names, ordered_base_sets))
        return ordered_base_sets, ordered_base_set_names, set_dict

    def extract_intersection_data(self):
        """
        Extract data to use in intersection bar plot and matrix.
        :return: list of frozensets (sets included in intersections), list of integers
            (corresponding degrees of intersections), list of sets (sets excluded from
            intersections), dict {frozenset: data frame}, where each data frame contains only
            the rows belonging exclusively to the intersection described by its key.
        """
        in_sets_list = []
        out_sets_list = []
        inters_dict = {}
        inters_degrees = []
        # Enumerate every non-empty combination of base sets, by increasing degree.
        all_in_sets = chain.from_iterable(
            combinations(self.ordered_df_names, i) for i in np.arange(1, len(self.ordered_dfs) + 1))
        for in_sets in all_in_sets:
            in_sets = frozenset(in_sets)
            inters_degrees.append(len(in_sets))
            in_sets_list.append(in_sets)
            out_sets = set(self.ordered_df_names).difference(set(in_sets))
            out_sets_list.append(set(out_sets))
            # Exclusive intersection: keys present in every included set ...
            in_sets_l = list(in_sets)
            seed = in_sets_l.pop()
            exclusive_intersection = pd.Index(self.df_dict[seed][self.unique_keys])
            for s in in_sets_l:
                exclusive_intersection = exclusive_intersection.intersection(pd.Index(self.df_dict[s][
                    self.unique_keys]))
            # ... and in none of the excluded ones.
            for s in out_sets:
                exclusive_intersection = exclusive_intersection.difference(pd.Index(self.df_dict[s][
                    self.unique_keys]))
            # Recover the full rows of the seed frame for the surviving keys.
            final_df = self.df_dict[seed].set_index(pd.Index(self.df_dict[seed][self.unique_keys])).loc[
                exclusive_intersection].reset_index(drop=True)
            inters_dict[in_sets] = final_df
        return in_sets_list, inters_degrees, out_sets_list, inters_dict

    def get_filtered_intersections(self, sort_by, inters_size_bounds, inters_degree_bounds):
        """
        Filter the intersection data according to the user's directives and return it.
        :param sort_by: 'degree'|'size'. Whether to sort intersections by degree or size.
        :param inters_size_bounds: tuple. Specifies the size interval of the intersections that will be plotted.
        :param inters_degree_bounds: tuple. Specifies the degree interval of the intersections that will be plotted.
        :return: Array of int (sizes), array of frozensets (sets included in intersection),
            array of sets (sets excluded from intersection), all filtered and sorted.
        :raises ValueError: if sort_by is neither 'size' nor 'degree'.
        """
        inters_sizes = np.array([self.inters_df_dict[x].shape[0] for x in self.in_sets_list])
        inters_degrees = np.array(self.inters_degrees)
        # Keep only the intersections whose size and degree fall inside both intervals.
        size_clip = (inters_sizes <= inters_size_bounds[1]) & (inters_sizes >= inters_size_bounds[0]) & (
            inters_degrees >= inters_degree_bounds[0]) & (inters_degrees <= inters_degree_bounds[1])
        in_sets_list = np.array(self.in_sets_list)[size_clip]
        out_sets_list = np.array(self.out_sets_list)[size_clip]
        inters_sizes = inters_sizes[size_clip]
        inters_degrees = inters_degrees[size_clip]
        # sort as requested
        if sort_by == 'size':
            order = np.argsort(inters_sizes)[::-1]
        elif sort_by == 'degree':
            order = np.argsort(inters_degrees)
        else:
            # BUG FIX: an unknown sort_by used to leave `order` unbound (UnboundLocalError).
            raise ValueError("sort_by must be 'size' or 'degree', got %r" % (sort_by,))
        # store ordered data
        self.filtered_inters_sizes = inters_sizes[order]
        self.filtered_in_sets = in_sets_list[order]
        self.filtered_out_sets = out_sets_list[order]
        return self.filtered_inters_sizes, self.filtered_in_sets, self.filtered_out_sets

    def extract_data_for(self, var_dict, queries):
        """
        Extract data from named columns (values) and place in named variables (keys).
        :param var_dict: dict. {variable_name: column_name}
        :param queries: list of set-like objects identifying the intersections of interest.
        :return: dict. {query: {var_name: column_values, ...}}, plus an 'others' entry pooling
            the data of every remaining plotted intersection.
        """
        data_values = {}
        # Only queries that survived the size/degree filtering can be plotted.
        poss_queries = [q for q in queries if frozenset(q) in self.filtered_in_sets]
        for q in poss_queries:
            data_values[q] = dict(zip(var_dict.keys(),
                                      [self.inters_df_dict[frozenset(q)][v].values for v in var_dict.values()]))
        # NOTE(review): `q not in poss_queries` compares frozensets against the original query
        # objects, so it only excludes queries that were supplied as frozensets — confirm.
        data_values['others'] = dict(zip(var_dict.keys(),
                                         [chain(*[self.inters_df_dict[frozenset(q)][v].values
                                                  for q in self.filtered_in_sets if q not in poss_queries])
                                          for v in var_dict.values()]))
        # Materialize the chains so 'others' can be iterated more than once.
        for k, vals in data_values['others'].items():
            data_values['others'][k] = list(vals)
        return data_values
| [
"pandas.Series",
"matplotlib.pyplot.cm.Greys",
"numpy.ceil",
"matplotlib.patches.Rectangle",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"functools.partial",
"pandas.Index",
"numpy.rint",
"itertools.combinations",
"pandas.DataFrame",
"matplot... | [((1910, 1941), 'pandas.Series', 'pd.Series', (['(True)'], {'index': 'df.index'}), '(True, index=df.index)\n', (1919, 1941), True, 'import pandas as pd\n'), ((2576, 2612), 'pandas.Series', 'pd.Series', (['(True)'], {'index': 'df_mask.index'}), '(True, index=df_mask.index)\n', (2585, 2612), True, 'import pandas as pd\n'), ((3180, 3229), 'pandas.DataFrame', 'pd.DataFrame', (['{field_for_upset: set_upset_values}'], {}), '({field_for_upset: set_upset_values})\n', (3192, 3229), True, 'import pandas as pd\n'), ((12688, 12713), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['[0.22, 0.8]'], {}), '([0.22, 0.8])\n', (12700, 12713), True, 'import matplotlib.pyplot as plt\n'), ((15092, 15124), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'self.figsize'}), '(figsize=self.figsize)\n', (15102, 15124), True, 'import matplotlib.pyplot as plt\n'), ((15898, 16028), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', ([], {'nrows': 'top_nrows', 'ncols': 'top_ncols', 'subplot_spec': 'topgs', 'wspace': 'self.wspace', 'hspace': 'self.hspace'}), '(nrows=top_nrows, ncols=top_ncols,\n subplot_spec=topgs, wspace=self.wspace, hspace=self.hspace)\n', (15930, 16028), False, 'from matplotlib import gridspec\n'), ((30536, 30570), 'functools.partial', 'partial', (['plot_method'], {}), '(plot_method, **graph_args)\n', (30543, 30570), False, 'from functools import partial\n'), ((33434, 33452), 'numpy.array', 'np.array', (['df_names'], {}), '(df_names)\n', (33442, 33452), True, 'import numpy as np\n'), ((36321, 36391), 'numpy.array', 'np.array', (['[self.inters_df_dict[x].shape[0] for x in self.in_sets_list]'], {}), '([self.inters_df_dict[x].shape[0] for x in self.in_sets_list])\n', (36329, 36391), True, 'import numpy as np\n'), ((36417, 36446), 'numpy.array', 'np.array', (['self.inters_degrees'], {}), '(self.inters_degrees)\n', (36425, 36446), True, 'import numpy as np\n'), ((14666, 14681), 'numpy.arange', 'np.arange', (['cols'], {}), 
'(cols)\n', (14675, 14681), True, 'import numpy as np\n'), ((14707, 14722), 'numpy.arange', 'np.arange', (['rows'], {}), '(rows)\n', (14716, 14722), True, 'import numpy as np\n'), ((15176, 15211), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', '(1)'], {'hspace': '(0.4)'}), '(3, 1, hspace=0.4)\n', (15193, 15211), False, 'from matplotlib import gridspec\n'), ((16473, 16517), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[:setsize_h, 0:setsize_w]'], {}), '(gs_top[:setsize_h, 0:setsize_w])\n', (16484, 16517), True, 'import matplotlib.pyplot as plt\n'), ((16546, 16614), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[:tablesize_h, setsize_w:setsize_w + tablesize_w]'], {}), '(gs_top[:tablesize_h, setsize_w:setsize_w + tablesize_w])\n', (16557, 16614), True, 'import matplotlib.pyplot as plt\n'), ((16643, 16704), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[:intmatrix_h, setsize_w + tablesize_w:-1]'], {}), '(gs_top[:intmatrix_h, setsize_w + tablesize_w:-1])\n', (16654, 16704), True, 'import matplotlib.pyplot as plt\n'), ((16733, 16795), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[-intbars_h:-1, setsize_w + tablesize_w:-1]'], {}), '(gs_top[-intbars_h:-1, setsize_w + tablesize_w:-1])\n', (16744, 16795), True, 'import matplotlib.pyplot as plt\n'), ((16838, 16885), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[-setsize_h:-1, 0:setsize_w]'], {}), '(gs_top[-setsize_h:-1, 0:setsize_w])\n', (16849, 16885), True, 'import matplotlib.pyplot as plt\n'), ((16914, 16985), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[-tablesize_h:-1, setsize_w:setsize_w + tablesize_w]'], {}), '(gs_top[-tablesize_h:-1, setsize_w:setsize_w + tablesize_w])\n', (16925, 16985), True, 'import matplotlib.pyplot as plt\n'), ((17014, 17078), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[-intmatrix_h:-1, setsize_w + tablesize_w:-1]'], {}), '(gs_top[-intmatrix_h:-1, setsize_w + tablesize_w:-1])\n', (17025, 17078), True, 'import 
matplotlib.pyplot as plt\n'), ((17107, 17166), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_top[:intbars_h, setsize_w + tablesize_w:-1]'], {}), '(gs_top[:intbars_h, setsize_w + tablesize_w:-1])\n', (17118, 17166), True, 'import matplotlib.pyplot as plt\n'), ((17359, 17495), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', ([], {'nrows': 'num_bot_rows', 'ncols': 'num_bot_cols', 'subplot_spec': 'botgs', 'wspace': 'self.wspace', 'hspace': 'self.hspace'}), '(nrows=num_bot_rows, ncols=num_bot_cols,\n subplot_spec=botgs, wspace=self.wspace, hspace=self.hspace)\n', (17391, 17495), False, 'from matplotlib import gridspec\n'), ((21372, 21392), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['[0.09]'], {}), '([0.09])\n', (21384, 21392), True, 'import matplotlib.pyplot as plt\n'), ((27566, 27586), 'matplotlib.pyplot.cm.Greys', 'plt.cm.Greys', (['[0.09]'], {}), '([0.09])\n', (27578, 27586), True, 'import matplotlib.pyplot as plt\n'), ((33499, 33536), 'numpy.argsort', 'np.argsort', (['[x.shape[0] for x in dfs]'], {}), '([x.shape[0] for x in dfs])\n', (33509, 33536), True, 'import numpy as np\n'), ((34921, 34967), 'pandas.Index', 'pd.Index', (['self.df_dict[seed][self.unique_keys]'], {}), '(self.df_dict[seed][self.unique_keys])\n', (34929, 34967), True, 'import pandas as pd\n'), ((36679, 36706), 'numpy.array', 'np.array', (['self.in_sets_list'], {}), '(self.in_sets_list)\n', (36687, 36706), True, 'import numpy as np\n'), ((36742, 36770), 'numpy.array', 'np.array', (['self.out_sets_list'], {}), '(self.out_sets_list)\n', (36750, 36770), True, 'import numpy as np\n'), ((15314, 15337), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (15331, 15337), False, 'from matplotlib import gridspec\n'), ((15431, 15470), 'numpy.rint', 'np.rint', (['(self.width_setsize * self.cols)'], {}), '(self.width_setsize * self.cols)\n', (15438, 15470), True, 'import numpy as np\n'), ((15554, 15591), 'numpy.rint', 'np.rint', 
(['(self.width_names * self.cols)'], {}), '(self.width_names * self.cols)\n', (15561, 15591), True, 'import numpy as np\n'), ((17810, 17838), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs_bottom[r, c]'], {}), '(gs_bottom[r, c])\n', (17821, 17838), True, 'import matplotlib.pyplot as plt\n'), ((36959, 36983), 'numpy.argsort', 'np.argsort', (['inters_sizes'], {}), '(inters_sizes)\n', (36969, 36983), True, 'import numpy as np\n'), ((37044, 37070), 'numpy.argsort', 'np.argsort', (['inters_degrees'], {}), '(inters_degrees)\n', (37054, 37070), True, 'import numpy as np\n'), ((17308, 17330), 'numpy.ceil', 'np.ceil', (['(num_plots / 2)'], {}), '(num_plots / 2)\n', (17315, 17330), True, 'import numpy as np\n'), ((21519, 21623), 'matplotlib.patches.Rectangle', 'Rectangle', (['(xlim[0], y - row_width / 2)'], {'height': 'row_width', 'width': 'xlim[1]', 'color': 'background', 'zorder': '(0)'}), '((xlim[0], y - row_width / 2), height=row_width, width=xlim[1],\n color=background, zorder=0)\n', (21528, 21623), False, 'from matplotlib.patches import Rectangle, Circle\n'), ((34464, 34502), 'itertools.combinations', 'combinations', (['self.ordered_df_names', 'i'], {}), '(self.ordered_df_names, i)\n', (34476, 34502), False, 'from itertools import chain, combinations\n'), ((35077, 35120), 'pandas.Index', 'pd.Index', (['self.df_dict[s][self.unique_keys]'], {}), '(self.df_dict[s][self.unique_keys])\n', (35085, 35120), True, 'import pandas as pd\n'), ((35249, 35292), 'pandas.Index', 'pd.Index', (['self.df_dict[s][self.unique_keys]'], {}), '(self.df_dict[s][self.unique_keys])\n', (35257, 35292), True, 'import pandas as pd\n'), ((27761, 27867), 'matplotlib.patches.Rectangle', 'Rectangle', (['(xlims[1], y - row_width / 2)'], {'height': 'row_width', 'width': 'xlims[0]', 'color': 'background', 'zorder': '(0)'}), '((xlims[1], y - row_width / 2), height=row_width, width=xlims[0],\n color=background, zorder=0)\n', (27770, 27867), False, 'from matplotlib.patches import Rectangle, Circle\n'), 
((28143, 28249), 'matplotlib.patches.Rectangle', 'Rectangle', (['(xlims[0], y - row_width / 2)'], {'height': 'row_width', 'width': 'xlims[1]', 'color': 'background', 'zorder': '(0)'}), '((xlims[0], y - row_width / 2), height=row_width, width=xlims[1],\n color=background, zorder=0)\n', (28152, 28249), False, 'from matplotlib.patches import Rectangle, Circle\n'), ((35367, 35413), 'pandas.Index', 'pd.Index', (['self.df_dict[seed][self.unique_keys]'], {}), '(self.df_dict[seed][self.unique_keys])\n', (35375, 35413), True, 'import pandas as pd\n')] |
# Collection of local and semilocal functionals
import numpy as np
from .field import DirectField,ReciprocalField
from .grid import DirectGrid, ReciprocalGrid
def ThomasFermiPotential(self):
    '''
    The Thomas-Fermi Potential: (5/3) * (3/10) * (3*pi^2)^(2/3) * n^(2/3).
    '''
    prefactor = (3.0/10.0)*(5.0/3.0)*(3.0*np.pi**2)**(2.0/3.0)
    return prefactor*self**(2.0/3.0)
def ThomasFermiEnergy(self):
    '''
    The Thomas-Fermi Energy Density: (3/10) * (3*pi^2)^(2/3) * n^(5/3).
    '''
    prefactor = (3.0/10.0)*(3.0*np.pi**2)**(2.0/3.0)
    return prefactor*self**(5.0/3.0)
def vonWeizsackerPotential(self,Sigma=0.025):
    '''
    The von Weizsacker Potential.

    :param self: DirectField holding the (real) electron density.
    :param Sigma: float. Width of the Gaussian damping applied in reciprocal space.
    :return: DirectField with the smoothed von Weizsacker potential.
    :raises TypeError: if Sigma is not a real number.
    '''
    if not isinstance(Sigma,(np.generic,int,float)):
        # BUG FIX: the original printed a message and *returned* the Exception class,
        # which callers could silently ignore; raise instead.
        raise TypeError('Sigma must be a real number, got %r' % type(Sigma))
    # BUG FIX: removed `small = np.float(1.0e-6)` — np.float was removed in NumPy 1.24
    # and the value was unused (the np.place clamp it belonged to was commented out).
    # NOTE(review): sq_dens is not clamped, so zero-density points divide by zero — confirm
    # the callers guarantee strictly positive densities.
    reciprocal_grid = self.grid.get_reciprocal()
    gg = reciprocal_grid.gg
    sq_dens = np.sqrt(np.real(self))
    # Gaussian-damped Laplacian of sqrt(n), evaluated in reciprocal space.
    n2_sq_dens = sq_dens.fft()*np.exp(-0.5*gg*Sigma**2)*gg
    return DirectField(grid=self.grid,griddata_3d=0.5*np.real(n2_sq_dens.ifft())/sq_dens)
def vonWeizsackerEnergy(self):
    '''
    The von Weizsacker Energy Density: 0.5 * |grad(sqrt(n))|^2.
    '''
    sqrt_dens = np.sqrt(self)
    # Sum the squared gradient components over the last axis.
    grad_sq = np.einsum('ijkl->ijk', sqrt_dens.gradient()**2)
    return DirectField(grid=self.grid, griddata_3d=0.5*np.real(grad_sq))
| [
"numpy.exp",
"numpy.sqrt",
"numpy.float",
"numpy.real"
] | [((710, 725), 'numpy.float', 'np.float', (['(1e-06)'], {}), '(1e-06)\n', (718, 725), True, 'import numpy as np\n'), ((1146, 1159), 'numpy.sqrt', 'np.sqrt', (['self'], {}), '(self)\n', (1153, 1159), True, 'import numpy as np\n'), ((827, 840), 'numpy.real', 'np.real', (['self'], {}), '(self)\n', (834, 840), True, 'import numpy as np\n'), ((873, 903), 'numpy.exp', 'np.exp', (['(-0.5 * gg * Sigma ** 2)'], {}), '(-0.5 * gg * Sigma ** 2)\n', (879, 903), True, 'import numpy as np\n')] |
import numpy as np
from .. import diagnostics as di
from numpy.testing import assert_almost_equal, assert_array_equal
def test_evd():
    # extend_diff_outliers pairs each difference index i with i + 1,
    # collapsing duplicates (7 and 8 together yield 7, 8, 9).
    outlier_indices = [3, 7, 8, 12, 20]
    extended = di.extend_diff_outliers(outlier_indices)
    assert_array_equal(extended, [3, 4, 7, 8, 9, 12, 13, 20, 21])
| [
"numpy.testing.assert_array_equal"
] | [((230, 284), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['x', '[3, 4, 7, 8, 9, 12, 13, 20, 21]'], {}), '(x, [3, 4, 7, 8, 9, 12, 13, 20, 21])\n', (248, 284), False, 'from numpy.testing import assert_almost_equal, assert_array_equal\n')] |
"""
Project: RadarBook
File: right_circular_cone.py
Created by: <NAME>
One: 11/24/2018
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
from numpy import sin, cos, exp, sqrt
from scipy.constants import c, pi
def radar_cross_section(frequency, cone_half_angle, base_radius, incident_angle):
    """
    Calculate the radar cross section of a right circular cone.
    :param frequency: The operating frequency (Hz).
    :param cone_half_angle: The cone half angle (rad).
    :param base_radius: The base radius (m).
    :param incident_angle: The incident angle (rad), expected in [0, pi].
    :return: The radar cross section for vertical and horizontal polarization,
        as the tuple (rcs_vv, rcs_hh) (m^2).
    """
    # Wavelength
    wavelength = c / frequency
    # Wavenumber
    k = 2.0 * pi / wavelength
    # Parameter "n"
    n = 1.5 + cone_half_angle / pi
    # Common factor
    # (only defined off nose-on; at incident_angle == 0 the trailing sin() would divide by zero)
    if incident_angle != 0.0:
        value = (wavelength ** 2 * k * base_radius) / (4.0 * pi ** 2) * (sin(pi / n) / n) ** 2 / sin(incident_angle)
    # Special case values
    # Upper bounds used below to clip the off-axis results: nose_max (nose-on return),
    # spec_max (broadside to the cone generator) and base_max (base specular return).
    term1 = 1.0 / (cos(pi / n) - cos(3.0 * pi / n))
    term2 = sin(pi / n) * exp(1j * (2.0 * k * base_radius - pi / 4.0)) / \
            (n * sqrt(pi * k * base_radius) * (cos(pi / n) - cos(3.0 * pi / (2.0 * n))) ** 2)
    nose_max = (wavelength ** 2 / pi) * (k * base_radius * sin(pi / n) / n) ** 2 * abs(term1 + term2) ** 2
    spec_max = wavelength ** 2 * 8.0 * pi / 9.0 * (base_radius / wavelength) ** 3 / \
               (sin(cone_half_angle) ** 2 * cos(cone_half_angle))
    base_max = wavelength ** 2 * (k * base_radius) ** 4 / (4.0 * pi)
    # Calculate the radar cross section
    # Branch on the incidence region; both polarizations coincide in the three special
    # directions and differ elsewhere.
    if incident_angle < 1e-6:
        # Nose on, double diffraction on base
        term1 = 1.0 / (cos(pi / n) - cos(3.0 * pi / n))
        term2 = sin(pi / n) * exp(1j * (2.0 * k * base_radius - pi / 4.0)) / \
                (n * sqrt(pi * k * base_radius) * (cos(pi/n) - cos(3.0 * pi / (2.0 * n))) ** 2)
        rcs_vv = (wavelength ** 2 / pi) * (k * base_radius * sin(pi / n) / n) ** 2 * abs(term1 + term2) ** 2
        rcs_hh = rcs_vv
    elif abs(incident_angle - pi) < 1e-6:
        # Base specular
        rcs_vv = wavelength ** 2 * (k * base_radius) ** 4 / (4.0 * pi)
        rcs_hh = rcs_vv
    elif abs(incident_angle - (0.5 * pi - cone_half_angle)) < 1e-6:
        # Normal to the generator of the cone
        rcs_vv = wavelength ** 2 * 8.0 * pi / 9.0 * (base_radius / wavelength) ** 3 / \
                 (sin(cone_half_angle) ** 2 * cos(cone_half_angle))
        rcs_hh = rcs_vv
    elif 0.0 < incident_angle < cone_half_angle:
        # 0 < incidence < half angle: result capped at the nose-on bound (nose_max).
        term1 = exp(1j * (2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
        term2 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
        term3 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
        term4 = exp(-1j*(2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
        term5 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi + 2.0 * incident_angle) / n))
        term6 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi + 2.0 * incident_angle) / n))
        rcs_vv = value * abs(term1 * term2 + term4 * term5) ** 2
        rcs_hh = value * abs(term1 * term3 + term4 * term6) ** 2
        if rcs_vv > nose_max:
            rcs_vv = nose_max
        if rcs_hh > nose_max:
            rcs_hh = nose_max
    elif cone_half_angle <= incident_angle < 0.5 * pi:
        # Half angle <= incidence < pi/2: capped by a tapered fraction of the specular bound.
        term1 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
        term2 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
        rcs_vv = value * term1 ** 2
        rcs_hh = value * term2 ** 2
        if rcs_vv > 0.8 * spec_max:
            rcs_vv = spec_max * cos(25 * (incident_angle - (0.5 * pi - cone_half_angle)))
        if rcs_hh > 0.8 * spec_max:
            rcs_hh = spec_max * cos(25 * (incident_angle - (0.5 * pi - cone_half_angle)))
    elif 0.5 * pi <= incident_angle < pi:
        # Rear aspect (pi/2 <= incidence < pi): capped at the base specular bound (base_max).
        term1 = exp(1j * (2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
        term2 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
        term3 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((3.0 * pi - 2.0 * incident_angle) / n))
        term4 = exp(-1j * (2.0 * k * base_radius * sin(incident_angle) - pi / 4.0))
        term5 = 1.0 / (cos(pi / n) - 1.0) - 1.0 / (cos(pi / n) - cos((2.0 * incident_angle - pi) / n))
        term6 = 1.0 / (cos(pi / n) - 1.0) + 1.0 / (cos(pi / n) - cos((2.0 * incident_angle - pi) / n))
        rcs_vv = value * abs(term1 * term2 + term4 * term5) ** 2
        rcs_hh = value * abs(term1 * term3 + term4 * term6) ** 2
        if rcs_vv > base_max:
            rcs_vv = base_max
        if rcs_hh > base_max:
            rcs_hh = base_max
    # NOTE(review): an incident_angle outside [0, pi] (beyond the 1e-6 tolerances) matches
    # no branch and leaves rcs_vv/rcs_hh undefined — confirm callers stay within [0, pi].
    return rcs_vv, rcs_hh
| [
"numpy.sin",
"numpy.exp",
"numpy.sqrt",
"numpy.cos"
] | [((1122, 1141), 'numpy.sin', 'sin', (['incident_angle'], {}), '(incident_angle)\n', (1125, 1141), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1188, 1199), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (1191, 1199), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1202, 1219), 'numpy.cos', 'cos', (['(3.0 * pi / n)'], {}), '(3.0 * pi / n)\n', (1205, 1219), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1233, 1244), 'numpy.sin', 'sin', (['(pi / n)'], {}), '(pi / n)\n', (1236, 1244), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1247, 1293), 'numpy.exp', 'exp', (['(1.0j * (2.0 * k * base_radius - pi / 4.0))'], {}), '(1.0j * (2.0 * k * base_radius - pi / 4.0))\n', (1250, 1293), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1629, 1649), 'numpy.cos', 'cos', (['cone_half_angle'], {}), '(cone_half_angle)\n', (1632, 1649), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1313, 1339), 'numpy.sqrt', 'sqrt', (['(pi * k * base_radius)'], {}), '(pi * k * base_radius)\n', (1317, 1339), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1601, 1621), 'numpy.sin', 'sin', (['cone_half_angle'], {}), '(cone_half_angle)\n', (1604, 1621), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1860, 1871), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (1863, 1871), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1874, 1891), 'numpy.cos', 'cos', (['(3.0 * pi / n)'], {}), '(3.0 * pi / n)\n', (1877, 1891), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1909, 1920), 'numpy.sin', 'sin', (['(pi / n)'], {}), '(pi / n)\n', (1912, 1920), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1923, 1969), 'numpy.exp', 'exp', (['(1.0j * (2.0 * k * base_radius - pi / 4.0))'], {}), '(1.0j * (2.0 * k * base_radius - pi / 4.0))\n', (1926, 1969), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1343, 1354), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (1346, 1354), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1357, 
1382), 'numpy.cos', 'cos', (['(3.0 * pi / (2.0 * n))'], {}), '(3.0 * pi / (2.0 * n))\n', (1360, 1382), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1993, 2019), 'numpy.sqrt', 'sqrt', (['(pi * k * base_radius)'], {}), '(pi * k * base_radius)\n', (1997, 2019), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1098, 1109), 'numpy.sin', 'sin', (['(pi / n)'], {}), '(pi / n)\n', (1101, 1109), False, 'from numpy import sin, cos, exp, sqrt\n'), ((1449, 1460), 'numpy.sin', 'sin', (['(pi / n)'], {}), '(pi / n)\n', (1452, 1460), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2023, 2034), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (2026, 2034), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2035, 2060), 'numpy.cos', 'cos', (['(3.0 * pi / (2.0 * n))'], {}), '(3.0 * pi / (2.0 * n))\n', (2038, 2060), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2610, 2630), 'numpy.cos', 'cos', (['cone_half_angle'], {}), '(cone_half_angle)\n', (2613, 2630), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2129, 2140), 'numpy.sin', 'sin', (['(pi / n)'], {}), '(pi / n)\n', (2132, 2140), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2582, 2602), 'numpy.sin', 'sin', (['cone_half_angle'], {}), '(cone_half_angle)\n', (2585, 2602), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2812, 2823), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (2815, 2823), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2840, 2851), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (2843, 2851), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2854, 2896), 'numpy.cos', 'cos', (['((3.0 * pi - 2.0 * incident_angle) / n)'], {}), '((3.0 * pi - 2.0 * incident_angle) / n)\n', (2857, 2896), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2921, 2932), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (2924, 2932), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2949, 2960), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (2952, 2960), 
False, 'from numpy import sin, cos, exp, sqrt\n'), ((2963, 3005), 'numpy.cos', 'cos', (['((3.0 * pi - 2.0 * incident_angle) / n)'], {}), '((3.0 * pi - 2.0 * incident_angle) / n)\n', (2966, 3005), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3113, 3124), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3116, 3124), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3141, 3152), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3144, 3152), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3155, 3197), 'numpy.cos', 'cos', (['((3.0 * pi + 2.0 * incident_angle) / n)'], {}), '((3.0 * pi + 2.0 * incident_angle) / n)\n', (3158, 3197), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3222, 3233), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3225, 3233), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3250, 3261), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3253, 3261), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3264, 3306), 'numpy.cos', 'cos', (['((3.0 * pi + 2.0 * incident_angle) / n)'], {}), '((3.0 * pi + 2.0 * incident_angle) / n)\n', (3267, 3306), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3977, 4034), 'numpy.cos', 'cos', (['(25 * (incident_angle - (0.5 * pi - cone_half_angle)))'], {}), '(25 * (incident_angle - (0.5 * pi - cone_half_angle)))\n', (3980, 4034), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4104, 4161), 'numpy.cos', 'cos', (['(25 * (incident_angle - (0.5 * pi - cone_half_angle)))'], {}), '(25 * (incident_angle - (0.5 * pi - cone_half_angle)))\n', (4107, 4161), False, 'from numpy import sin, cos, exp, sqrt\n'), ((2756, 2775), 'numpy.sin', 'sin', (['incident_angle'], {}), '(incident_angle)\n', (2759, 2775), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3057, 3076), 'numpy.sin', 'sin', (['incident_angle'], {}), '(incident_angle)\n', (3060, 3076), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3640, 3651), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3643, 
3651), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3668, 3679), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3671, 3679), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3682, 3724), 'numpy.cos', 'cos', (['((3.0 * pi - 2.0 * incident_angle) / n)'], {}), '((3.0 * pi - 2.0 * incident_angle) / n)\n', (3685, 3724), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3749, 3760), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3752, 3760), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3777, 3788), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (3780, 3788), False, 'from numpy import sin, cos, exp, sqrt\n'), ((3791, 3833), 'numpy.cos', 'cos', (['((3.0 * pi - 2.0 * incident_angle) / n)'], {}), '((3.0 * pi - 2.0 * incident_angle) / n)\n', (3794, 3833), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4311, 4322), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4314, 4322), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4339, 4350), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4342, 4350), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4353, 4395), 'numpy.cos', 'cos', (['((3.0 * pi - 2.0 * incident_angle) / n)'], {}), '((3.0 * pi - 2.0 * incident_angle) / n)\n', (4356, 4395), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4420, 4431), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4423, 4431), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4448, 4459), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4451, 4459), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4462, 4504), 'numpy.cos', 'cos', (['((3.0 * pi - 2.0 * incident_angle) / n)'], {}), '((3.0 * pi - 2.0 * incident_angle) / n)\n', (4465, 4504), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4614, 4625), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4617, 4625), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4642, 4653), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4645, 4653), 
False, 'from numpy import sin, cos, exp, sqrt\n'), ((4656, 4692), 'numpy.cos', 'cos', (['((2.0 * incident_angle - pi) / n)'], {}), '((2.0 * incident_angle - pi) / n)\n', (4659, 4692), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4717, 4728), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4720, 4728), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4745, 4756), 'numpy.cos', 'cos', (['(pi / n)'], {}), '(pi / n)\n', (4748, 4756), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4759, 4795), 'numpy.cos', 'cos', (['((2.0 * incident_angle - pi) / n)'], {}), '((2.0 * incident_angle - pi) / n)\n', (4762, 4795), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4255, 4274), 'numpy.sin', 'sin', (['incident_angle'], {}), '(incident_angle)\n', (4258, 4274), False, 'from numpy import sin, cos, exp, sqrt\n'), ((4558, 4577), 'numpy.sin', 'sin', (['incident_angle'], {}), '(incident_angle)\n', (4561, 4577), False, 'from numpy import sin, cos, exp, sqrt\n')] |
"""
@file
@brief Implements a transform which modifies the target
and applies the reverse transformation on the target.
"""
import numpy
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import NearestNeighbors
from .sklearn_transform_inv import BaseReciprocalTransformer
class FunctionReciprocalTransformer(BaseReciprocalTransformer):
    """
    Applies a function to the target before training so that the
    prediction can be mapped back with the reciprocal function
    before scoring. A set of predefined function pairs is available:

    .. runpython::
        :showcode:

        import pprint
        from mlinsights.mlmodel.sklearn_transform_inv_fct import FunctionReciprocalTransformer
        pprint.pprint(FunctionReciprocalTransformer.available_fcts())
    """

    @staticmethod
    def available_fcts():
        """
        Returns the mapping from function name to
        ``(callable, name of the reciprocal function)``.
        """
        return {
            'log': (numpy.log, 'exp'),
            'exp': (numpy.exp, 'log'),
            'log(1+x)': (lambda x: numpy.log(x + 1), 'exp(x)-1'),
            'log1p': (numpy.log1p, 'expm1'),
            'exp(x)-1': (lambda x: numpy.exp(x) - 1, 'log'),
            'expm1': (numpy.expm1, 'log1p'),
        }

    def __init__(self, fct, fct_inv=None):
        """
        @param fct      function name or numerical function
        @param fct_inv  optional if *fct* is a function name,
                        reciprocal function otherwise
        """
        BaseReciprocalTransformer.__init__(self)
        if isinstance(fct, str):
            if fct_inv is not None:
                raise ValueError(  # pragma: no cover
                    "If fct is a function name, fct_inv must not be specified.")
            known = self.__class__.available_fcts()
            if fct not in known:
                raise ValueError(  # pragma: no cover
                    "Unknown fct '{}', it should in {}.".format(
                        fct, list(sorted(known))))
        elif fct_inv is None:
            raise ValueError(
                "If fct is callable, fct_inv must be specified.")
        self.fct = fct
        self.fct_inv = fct_inv

    def fit(self, X=None, y=None, sample_weight=None):
        """
        Resolves *fct* and *fct_inv* into the fitted attributes
        ``fct_`` and ``fct_inv_``.
        """
        if callable(self.fct):
            self.fct_, self.fct_inv_ = self.fct, self.fct_inv
        else:
            self.fct_, self.fct_inv_ = self.__class__.available_fcts()[self.fct]
        return self

    def get_fct_inv(self):
        """
        Returns a trained transform which reverses the target
        after a predictor.
        """
        if isinstance(self.fct_inv_, str):
            args = (self.fct_inv_,)
        else:
            args = (self.fct_inv_, self.fct_)
        return FunctionReciprocalTransformer(*args).fit()

    def transform(self, X, y):
        """
        Transforms *X* and *y* and returns both; when *y* is None,
        the returned *y* is None as well.
        """
        return X, (None if y is None else self.fct_(y))
class PermutationReciprocalTransformer(BaseReciprocalTransformer):
    """
    The transform is used to permute targets,
    predict, then permute the target back before scoring.
    nan values remain nan values. Once fitted, the transform
    has attribute ``permutation_`` which keeps
    track of the permutation to apply.
    """

    def __init__(self, random_state=None, closest=False):
        """
        @param random_state random state
        @param closest if True, finds the closest permuted element
        """
        BaseReciprocalTransformer.__init__(self)
        self.random_state = random_state
        self.closest = closest

    def fit(self, X=None, y=None, sample_weight=None):
        """
        Defines a random permutation over the targets.

        @param X ignored
        @param y targets, the permutation is built over the distinct
            values found in *y* (nan excluded)
        @param sample_weight ignored
        @return self
        """
        if y is None:
            raise RuntimeError(  # pragma: no cover
                "targets cannot be empty.")
        num = numpy.issubdtype(y.dtype, numpy.floating)
        perm = {}
        for u in y.ravel():
            # nan cannot serve as a dictionary key, leave it unmapped
            if num and numpy.isnan(u):
                continue
            if u in perm:
                continue
            perm[u] = len(perm)
        lin = numpy.arange(len(perm))
        if self.random_state is None:
            lin = numpy.random.permutation(lin)
        else:
            rs = numpy.random.RandomState(  # pylint: disable=E1101
                self.random_state)  # pylint: disable=E1101
            lin = rs.permutation(lin)
        for u in perm:
            perm[u] = lin[perm[u]]
        self.permutation_ = perm
        # fix: return self for consistency with scikit-learn's fit
        # convention and with FunctionReciprocalTransformer.fit
        return self

    def _check_is_fitted(self):
        # Raises NotFittedError when fit was never called.
        if not hasattr(self, 'permutation_'):
            raise NotFittedError(  # pragma: no cover
                "This instance {} is not fitted yet. Call 'fit' with "
                "appropriate arguments before using this method.".format(
                    type(self)))

    def get_fct_inv(self):
        """
        Returns a trained transform which reverse the target
        after a predictor.
        """
        self._check_is_fitted()
        res = PermutationReciprocalTransformer(
            self.random_state, closest=self.closest)
        res.permutation_ = {v: k for k, v in self.permutation_.items()}
        return res

    def _find_closest(self, cl):
        # Returns the known permutation key closest to *cl*, using a
        # kd-tree built lazily over the permutation keys.
        if not hasattr(self, 'knn_'):
            self.knn_ = NearestNeighbors(n_neighbors=1, algorithm='kd_tree')
            self.knn_perm_ = numpy.array(list(self.permutation_))
            self.knn_perm_ = self.knn_perm_.reshape((len(self.knn_perm_), 1))
            self.knn_.fit(self.knn_perm_)
        ind = self.knn_.kneighbors([[cl]], return_distance=False)
        res = self.knn_perm_[ind, 0]
        if self.knn_perm_.dtype in (numpy.float32, numpy.float64):
            return float(res)
        if self.knn_perm_.dtype in (numpy.int32, numpy.int64):
            return int(res)
        raise NotImplementedError(  # pragma: no cover
            "The function does not work for type {}.".format(
                self.knn_perm_.dtype))

    def transform(self, X, y):
        """
        Transforms *X* and *y*.
        Returns transformed *X* and *y*.
        If *y* is None, the returned value for *y*
        is None as well.
        """
        if y is None:
            return X, None
        self._check_is_fitted()
        # fix: ``numpy.str`` was removed in numpy 1.24; it was an alias
        # for the builtin ``str``, which keeps the behaviour identical.
        if len(y.shape) == 1 or y.dtype in (str, numpy.int32, numpy.int64):
            # permutes classes
            yp = y.copy().ravel()
            num = numpy.issubdtype(y.dtype, numpy.floating)
            for i in range(len(yp)):  # pylint: disable=C0200
                if num and numpy.isnan(yp[i]):
                    continue
                if yp[i] not in self.permutation_:
                    if self.closest:
                        cl = self._find_closest(yp[i])
                    else:
                        raise RuntimeError("Unable to find key '{}' in {}.".format(
                            yp[i], list(sorted(self.permutation_))))
                else:
                    cl = yp[i]
                yp[i] = self.permutation_[cl]
            return X, yp.reshape(y.shape)
        # y holds probabilities or raw scores: permute the columns
        if len(y.shape) != 2:
            raise RuntimeError(
                "yp should be a matrix but has shape {}.".format(y.shape))
        # fix: the original rebound the loop variable ``cl`` over the
        # list it iterated; use distinct names for clarity and safety.
        pairs = sorted((v, k) for k, v in self.permutation_.items())
        new_perm = {}
        for _, current in pairs:
            new_perm[current] = len(new_perm)
        yp = y.copy()
        for i in range(y.shape[1]):
            yp[:, new_perm[i]] = y[:, i]
        return X, yp
| [
"numpy.log",
"numpy.exp",
"numpy.issubdtype",
"numpy.isnan",
"sklearn.neighbors.NearestNeighbors",
"numpy.random.RandomState",
"numpy.random.permutation"
] | [((4151, 4192), 'numpy.issubdtype', 'numpy.issubdtype', (['y.dtype', 'numpy.floating'], {}), '(y.dtype, numpy.floating)\n', (4167, 4192), False, 'import numpy\n'), ((4481, 4510), 'numpy.random.permutation', 'numpy.random.permutation', (['lin'], {}), '(lin)\n', (4505, 4510), False, 'import numpy\n'), ((4542, 4585), 'numpy.random.RandomState', 'numpy.random.RandomState', (['self.random_state'], {}), '(self.random_state)\n', (4566, 4585), False, 'import numpy\n'), ((5554, 5606), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(1)', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=1, algorithm='kd_tree')\n", (5570, 5606), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((6691, 6732), 'numpy.issubdtype', 'numpy.issubdtype', (['y.dtype', 'numpy.floating'], {}), '(y.dtype, numpy.floating)\n', (6707, 6732), False, 'import numpy\n'), ((4262, 4276), 'numpy.isnan', 'numpy.isnan', (['u'], {}), '(u)\n', (4273, 4276), False, 'import numpy\n'), ((1036, 1052), 'numpy.log', 'numpy.log', (['(x + 1)'], {}), '(x + 1)\n', (1045, 1052), False, 'import numpy\n'), ((6822, 6840), 'numpy.isnan', 'numpy.isnan', (['yp[i]'], {}), '(yp[i])\n', (6833, 6840), False, 'import numpy\n'), ((1147, 1159), 'numpy.exp', 'numpy.exp', (['x'], {}), '(x)\n', (1156, 1159), False, 'import numpy\n')] |
# Based on
# https://learn.adafruit.com/adafruit-pioled-128x32-mini-oled-for-raspberry-pi/
import time
import sys
from board import SCL, SDA
import busio
from PIL import Image, ImageDraw
import adafruit_ssd1306
import numpy as np
from life import LifeBoard, SparseSetRules, SparseSetState
# Print full numpy arrays without truncation when the board state is
# dumped to stdout below.
np.set_printoptions(threshold=sys.maxsize, linewidth=300)
# Define the simple rotor
# (x, y) coordinates of live cells: three cells in a horizontal row.
rotor = {(16, 16), (17, 16), (18, 16)}
# Five-cell glider-style pattern (coordinates are (x, y) tuples).
glider = {(10, 11), (11, 11), (12, 11), (12, 12), (11, 13)}
# Simkin glider gun encoded as rows of text: 'O' marks a live cell,
# '.' a dead one; converted to coordinate tuples by convert_to_tuples.
simkin_gun = [
    "OO.....OO........................",
    "OO.....OO........................",
    ".................................",
    "....OO...........................",
    "....OO...........................",
    ".................................",
    ".................................",
    ".................................",
    ".................................",
    "......................OO.OO......",
    ".....................O.....O.....",
    ".....................O......O..OO",
    ".....................OOO...O...OO",
    "..........................O......",
    ".................................",
    ".................................",
    ".................................",
    "....................OO...........",
    "....................O............",
    ".....................OOO.........",
    ".......................O.........",
]
def convert_to_tuples(array_of_strings, offset_x, offset_y):
    """Convert an ASCII pattern into a set of live-cell coordinates.

    Each 'O' at column i of row j becomes the tuple
    (i + offset_x, j + offset_y); every other character is ignored.
    All rows must have the same length.
    """
    lengths = [len(s) for s in array_of_strings]
    assert np.all(np.asarray(lengths) == lengths[0])
    return {
        (col + offset_x, row + offset_y)
        for row, line in enumerate(array_of_strings)
        for col, ch in enumerate(line)
        if ch == "O"
    }
# Create the I2C interface.
i2c = busio.I2C(SCL, SDA)
# Create the SSD1306 OLED class.
# The first two parameters are the pixel width and pixel height. Change these
# to the right size for your display!
disp = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
# Clear display.
disp.fill(0)
disp.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new("1", (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=0)
# Build the simulation: the Simkin gun pattern offset to (60, 8) on a
# wrapping board the size of the display.
rules = SparseSetRules()
state = SparseSetState(convert_to_tuples(simkin_gun, 60, 8))
# state = SparseSetState(glider)
board = LifeBoard(
    state, rules, disp.width, disp.height, x_wrap=True, y_wrap=True
)
# Dump the initial board as a dense array to stdout for inspection.
print(board.state.to_dense(disp.width, disp.height))
# Draw the initial generation: each entry of board.state.grid is an
# (x, y) coordinate of a live cell.
image = Image.new("1", (width, height))
for c in board.state.grid:
    image.putpixel((c[0], c[1]), 1)
disp.image(image)
disp.show()
# Main loop: advance one generation and redraw every live cell.
while True:
    board.update()
    image = Image.new("1", (width, height))
    for c in board.state.grid:
        image.putpixel((c[0], c[1]), 1)
    disp.image(image)
    disp.show()
| [
"adafruit_ssd1306.SSD1306_I2C",
"life.SparseSetRules",
"PIL.Image.new",
"busio.I2C",
"numpy.asarray",
"PIL.ImageDraw.Draw",
"life.LifeBoard",
"numpy.set_printoptions"
] | [((293, 350), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize', 'linewidth': '(300)'}), '(threshold=sys.maxsize, linewidth=300)\n', (312, 350), True, 'import numpy as np\n'), ((1777, 1796), 'busio.I2C', 'busio.I2C', (['SCL', 'SDA'], {}), '(SCL, SDA)\n', (1786, 1796), False, 'import busio\n'), ((1955, 1997), 'adafruit_ssd1306.SSD1306_I2C', 'adafruit_ssd1306.SSD1306_I2C', (['(128)', '(32)', 'i2c'], {}), '(128, 32, i2c)\n', (1983, 1997), False, 'import adafruit_ssd1306\n'), ((2183, 2214), 'PIL.Image.new', 'Image.new', (['"""1"""', '(width, height)'], {}), "('1', (width, height))\n", (2192, 2214), False, 'from PIL import Image, ImageDraw\n'), ((2262, 2283), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (2276, 2283), False, 'from PIL import Image, ImageDraw\n'), ((2397, 2413), 'life.SparseSetRules', 'SparseSetRules', ([], {}), '()\n', (2411, 2413), False, 'from life import LifeBoard, SparseSetRules, SparseSetState\n'), ((2516, 2590), 'life.LifeBoard', 'LifeBoard', (['state', 'rules', 'disp.width', 'disp.height'], {'x_wrap': '(True)', 'y_wrap': '(True)'}), '(state, rules, disp.width, disp.height, x_wrap=True, y_wrap=True)\n', (2525, 2590), False, 'from life import LifeBoard, SparseSetRules, SparseSetState\n'), ((2660, 2691), 'PIL.Image.new', 'Image.new', (['"""1"""', '(width, height)'], {}), "('1', (width, height))\n", (2669, 2691), False, 'from PIL import Image, ImageDraw\n'), ((2830, 2861), 'PIL.Image.new', 'Image.new', (['"""1"""', '(width, height)'], {}), "('1', (width, height))\n", (2839, 2861), False, 'from PIL import Image, ImageDraw\n'), ((1487, 1506), 'numpy.asarray', 'np.asarray', (['lengths'], {}), '(lengths)\n', (1497, 1506), True, 'import numpy as np\n')] |
#!/usr/bin/env python
__all__ = ['render', 'show', 'set_scene']
import numpy as np
from ..vec3 import Vec3
from .helper import intersect, show, set_scene
class RayType:
    """Integer tags classifying rays; values mirror ``range(3)``."""
    kUnknownRay, kCameraRay, kShadowRay = 0, 1, 2


class Ray:
    """Parametric half-line with an origin, a direction and a valid
    parameter interval ``[tmin, tmax]``."""

    def __init__(self, orig, direction):
        self.orig, self.direction = orig, direction
        # Valid parametric range along the ray; tmax starts effectively
        # unbounded.
        self.tmin = 0
        self.tmax = 1.e24
        self.type = RayType.kUnknownRay

    def __call__(self, t):
        # Point on the ray at parameter t.
        return self.orig + self.direction * t
class Atmosphere:
    """Sky model combining Rayleigh and Mie scattering around a
    spherical planet. Distances are in meters; the defaults match the
    Earth values given in the comments (Mars alternatives are kept
    commented out below)."""

    def __init__(self, sd=Vec3(0., 1., 0.), re=6360.e3,
                 ra=6420.e3, hr=7994., hm=1200.):
        # sd: sun direction, re: planet radius, ra: atmosphere radius,
        # hr/hm: Rayleigh/Mie scale heights.
        self.sundir = sd
        self.radiusEarth = re
        self.radiusAtmosphere = ra
        self.Hr = hr
        self.Hm = hm
        # For Mars
        # self.sundir = sd
        # self.radiusEarth = 3389.5e3
        # self.radiusAtmosphere = 3396.2e3
        # self.Hr = hr
        # self.Hm = hm
        # Rayleigh scattering coefficients at sea level (for Earth)
        # 440 nm, 550 nm, 680 nm
        self.betaR = Vec3(3.8e-6, 13.5e-6, 33.1e-6)
        # Rayleigh scattering coefficients (for Mars)
        # 440 nm, 550 nm, 680 nm
        # self.betaR = Vec3(5.75e-3, 13.57e-3, 19.918e-3)
        # Mie scattering coefficient at sea level (for Earth)
        self.betaM = Vec3(21.e-6)

    def compute_incident_light(self, r):
        """Integrate the light scattered toward the ray *r* along its
        path through the atmosphere and return it as a Vec3 (RGB).
        Returns Vec3(0) when the ray misses the atmosphere."""
        t0 = self.radiusEarth + 1
        t1 = 0
        t = [t0, t1]
        # NOTE(review): t1 is still the local 0 here, not the exit
        # distance that intersect writes into t -- presumably the intent
        # was to test t[1] < 0 after the call; confirm against the
        # helper's contract.
        if (not intersect(r, self.radiusAtmosphere, t)) or (t1 < 0.):
            return Vec3(0)
        t0, t1 = t
        # Clamp the ray's parameter range to the atmosphere shell.
        if (t0 > r.tmin) and (t0 > 0):
            r.tmin = t0
        if t1 < r.tmax:
            r.tmax = t1
        numSamples = 16.
        numSamplesLight = 16.
        segmentLength = (r.tmax - r.tmin) / numSamples
        tCurrent = r.tmin
        # Accumulated in-scattered light for Rayleigh and Mie.
        sumR = Vec3(0.)
        sumM = Vec3(0.)
        opticalDepthR = 0
        opticalDepthM = 0
        # Cosine of the angle between the view ray and the sun.
        mu = r.direction.dot(self.sundir)
        # Anisotropy of the medium (aerosol)
        # if g = 0, function is equal to rayleigh
        g = 0.76
        # Rayleigh and (Henyey-Greenstein style) Mie phase functions.
        phaseR = 3. / (16. * np.pi) * (mu * mu + 1.)
        phaseM = 3. / (8. * np.pi) * \
            ((1. - g * g) * (1. + mu * mu)) / \
            ((2. + g * g) * np.power(1. + g * g - 2. * g * mu, 1.5))
        # March along the view ray, sampling at segment midpoints.
        for i in np.arange(numSamples):
            samplePosition = r(tCurrent + segmentLength * 0.5)
            height = samplePosition.length() - self.radiusEarth
            # Density-weighted segment lengths (exponential falloff
            # with altitude for each scale height).
            hr = segmentLength * np.exp(-height / self.Hr)
            hm = segmentLength * np.exp(-height / self.Hm)
            opticalDepthR += hr
            opticalDepthM += hm
            # Secondary ray from the sample point toward the sun.
            lightRay = Ray(samplePosition, self.sundir)
            l = [lightRay.tmin, lightRay.tmax]
            intersect(lightRay, self.radiusAtmosphere, l)
            lightRay.tmin, lightRay.tmax = l
            segmentLengthLight = lightRay.tmax / numSamplesLight
            tCurrentLight = 0
            opticalDepthLightR = 0
            opticalDepthLightM = 0
            # March along the light ray to accumulate its optical depth.
            for j in np.arange(numSamplesLight):
                samplePositionLight = \
                    lightRay(tCurrentLight + segmentLengthLight * 0.5)
                heightLight = samplePositionLight.length() - self.radiusEarth
                # Below the surface: the light ray is blocked, so this
                # sample contributes nothing (the inner loop is aborted).
                if heightLight < 0.:
                    break
                opticalDepthLightR += \
                    segmentLengthLight * np.exp(-heightLight / self.Hr)
                opticalDepthLightM += \
                    segmentLengthLight * np.exp(-heightLight / self.Hm)
                tCurrentLight += segmentLengthLight
                # Only add the contribution when the light march
                # completed all samples without hitting the planet.
                if numSamplesLight == (j + 1.):
                    tau = self.betaR * (opticalDepthR + opticalDepthLightR) + \
                        self.betaM * 1.1 * (opticalDepthM + opticalDepthLightM)
                    attenuation = Vec3(np.exp(-tau.vec[0]),
                                       np.exp(-tau.vec[1]),
                                       np.exp(-tau.vec[2]))
                    sumR = sumR + attenuation * hr
                    sumM = sumM + attenuation * hm
            tCurrent += segmentLength
        # 20 is a magic number :)
        # For Mars, 20e25
        return (sumR * self.betaR * phaseR + sumM * self.betaM * phaseM) * 20.
def render(scene):
    """Render a fisheye view of the sky for the given scene.

    *scene* is a ``(sun_direction, (width, height))`` pair. Returns a
    ``(width, height, 3)`` float array holding the RGB flux of each
    pixel inside the fisheye disc (zero outside it).
    """
    sun_direction, (width, height) = scene
    sky = Atmosphere(sun_direction)
    # One channel plane per color component, indexed [channel, x, y].
    channels = np.zeros((3, width, height))
    # Camera sits one meter above the planet surface.
    eye = Vec3(0., sky.radiusEarth + 1., 0.)
    for row in range(int(height)):
        y = (row + 0.5) * 2. / (height - 1.) - 1.
        for col in range(int(width)):
            x = (col + 0.5) * 2. / (width - 1.) - 1.
            z2 = x * x + y * y
            if z2 > 1.:
                # Outside the unit disc of the fisheye projection.
                continue
            phi = np.arctan2(y, x)
            theta = np.arccos(1. - z2)
            look = Vec3(np.sin(theta) * np.cos(phi),
                        np.cos(theta),
                        np.sin(theta) * np.sin(phi))
            flux = sky.compute_incident_light(Ray(eye, look))
            for ch in range(3):
                channels[ch, col, row] = flux.vec[ch]
    # Assemble the channel planes into a single (width, height, 3) image.
    data = np.zeros((width, height, 3))
    for ch in range(3):
        data[..., ch] = channels[ch]
    return data
| [
"numpy.arccos",
"numpy.power",
"numpy.exp",
"numpy.zeros",
"numpy.arctan2",
"numpy.cos",
"numpy.sin",
"numpy.arange"
] | [((4611, 4628), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (4620, 4628), True, 'import numpy as np\n'), ((5312, 5340), 'numpy.zeros', 'np.zeros', (['(width, height, 3)'], {}), '((width, height, 3))\n', (5320, 5340), True, 'import numpy as np\n'), ((2309, 2330), 'numpy.arange', 'np.arange', (['numSamples'], {}), '(numSamples)\n', (2318, 2330), True, 'import numpy as np\n'), ((4696, 4712), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (4705, 4712), True, 'import numpy as np\n'), ((3039, 3065), 'numpy.arange', 'np.arange', (['numSamplesLight'], {}), '(numSamplesLight)\n', (3048, 3065), True, 'import numpy as np\n'), ((4375, 4399), 'numpy.zeros', 'np.zeros', (['(width * height)'], {}), '(width * height)\n', (4383, 4399), True, 'import numpy as np\n'), ((4433, 4457), 'numpy.zeros', 'np.zeros', (['(width * height)'], {}), '(width * height)\n', (4441, 4457), True, 'import numpy as np\n'), ((4491, 4515), 'numpy.zeros', 'np.zeros', (['(width * height)'], {}), '(width * height)\n', (4499, 4515), True, 'import numpy as np\n'), ((2250, 2291), 'numpy.power', 'np.power', (['(1.0 + g * g - 2.0 * g * mu)', '(1.5)'], {}), '(1.0 + g * g - 2.0 * g * mu, 1.5)\n', (2258, 2291), True, 'import numpy as np\n'), ((2493, 2518), 'numpy.exp', 'np.exp', (['(-height / self.Hr)'], {}), '(-height / self.Hr)\n', (2499, 2518), True, 'import numpy as np\n'), ((2552, 2577), 'numpy.exp', 'np.exp', (['(-height / self.Hm)'], {}), '(-height / self.Hm)\n', (2558, 2577), True, 'import numpy as np\n'), ((4844, 4860), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (4854, 4860), True, 'import numpy as np\n'), ((4885, 4904), 'numpy.arccos', 'np.arccos', (['(1.0 - z2)'], {}), '(1.0 - z2)\n', (4894, 4904), True, 'import numpy as np\n'), ((3402, 3432), 'numpy.exp', 'np.exp', (['(-heightLight / self.Hr)'], {}), '(-heightLight / self.Hr)\n', (3408, 3432), True, 'import numpy as np\n'), ((3514, 3544), 'numpy.exp', 'np.exp', (['(-heightLight / self.Hm)'], {}), 
'(-heightLight / self.Hm)\n', (3520, 3544), True, 'import numpy as np\n'), ((3833, 3852), 'numpy.exp', 'np.exp', (['(-tau.vec[0])'], {}), '(-tau.vec[0])\n', (3839, 3852), True, 'import numpy as np\n'), ((3889, 3908), 'numpy.exp', 'np.exp', (['(-tau.vec[1])'], {}), '(-tau.vec[1])\n', (3895, 3908), True, 'import numpy as np\n'), ((3945, 3964), 'numpy.exp', 'np.exp', (['(-tau.vec[2])'], {}), '(-tau.vec[2])\n', (3951, 3964), True, 'import numpy as np\n'), ((5000, 5013), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5006, 5013), True, 'import numpy as np\n'), ((4938, 4951), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4944, 4951), True, 'import numpy as np\n'), ((4954, 4965), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4960, 4965), True, 'import numpy as np\n'), ((5048, 5061), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5054, 5061), True, 'import numpy as np\n'), ((5064, 5075), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (5070, 5075), True, 'import numpy as np\n')] |
import os
import csv
import numpy.random as random
def MAPE(outputs, labels):
    """Mean squared relative error between predictions and targets.

    Despite the historical name, this computes
    ``mean(((output / label) - 1) ** 2)`` over every element pair, not
    the mean absolute percent error.

    @param outputs nested sequences where ``outputs[i][j][0]`` holds
        a predicted value
    @param labels  nested sequences of the same shape with the targets
    @return the mean squared relative error (0.0 for empty input)
    """
    relative_MSE = 0
    count = 0
    for i, j in zip(outputs, labels):
        for m, n in zip(i, j):
            relative_MSE += (m[0] / n[0] - 1) ** 2
            count += 1
    # Guard against ZeroDivisionError on empty inputs.
    if count == 0:
        return 0.0
    # fix: the original computed the mean but never returned it.
    return relative_MSE / count
def getconfig(depth, resolution):
    """Enumerate every architecture configuration.

    A configuration is a list of ten per-stage depths followed by one
    resolution, i.e. the Cartesian product ``depth^10 x resolution``.
    The full list is written once to ``./dataset/config.csv`` (one
    configuration per row) unless that file already exists.

    @param depth      iterable of candidate depths for each stage
    @param resolution iterable of candidate input resolutions
    @return list of ``[d1, ..., d10, r]`` lists
    """
    import itertools

    # itertools.product replaces the original eleven nested loops and
    # yields combinations in the same order (last factor varies fastest).
    config = [list(combo)
              for combo in itertools.product(*([depth] * 10), resolution)]

    file_path = './dataset/config.csv'
    if not os.path.exists(file_path):
        # fix: create the target directory first (the original crashed
        # with FileNotFoundError when ./dataset did not exist) and use a
        # context manager so the file handle is always closed.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'w', encoding='utf-8', newline='') as csvfile:
            csv.writer(csvfile).writerows(config)
    return config
def random_sample(num_stages, depths,num_blocks,expand_ratios,kernel_sizes):
sample = {}
d = []
e = []
ks = []
# for i in range(num_stages):
# d.append(random.choice(depths, p=[0.25] * 4))
for i in range(num_blocks):
e.append(random.choice(expand_ratios, p=[0.1, 0.45, 0.45]))
ks.append(random.choice(kernel_sizes, p=[0.1, 0.45, 0.45]))
sample = {
'wid': None,
'ks': ks,
'e': e,
'd': d,
'r': []
}
return sample
| [
"numpy.random.choice",
"os.path.exists",
"csv.writer"
] | [((1819, 1844), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1833, 1844), False, 'import os\n'), ((1972, 1991), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1982, 1991), False, 'import csv\n'), ((2367, 2416), 'numpy.random.choice', 'random.choice', (['expand_ratios'], {'p': '[0.1, 0.45, 0.45]'}), '(expand_ratios, p=[0.1, 0.45, 0.45])\n', (2380, 2416), True, 'import numpy.random as random\n'), ((2436, 2484), 'numpy.random.choice', 'random.choice', (['kernel_sizes'], {'p': '[0.1, 0.45, 0.45]'}), '(kernel_sizes, p=[0.1, 0.45, 0.45])\n', (2449, 2484), True, 'import numpy.random as random\n')] |
import torch
import os
from tqdm.auto import tqdm
import numpy as np
import sys
from argparse import ArgumentParser
try:
import IBA
except ModuleNotFoundError:
sys.path.insert(0, '..')
import IBA
import model.cxr_dataset as CXR
import model.merged_visualize_prediction as V
import cv2
import mmcv
from evaluation.sensitivity_n import SensitivityN
from evaluation.regression_sensitivity_n import SensitivityN as SensitivityNRegression
def parse_args():
    """Parse the command-line arguments for the Sensitivity-N evaluation.

    Returns:
        argparse.Namespace with six positional paths/names plus the
        ``covid`` / ``regression`` / ``blur`` flags and the float ``sigma``.
    """
    parser = ArgumentParser('Sensitivity-N evaluation')
    # Help strings fixed: the originals were copy-pasted from another script
    # and described every positional as "config file of the attribution method".
    parser.add_argument('heatmap_dir', default="", help='directory of the heatmaps')
    parser.add_argument('out_dir', default="", help='directory to save the result file')
    parser.add_argument('image_path', default="", help='path to the image dataset')
    parser.add_argument('model_path', default="", help='path to the model checkpoint')
    parser.add_argument('label_path', default="", help='path to the label file')
    parser.add_argument('file_name', default="sensitivity_n.json", help='name of the result file')
    parser.add_argument("--covid", help="covid dataset",
                        action="store_true")
    parser.add_argument("--regression", help="regression model",
                        action="store_true")
    parser.add_argument("--blur", help="use blurred image as baseline",
                        action="store_true")
    # type=float so a CLI-supplied sigma is numeric (the default was already
    # a float, but user-supplied values previously arrived as str).
    parser.add_argument("--sigma", default=4., type=float, help="sigma for gaussian blur")
    args = parser.parse_args()
    return args
def evaluation(heatmap_dir, out_dir, image_path, model_path, label_path, file_name="sensitivity_n.json",
               device='cuda:0', covid=False, regression=False, blur=False, sigma=4.):
    """Compute Sensitivity-N correlation scores for pre-computed heatmaps.

    For 50 log-spaced values of n (1 .. ~5e4), a SensitivityN evaluator masks
    n-pixel subsets and correlates the model-score change with the summed
    attributions.  Per-category correlations are collected over a ~40% random
    subsample of each dataloader and averaged; results are dumped as JSON via
    mmcv into ``out_dir/file_name``.

    NOTE(review): `blur` and `sigma` are accepted but never used in this body.
    """
    # Category sets differ per dataset: NIH CXR pathologies vs. COVID
    # detectors vs. a single regression target.
    if not covid:
        category_list = [
            'Atelectasis',
            'Cardiomegaly',
            'Effusion',
            'Infiltration',
            'Mass',
            'Nodule',
            # 'Pneumonia',
            'Pneumothorax',
            'Consolidation',
            'Edema',
            'Emphysema',
            'Fibrosis',
            'Pleural_Thickening',
            'Hernia']
    else:
        if regression:
            category_list = ["regression"]
        else:
            category_list = ['Detector01', 'Detector2', 'Detector3']
    # generate evaluation
    # 50 log-spaced subset sizes n in [1, 10**4.7].
    log_list = np.logspace(0, 4.7, num=50)
    results = {}
    passed_n = 0
    for n in tqdm(log_list):
        passed_n += 1
        score_diffs_all = []
        sum_attrs_all = []
        corr_all = np.array([])
        for category in category_list:
            corr_category = np.array([])
            # get data inside category
            if not covid:
                dataloader, model = V.load_data(
                    image_path,
                    category,
                    model_path,
                    'BBox',
                    POSITIVE_FINDINGS_ONLY=True,
                    label_path=label_path,
                    return_dataloader=True)
            elif covid and regression:
                dataloader, model = V.load_data(
                    image_path,
                    category,
                    model_path,
                    'test',
                    POSITIVE_FINDINGS_ONLY=False,
                    covid=True,
                    regression=True,
                    label_path=label_path,
                    return_dataloader=True)
            elif covid and not regression:
                dataloader, model = V.load_data(
                    image_path,
                    category,
                    model_path,
                    'test',
                    POSITIVE_FINDINGS_ONLY=True,
                    covid=True,
                    regression=False,
                    label_path=label_path,
                    return_dataloader=True)
            if regression:
                evaluator = SensitivityNRegression(model, (224, 224), int(n))
            else:
                target = category_list.index(category)
                evaluator = SensitivityN(model, (224, 224), int(n))
            num_samples = 0
            # Fixed seed so the same ~40% of samples is kept for every n and
            # category, making scores comparable across runs.
            # NOTE(review): rand_array has 2000 entries and is indexed by the
            # 1-based running sample count -- a dataloader yielding >= 2000
            # samples would raise IndexError; confirm dataset sizes.
            np.random.seed(seed=42)
            rand_array = np.random.rand(2000) < 0.4
            for data in dataloader:
                num_samples += 1
                if not rand_array[num_samples]:
                    continue
                # Heatmaps are stored per-file (regression) or per-category
                # subdirectory (classification), as 8-bit grayscale PNGs.
                if covid:
                    if regression:
                        input, label, filename = data
                        heatmap = cv2.imread(os.path.join(heatmap_dir, filename[0]), cv2.IMREAD_GRAYSCALE)
                    else:
                        input, label, filename = data
                        heatmap = cv2.imread(os.path.join(heatmap_dir, category, filename[0]), cv2.IMREAD_GRAYSCALE)
                else:
                    input, label, filename, bbox = data
                    heatmap = cv2.imread(os.path.join(heatmap_dir, category, filename[0]), cv2.IMREAD_GRAYSCALE)
                # Normalize 0-255 grayscale to [0, 1] on the target device.
                heatmap = torch.from_numpy(heatmap).to(device) / 255.0
                if regression:
                    res_single = evaluator.evaluate(heatmap, input.squeeze().to(device), calculate_corr=True)
                else:
                    res_single = evaluator.evaluate(heatmap, input.squeeze().to(device), target, calculate_corr=True)
                corr = res_single['correlation']
                # manually set NaN to zero
                if np.isnan(corr):
                    corr = 0.
                # score_diffs = res_single['score_diffs']
                # sum_attrs = res_single['sum_attributions']
                # score_diffs_all.append(score_diffs)
                # sum_attrs_all.append(sum_attrs)
                corr_category = np.append(corr_category, np.array([corr]))
                corr_all = np.append(corr_all, np.array([corr]))
            # Keep per-category correlation vectors keyed by "<step>_<category>".
            results.update({"{}_{}".format(passed_n, category): corr_category})
        # score_diffs_all = np.concatenate(score_diffs_all, 0)
        # sum_attrs_all = np.concatenate(sum_attrs_all, 0)
        # corr_matrix = np.corrcoef(score_diffs_all, sum_attrs_all)
        # results.update({n: corr_matrix[1, 0]})
        corr_mean = corr_all.mean()
        results.update({n: corr_mean})
        print("corr for {} is {}".format(n, corr_mean))
    mmcv.mkdir_or_exist(out_dir)
    mmcv.dump(results, file=os.path.join(out_dir, file_name))
    return results
# Script entry point: parse CLI args and run the Sensitivity-N evaluation.
if __name__ == '__main__':
    args = parse_args()
    results = evaluation(args.heatmap_dir, args.out_dir, args.image_path, args.model_path, args.label_path, args.file_name,
                         covid=args.covid, regression=args.regression, blur=args.blur, sigma=args.sigma) | [
"mmcv.mkdir_or_exist",
"sys.path.insert",
"numpy.random.rand",
"argparse.ArgumentParser",
"model.merged_visualize_prediction.load_data",
"os.path.join",
"torch.from_numpy",
"numpy.array",
"numpy.isnan",
"numpy.random.seed",
"tqdm.auto.tqdm",
"numpy.logspace"
] | [((481, 523), 'argparse.ArgumentParser', 'ArgumentParser', (['"""Sensitivity-N evaluation"""'], {}), "('Sensitivity-N evaluation')\n", (495, 523), False, 'from argparse import ArgumentParser\n'), ((2347, 2374), 'numpy.logspace', 'np.logspace', (['(0)', '(4.7)'], {'num': '(50)'}), '(0, 4.7, num=50)\n', (2358, 2374), True, 'import numpy as np\n'), ((2423, 2437), 'tqdm.auto.tqdm', 'tqdm', (['log_list'], {}), '(log_list)\n', (2427, 2437), False, 'from tqdm.auto import tqdm\n'), ((6287, 6315), 'mmcv.mkdir_or_exist', 'mmcv.mkdir_or_exist', (['out_dir'], {}), '(out_dir)\n', (6306, 6315), False, 'import mmcv\n'), ((169, 193), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (184, 193), False, 'import sys\n'), ((2536, 2548), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2544, 2548), True, 'import numpy as np\n'), ((2616, 2628), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2624, 2628), True, 'import numpy as np\n'), ((4127, 4150), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(42)'}), '(seed=42)\n', (4141, 4150), True, 'import numpy as np\n'), ((6344, 6376), 'os.path.join', 'os.path.join', (['out_dir', 'file_name'], {}), '(out_dir, file_name)\n', (6356, 6376), False, 'import os\n'), ((2731, 2864), 'model.merged_visualize_prediction.load_data', 'V.load_data', (['image_path', 'category', 'model_path', '"""BBox"""'], {'POSITIVE_FINDINGS_ONLY': '(True)', 'label_path': 'label_path', 'return_dataloader': '(True)'}), "(image_path, category, model_path, 'BBox',\n POSITIVE_FINDINGS_ONLY=True, label_path=label_path, return_dataloader=True)\n", (2742, 2864), True, 'import model.merged_visualize_prediction as V\n'), ((4176, 4196), 'numpy.random.rand', 'np.random.rand', (['(2000)'], {}), '(2000)\n', (4190, 4196), True, 'import numpy as np\n'), ((5424, 5438), 'numpy.isnan', 'np.isnan', (['corr'], {}), '(corr)\n', (5432, 5438), True, 'import numpy as np\n'), ((3078, 3246), 'model.merged_visualize_prediction.load_data', 'V.load_data', 
(['image_path', 'category', 'model_path', '"""test"""'], {'POSITIVE_FINDINGS_ONLY': '(False)', 'covid': '(True)', 'regression': '(True)', 'label_path': 'label_path', 'return_dataloader': '(True)'}), "(image_path, category, model_path, 'test',\n POSITIVE_FINDINGS_ONLY=False, covid=True, regression=True, label_path=\n label_path, return_dataloader=True)\n", (3089, 3246), True, 'import model.merged_visualize_prediction as V\n'), ((5750, 5766), 'numpy.array', 'np.array', (['[corr]'], {}), '([corr])\n', (5758, 5766), True, 'import numpy as np\n'), ((5815, 5831), 'numpy.array', 'np.array', (['[corr]'], {}), '([corr])\n', (5823, 5831), True, 'import numpy as np\n'), ((3499, 3667), 'model.merged_visualize_prediction.load_data', 'V.load_data', (['image_path', 'category', 'model_path', '"""test"""'], {'POSITIVE_FINDINGS_ONLY': '(True)', 'covid': '(True)', 'regression': '(False)', 'label_path': 'label_path', 'return_dataloader': '(True)'}), "(image_path, category, model_path, 'test',\n POSITIVE_FINDINGS_ONLY=True, covid=True, regression=False, label_path=\n label_path, return_dataloader=True)\n", (3510, 3667), True, 'import model.merged_visualize_prediction as V\n'), ((4887, 4935), 'os.path.join', 'os.path.join', (['heatmap_dir', 'category', 'filename[0]'], {}), '(heatmap_dir, category, filename[0])\n', (4899, 4935), False, 'import os\n'), ((4509, 4547), 'os.path.join', 'os.path.join', (['heatmap_dir', 'filename[0]'], {}), '(heatmap_dir, filename[0])\n', (4521, 4547), False, 'import os\n'), ((4696, 4744), 'os.path.join', 'os.path.join', (['heatmap_dir', 'category', 'filename[0]'], {}), '(heatmap_dir, category, filename[0])\n', (4708, 4744), False, 'import os\n'), ((4985, 5010), 'torch.from_numpy', 'torch.from_numpy', (['heatmap'], {}), '(heatmap)\n', (5001, 5010), False, 'import torch\n')] |
#!/usr/bin/env python
# coding=utf-8
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from skim import AutoModelForMaskedLM
from sklearn.metrics import classification_report
import torch
from torch import nn
from datasets import ClassLabel, load_dataset
import skim.data.datasets.docbank
import transformers
from skim.data import DataCollatorForTokenClassification
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
AutoConfig,
AutoModelForTokenClassification,
AutoModelForMaskedLM,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import (
is_main_process,
get_last_checkpoint,
)
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.5.0")
# Module-level logger; handlers/level are configured inside main().
logger = logging.getLogger(__name__)
# Config classes with a registered token-classification head, and the
# corresponding `model_type` strings (used in the --model_type help text).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # NOTE(review): annotated `str` but defaults to None -- HfArgumentParser
    # builds the CLI from these annotations, so left unchanged on purpose.
    model_name_or_path: str = field(
        default=None,
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Checkpoints used only by the composite BertWithSkimEmbed / SkimmingMask paths.
    core_model_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "If training <core_model>+SkimEmbeddings or SkimmingMask, "
                          "path to pretrained model or model identifier from huggingface.co/models"
                  }
    )
    skim_model_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "If training <core_model>+SkimEmbeddings or SkimmingMask, "
                          "path to pretrained Skimformer model."
                  }
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "Pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    core_model_type: Optional[str] = field(
        default="bert",
        metadata={"help": "Core model type in <core_model>+SkimEmbeddings or SkimmingMask"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    # Skimformer-specific: pre-contextualize layout embeddings before Skim-Attention.
    contextualize_2d_positions: bool = field(
        default=False,
        metadata={"help": "Contextualize the layout embeddings prior to Skim-Attention."}
    )
    # SkimmingMask-specific: must be > 0 when model_type == "skimmingmask" (asserted in main()).
    top_k: int = field(
        default=0,
        metadata={"help": "If > 0, SkimmingMask keeps the k-most attended tokens for each token."}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # NOTE(review): declared but not referenced anywhere in this file.
    proxy: Optional[str] = field(
        default=None,
        metadata={"help": "Proxy server to use."},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    data_dir: Optional[str] = field(
        default=None, metadata={"help": "Path to the data directory."}
    )
    # NOTE(review): annotated `str` but defaults to None -- HfArgumentParser
    # builds the CLI from these annotations, so left unchanged on purpose.
    cached_data_dir: str = field(
        default=None,
        metadata={"help": "Path to the cached features"}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=512,
        metadata={
            "help": "Optional input sequence length after tokenization."
            "The training dataset will be truncated in block of this size for training."
            "Default to 512."
        },
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to model maximum sentence length. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
            "efficient on GPU but very bad for TPU."
        },
    )
    # The three *_samples caps below exist for debugging / smoke runs only.
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
            "value if set."
        },
    )
    max_test_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of test examples to this "
            "value if set."
        },
    )
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
            "one (in which case the other tokens will have a padding index)."
        },
    )
def main():
    """Fine-tune a token-classification model on DocBank.

    Workflow: parse (Model/Data/Training) argument dataclasses, detect a
    resumable checkpoint, configure logging and seeding, load the DocBank
    dataset, build label maps, construct the model via one of three paths
    (plain HF checkpoint / BertWithSkimEmbed weight transplant / SkimmingMask
    weight transplant), tokenize and align labels, then run the HF ``Trainer``
    for train / eval / predict as requested.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Load DocBank via its dataset script (path resolved from the installed package).
    datasets = load_dataset(
        os.path.abspath(skim.data.datasets.docbank.__file__),
        data_dir=data_args.data_dir,
        cache_dir=data_args.cached_data_dir,
    )
    if training_args.do_train:
        column_names = datasets["train"].column_names
        features = datasets["train"].features
    else:
        column_names = datasets["validation"].column_names
        features = datasets["validation"].features
    text_column_name = "words" if "words" in column_names else column_names[0]
    label_column_name = "tags" if "tags" in column_names else column_names[1]
    remove_columns = column_names
    # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
    # unique labels.
    def get_label_list(labels):
        # Collect and sort the distinct labels so ids are deterministic.
        unique_labels = set()
        for label in labels:
            unique_labels = unique_labels | set(label)
        label_list = list(unique_labels)
        label_list.sort()
        return label_list
    if isinstance(features[label_column_name].feature, ClassLabel):
        label_list = features[label_column_name].feature.names
        # No need to convert the labels since they are already ints.
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets["train"][label_column_name])
        label_to_id = {l: i for i, l in enumerate(label_list)}
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    # NOTE(review): `model_type` defaults to None, so omitting --model_type
    # makes the next line raise AttributeError -- confirm intended usage.
    model_args.model_type = model_args.model_type.lower()
    model_args.core_model_type = model_args.core_model_type.lower()
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    # initialize config if not BertWithSkimEmbed or SkimmingMask
    # NOTE(review): when model_type is a composite and no model_name_or_path
    # is given, `config` stays unbound here; it is rebuilt inside the
    # composite branches below, so this only matters for the first branch.
    if (
        model_args.model_type not in ["bertwithskimembed", "skimmingmask"]
        or (
            model_args.model_type in ["bertwithskimembed", "skimmingmask"]
            and model_args.model_name_or_path
        )
    ):
        config = AutoConfig.from_pretrained(
            model_args.config_name if model_args.config_name else model_args.model_name_or_path,
            num_labels=num_labels,
            **config_kwargs,
        )
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.model_type in ["longformer", "longskimformer"]:
        tokenizer_kwargs["add_prefix_space"] = True
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        **tokenizer_kwargs,
    )
    # --- Model construction path 1: plain HF checkpoint ---
    if model_args.model_type not in ["bertwithskimembed", "skimmingmask"]:
        logger.info(
            f"Fine-tuning {model_args.model_type} with weights initialized from "
            f"{model_args.model_name_or_path}."
        )
        model_kwargs = {
            "cache_dir": model_args.cache_dir,
            "revision": model_args.model_revision,
            "use_auth_token": True if model_args.use_auth_token else None,
        }
        model = AutoModelForTokenClassification.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            # config=config,
            **model_kwargs,
        )
        # If the checkpoint's classifier head does not match our label set,
        # re-initialize it with the right number of labels.
        if model.config.num_labels != config.num_labels or model.config.id2label != config.id2label:
            model_architecture = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[CONFIG_MAPPING[model_args.model_type]]
            if model_architecture == model.config.architectures[0]:
                logger.info(
                    f"`model.config.num_labels` ({model.config.num_labels}) != `config.num_labels` ({config.num_labels}) "
                    "Reseting `model.classifier`"
                )
                model.classifier = nn.Linear(model.config.hidden_size, config.num_labels)
                model.classifier.weight.data.normal_(mean=0.0, std=model.config.initializer_range)
                model.config.num_labels = config.num_labels
                model.num_labels = config.num_labels
    # --- Model construction path 2: BertWithSkimEmbed (transplant layout
    # embeddings from a Skimformer into a BERT-style core model) ---
    elif model_args.model_type == "bertwithskimembed":
        assert model_args.skim_model_name_or_path and model_args.core_model_name_or_path, "Must provided model checkpoints for "\
            "`skim_model_name_or_path` and `core_model_name_or_path`"
        logger.info(
            f"Fine-tuning BertWithSkimEmbed with 2D position embeddings initialized "\
            f"with weights from {model_args.skim_model_name_or_path} and " \
            f"core model initialized with weights from {model_args.core_model_name_or_path}"
        )
        skim_model = AutoModelForMaskedLM.from_pretrained(
            model_args.skim_model_name_or_path,
            from_tf=bool(".ckpt" in model_args.skim_model_name_or_path),
            cache_dir=model_args.cache_dir,
        )
        core_model = AutoModelForMaskedLM.from_pretrained(
            model_args.core_model_name_or_path,
            from_tf=bool(".ckpt" in model_args.core_model_name_or_path),
            cache_dir=model_args.cache_dir,
        )
        # Fresh config mixing the core model's text/encoder dims with the
        # Skimformer's layout dims.
        config = CONFIG_MAPPING[model_args.model_type](
            vocab_size=core_model.config.vocab_size,
            hidden_size=core_model.config.hidden_size,
            hidden_layout_size=skim_model.config.hidden_layout_size,
            num_hidden_layers=core_model.config.num_hidden_layers,
            num_attention_heads=core_model.config.num_attention_heads,
            intermediate_size=core_model.config.intermediate_size,
            hidden_act=core_model.config.hidden_act,
            hidden_dropout_prob=core_model.config.hidden_dropout_prob,
            attention_probs_dropout_prob=core_model.config.attention_probs_dropout_prob,
            max_position_embeddings=core_model.config.max_position_embeddings,
            type_vocab_size=core_model.config.type_vocab_size,
            initializer_range=core_model.config.initializer_range,
            layer_norm_eps=core_model.config.layer_norm_eps,
            pad_token_id=core_model.config.pad_token_id,
            gradient_checkpointing=core_model.config.gradient_checkpointing,
            max_2d_position_embeddings=skim_model.config.max_2d_position_embeddings,
            contextualize_2d_positions=model_args.contextualize_2d_positions,
            num_hidden_layers_layout_encoder=skim_model.config.num_hidden_layers_layout_encoder,
            num_attention_heads_layout_encoder=skim_model.config.num_attention_heads_layout_encoder,
            num_labels=num_labels,
        )
        model = AutoModelForTokenClassification.from_config(config=config)
        # Copy weights
        with torch.no_grad():
            # Copy layout embeddings from Skimformer
            model.bert_with_skim_embed.embeddings.x_position_embeddings.load_state_dict(
                skim_model.skimformer.two_dim_pos_embeddings.x_position_embeddings.state_dict()
            )
            model.bert_with_skim_embed.embeddings.y_position_embeddings.load_state_dict(
                skim_model.skimformer.two_dim_pos_embeddings.y_position_embeddings.state_dict()
            )
            model.bert_with_skim_embed.embeddings.h_position_embeddings.load_state_dict(
                skim_model.skimformer.two_dim_pos_embeddings.h_position_embeddings.state_dict()
            )
            model.bert_with_skim_embed.embeddings.w_position_embeddings.load_state_dict(
                skim_model.skimformer.two_dim_pos_embeddings.w_position_embeddings.state_dict()
            )
            # Copy text embeddings and encoder weights from core model
            model.bert_with_skim_embed.embeddings.word_embeddings.load_state_dict(
                core_model.bert.embeddings.word_embeddings.state_dict()
            )
            model.bert_with_skim_embed.embeddings.position_embeddings.load_state_dict(
                core_model.bert.embeddings.position_embeddings.state_dict()
            )
            model.bert_with_skim_embed.embeddings.token_type_embeddings.load_state_dict(
                core_model.bert.embeddings.token_type_embeddings.state_dict()
            )
            model.bert_with_skim_embed.embeddings.LayerNorm.load_state_dict(
                core_model.bert.embeddings.LayerNorm.state_dict()
            )
            model.bert_with_skim_embed.encoder.load_state_dict(core_model.bert.encoder.state_dict())
    # --- Model construction path 3: SkimmingMask (Skim-Attention selects the
    # top-k attended tokens to mask the core model's attention) ---
    else:
        assert (
            model_args.skim_model_name_or_path and model_args.core_model_name_or_path
        ), f"Must provide `skim_model_name_or_path` and `core_model_name_or_path` to instantiate {model_args.model_type} model."
        logger.info(
            f"Fine-tuning SkimmingMask with layout embeddings and Skim-Attention initialized "\
            f"with weights from {model_args.skim_model_name_or_path}, " \
            f"core model initialized with weights from {model_args.core_model_name_or_path}" \
            f" and `top_k` = {model_args.top_k}"
        )
        skim_model = AutoModelForMaskedLM.from_pretrained(
            model_args.skim_model_name_or_path,
            from_tf=bool(".ckpt" in model_args.skim_model_name_or_path),
            cache_dir=model_args.cache_dir,
        )
        core_model = AutoModelForMaskedLM.from_pretrained(
            model_args.core_model_name_or_path,
            from_tf=bool(".ckpt" in model_args.core_model_name_or_path),
            cache_dir=model_args.cache_dir,
        )
        assert model_args.top_k > 0, "`top_k` must be > 0"
        config = CONFIG_MAPPING[model_args.model_type](
            vocab_size=core_model.config.vocab_size,
            hidden_size=core_model.config.hidden_size,
            hidden_layout_size=skim_model.config.hidden_layout_size,
            num_hidden_layers=core_model.config.num_hidden_layers,
            num_attention_heads=core_model.config.num_attention_heads,
            num_layout_attention_heads=skim_model.config.num_attention_heads,
            intermediate_size=core_model.config.intermediate_size,
            hidden_act=core_model.config.hidden_act,
            hidden_dropout_prob=core_model.config.hidden_dropout_prob,
            attention_probs_dropout_prob=core_model.config.attention_probs_dropout_prob,
            max_position_embeddings=core_model.config.max_position_embeddings,
            type_vocab_size=core_model.config.type_vocab_size,
            initializer_range=core_model.config.initializer_range,
            layer_norm_eps=core_model.config.layer_norm_eps,
            skim_attention_head_size=skim_model.config.skim_attention_head_size,
            pad_token_id=core_model.config.pad_token_id,
            gradient_checkpointing=core_model.config.gradient_checkpointing,
            max_2d_position_embeddings=skim_model.config.max_2d_position_embeddings,
            contextualize_2d_positions=skim_model.config.contextualize_2d_positions,
            num_hidden_layers_layout_encoder=skim_model.config.num_hidden_layers_layout_encoder,
            num_attention_heads_layout_encoder=skim_model.config.num_attention_heads_layout_encoder,
            num_labels=num_labels,
            top_k=model_args.top_k,
            core_model_type=model_args.core_model_type,
        )
        model = AutoModelForTokenClassification.from_config(config)
        with torch.no_grad():
            # copy layout embeddings from Skimformer
            model.skimming_mask_model.two_dim_pos_embeddings.load_state_dict(
                skim_model.skimformer.two_dim_pos_embeddings.state_dict()
            )
            # copy contextualizer
            if skim_model.config.contextualize_2d_positions:
                logger.info("Contextualizing 2d positions")
                model.skimming_mask_model.layout_encoder.load_state_dict(
                    skim_model.skimformer.layout_encoder.state_dict()
                )
            # copy Skim-Attention
            model.skimming_mask_model.skim_attention.query.load_state_dict(
                skim_model.skimformer.skim_attention.query.state_dict()
            )
            model.skimming_mask_model.skim_attention.key.load_state_dict(
                skim_model.skimformer.skim_attention.key.state_dict()
            )
            # Copy text embeddings and encoder weights from core model
            if model_args.core_model_type == "bert":
                core_model_base = core_model.bert
            else:
                assert model_args.core_model_type == "layoutlm"
                core_model_base = core_model.layoutlm
            model.skimming_mask_model.embeddings.load_state_dict(
                core_model_base.embeddings.state_dict()
            )
            model.skimming_mask_model.encoder.load_state_dict(core_model_base.encoder.state_dict())
    model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the dataset
    # Padding strategy
    padding = "max_length" if data_args.pad_to_max_length else False
    # Tokenize all texts and align the labels with them.
    def tokenize_and_align_labels(examples):
        # Overflowing tokens create extra rows; `overflow_to_sample_mapping`
        # maps each row back to its originating example so labels/bboxes can
        # be realigned per word-piece.
        tokenized_inputs = tokenizer(
            examples[text_column_name],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
            return_overflowing_tokens=True,
            # We use this argument because the texts in our dataset are lists of words (with a label for each word).
            is_split_into_words=True,
        )
        labels = []
        bboxes = []
        for batch_index in range(len(tokenized_inputs["input_ids"])):
            word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
            org_batch_index = tokenized_inputs["overflow_to_sample_mapping"][batch_index]
            label = examples[label_column_name][org_batch_index]
            bbox = examples["bboxes"][org_batch_index]
            previous_word_idx = None
            label_ids = []
            bbox_inputs = []
            for word_idx in word_ids:
                # Special tokens have a word id that is None. We set the label to -100 so they are automatically
                # ignored in the loss function.
                if word_idx is None:
                    label_ids.append(-100)
                    bbox_inputs.append([0, 0, 0, 0])
                # We set the label for the first token of each word.
                elif word_idx != previous_word_idx:
                    label_ids.append(label_to_id[label[word_idx]])
                    bbox_inputs.append(bbox[word_idx])
                # For the other tokens in a word, we set the label to either the current label or -100, depending on
                # the label_all_tokens flag.
                else:
                    label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)
                    bbox_inputs.append(bbox[word_idx])
                previous_word_idx = word_idx
            labels.append(label_ids)
            bboxes.append(bbox_inputs)
        tokenized_inputs["labels"] = labels
        tokenized_inputs["bbox"] = bboxes
        return tokenized_inputs
    if training_args.do_train:
        if "train" not in datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        train_dataset = train_dataset.map(
            tokenize_and_align_labels,
            batched=True,
            remove_columns=remove_columns,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
        )
    if training_args.do_eval:
        if "validation" not in datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = datasets["validation"]
        if data_args.max_val_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
        eval_dataset = eval_dataset.map(
            tokenize_and_align_labels,
            batched=True,
            remove_columns=remove_columns,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
        )
    if training_args.do_predict:
        if "test" not in datasets:
            raise ValueError("--do_predict requires a test dataset")
        test_dataset = datasets["test"]
        if data_args.max_test_samples is not None:
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
        test_dataset = test_dataset.map(
            tokenize_and_align_labels,
            batched=True,
            remove_columns=remove_columns,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
        )
    data_collator = DataCollatorForTokenClassification(
        tokenizer,
        padding=padding,
        max_length=data_args.max_seq_length,
        pad_to_multiple_of=8 if training_args.fp16 else None,
        use_2d_attn_mask=(model_args.model_type == "skimmingmask"),
    )
    def compute_metrics(p):
        # Per-class precision/recall/F1 via sklearn, flattened into a single
        # "<label>_<metric>" dict as expected by Trainer logging.
        predictions, labels = p
        predictions = np.argmax(predictions, axis=2)
        # Map from label_id to label
        # Remove ignored index (special tokens)
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        true_labels = [
            [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        # flatten lists
        flat_true_predictions = [pred for sublist in true_predictions for pred in sublist]
        flat_true_labels = [label for sublist in true_labels for label in sublist]
        labels_to_detect = np.unique(flat_true_labels)
        report = classification_report(
            flat_true_labels,
            flat_true_predictions,
            labels=labels_to_detect,
            output_dict=True,
            zero_division=0,
        )
        results = {}
        for key_label, value_label in sorted(report.items()):
            if type(value_label) != dict:
                results[key_label] = value_label
            else:
                for key_metric, value_metric in value_label.items():
                    results[key_label + "_" + key_metric] = value_metric
        return results
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = last_checkpoint if last_checkpoint else None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()  # Saves the tokenizer too for easy upload
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Predict
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(test_dataset)
        predictions = np.argmax(predictions, axis=2)
        # Remove ignored index (special tokens)
        true_predictions = [
            [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        trainer.log_metrics("test", metrics)
        trainer.save_metrics("test", metrics)
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                for prediction in true_predictions:
                    writer.write(" ".join(prediction) + "\n")
def _mp_fn(index):
    # For xla_spawn (TPUs)
    # `index` is the process/core index supplied by the spawner; unused here.
    main()
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"transformers.HfArgumentParser",
"sklearn.metrics.classification_report",
"transformers.utils.check_min_version",
"transformers.AutoTokenizer.from_pretrained",
"transformers.Trainer",
"transformers.trainer_utils.get_last_checkpoint",
"transformers.MODEL_... | [((946, 972), 'transformers.utils.check_min_version', 'check_min_version', (['"""4.5.0"""'], {}), "('4.5.0')\n", (963, 972), False, 'from transformers.utils import check_min_version\n'), ((983, 1010), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1000, 1010), False, 'import logging\n'), ((1040, 1085), 'transformers.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys', ([], {}), '()\n', (1083, 1085), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((1328, 1445), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'Path to pretrained model or model identifier from huggingface.co/models'}"}), "(default=None, metadata={'help':\n 'Path to pretrained model or model identifier from huggingface.co/models'})\n", (1333, 1445), False, 'from dataclasses import dataclass, field\n'), ((1509, 1688), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'If training <core_model>+SkimEmbeddings or SkimmingMask, path to pretrained model or model identifier from huggingface.co/models'\n }"}), "(default=None, metadata={'help':\n 'If training <core_model>+SkimEmbeddings or SkimmingMask, path to pretrained model or model identifier from huggingface.co/models'\n })\n", (1514, 1688), False, 'from dataclasses import dataclass, field\n'), ((1771, 1915), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'If training <core_model>+SkimEmbeddings or SkimmingMask, path to pretrained Skimformer model.'\n }"}), "(default=None, metadata={'help':\n 'If training <core_model>+SkimEmbeddings or SkimmingMask, path to pretrained Skimformer model.'\n })\n", (1776, 1915), False, 'from dataclasses import dataclass, field\n'), ((2146, 2256), 
'dataclasses.field', 'field', ([], {'default': '"""bert"""', 'metadata': "{'help': 'Core model type in <core_model>+SkimEmbeddings or SkimmingMask'}"}), "(default='bert', metadata={'help':\n 'Core model type in <core_model>+SkimEmbeddings or SkimmingMask'})\n", (2151, 2256), False, 'from dataclasses import dataclass, field\n'), ((2308, 2414), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help': 'Pretrained config name or path if not the same as model_name'}"}), "(default=None, metadata={'help':\n 'Pretrained config name or path if not the same as model_name'})\n", (2313, 2414), False, 'from dataclasses import dataclass, field\n'), ((2461, 2570), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help': 'Pretrained tokenizer name or path if not the same as model_name'}"}), "(default=None, metadata={'help':\n 'Pretrained tokenizer name or path if not the same as model_name'})\n", (2466, 2570), False, 'from dataclasses import dataclass, field\n'), ((2612, 2742), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'Where do you want to store the pretrained models downloaded from huggingface.co'\n }"}), "(default=None, metadata={'help':\n 'Where do you want to store the pretrained models downloaded from huggingface.co'\n })\n", (2617, 2742), False, 'from dataclasses import dataclass, field\n'), ((2788, 2922), 'dataclasses.field', 'field', ([], {'default': '(True)', 'metadata': "{'help':\n 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'\n }"}), "(default=True, metadata={'help':\n 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'\n })\n", (2793, 2922), False, 'from dataclasses import dataclass, field\n'), ((2976, 3083), 'dataclasses.field', 'field', ([], {'default': '(False)', 'metadata': "{'help': 'Contextualize the layout embeddings prior to Skim-Attention.'}"}), "(default=False, metadata={'help':\n 'Contextualize the layout 
embeddings prior to Skim-Attention.'})\n", (2981, 3083), False, 'from dataclasses import dataclass, field\n'), ((3120, 3232), 'dataclasses.field', 'field', ([], {'default': '(0)', 'metadata': "{'help':\n 'If > 0, SkimmingMask keeps the k-most attended tokens for each token.'}"}), "(default=0, metadata={'help':\n 'If > 0, SkimmingMask keeps the k-most attended tokens for each token.'})\n", (3125, 3232), False, 'from dataclasses import dataclass, field\n'), ((3278, 3411), 'dataclasses.field', 'field', ([], {'default': '"""main"""', 'metadata': "{'help':\n 'The specific model version to use (can be a branch name, tag name or commit id).'\n }"}), "(default='main', metadata={'help':\n 'The specific model version to use (can be a branch name, tag name or commit id).'\n })\n", (3283, 3411), False, 'from dataclasses import dataclass, field\n'), ((3453, 3623), 'dataclasses.field', 'field', ([], {'default': '(False)', 'metadata': "{'help':\n 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'\n }"}), "(default=False, metadata={'help':\n 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'\n })\n", (3458, 3623), False, 'from dataclasses import dataclass, field\n'), ((3702, 3764), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help': 'Proxy server to use.'}"}), "(default=None, metadata={'help': 'Proxy server to use.'})\n", (3707, 3764), False, 'from dataclasses import dataclass, field\n'), ((3974, 4043), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help': 'Path to the data directory.'}"}), "(default=None, metadata={'help': 'Path to the data directory.'})\n", (3979, 4043), False, 'from dataclasses import dataclass, field\n'), ((4085, 4154), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help': 'Path to the cached features'}"}), "(default=None, metadata={'help': 'Path to the 
cached features'})\n", (4090, 4154), False, 'from dataclasses import dataclass, field\n'), ((4205, 4301), 'dataclasses.field', 'field', ([], {'default': '(False)', 'metadata': "{'help': 'Overwrite the cached training and evaluation sets'}"}), "(default=False, metadata={'help':\n 'Overwrite the cached training and evaluation sets'})\n", (4210, 4301), False, 'from dataclasses import dataclass, field\n'), ((4359, 4458), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help': 'The number of processes to use for the preprocessing.'}"}), "(default=None, metadata={'help':\n 'The number of processes to use for the preprocessing.'})\n", (4364, 4458), False, 'from dataclasses import dataclass, field\n'), ((4504, 4693), 'dataclasses.field', 'field', ([], {'default': '(512)', 'metadata': "{'help':\n 'Optional input sequence length after tokenization.The training dataset will be truncated in block of this size for training.Default to 512.'\n }"}), "(default=512, metadata={'help':\n 'Optional input sequence length after tokenization.The training dataset will be truncated in block of this size for training.Default to 512.'\n })\n", (4509, 4693), False, 'from dataclasses import dataclass, field\n'), ((4790, 5038), 'dataclasses.field', 'field', ([], {'default': '(True)', 'metadata': "{'help':\n 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'\n }"}), "(default=True, metadata={'help':\n 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. 
More efficient on GPU but very bad for TPU.'\n })\n", (4795, 5038), False, 'from dataclasses import dataclass, field\n'), ((5144, 5301), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'\n }"}), "(default=None, metadata={'help':\n 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'\n })\n", (5149, 5301), False, 'from dataclasses import dataclass, field\n'), ((5390, 5549), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'\n }"}), "(default=None, metadata={'help':\n 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'\n })\n", (5395, 5549), False, 'from dataclasses import dataclass, field\n'), ((5639, 5792), 'dataclasses.field', 'field', ([], {'default': 'None', 'metadata': "{'help':\n 'For debugging purposes or quicker training, truncate the number of test examples to this value if set.'\n }"}), "(default=None, metadata={'help':\n 'For debugging purposes or quicker training, truncate the number of test examples to this value if set.'\n })\n", (5644, 5792), False, 'from dataclasses import dataclass, field\n'), ((5873, 6081), 'dataclasses.field', 'field', ([], {'default': '(False)', 'metadata': "{'help':\n 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'\n }"}), "(default=False, metadata={'help':\n 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'\n })\n", (5878, 6081), False, 'from dataclasses import dataclass, field\n'), ((6362, 6438), 
'transformers.HfArgumentParser', 'HfArgumentParser', (['(ModelArguments, DataTrainingArguments, TrainingArguments)'], {}), '((ModelArguments, DataTrainingArguments, TrainingArguments))\n', (6378, 6438), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((8364, 8405), 'transformers.trainer_utils.is_main_process', 'is_main_process', (['training_args.local_rank'], {}), '(training_args.local_rank)\n', (8379, 8405), False, 'from transformers.trainer_utils import is_main_process, get_last_checkpoint\n'), ((8698, 8726), 'transformers.set_seed', 'set_seed', (['training_args.seed'], {}), '(training_args.seed)\n', (8706, 8726), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((11584, 11729), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['(model_args.tokenizer_name if model_args.tokenizer_name else model_args.\n model_name_or_path)'], {}), '(model_args.tokenizer_name if model_args.\n tokenizer_name else model_args.model_name_or_path, **tokenizer_kwargs)\n', (11613, 11729), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((26142, 26362), 'skim.data.DataCollatorForTokenClassification', 'DataCollatorForTokenClassification', (['tokenizer'], {'padding': 'padding', 'max_length': 'data_args.max_seq_length', 'pad_to_multiple_of': '(8 if training_args.fp16 else None)', 'use_2d_attn_mask': "(model_args.model_type == 'skimmingmask')"}), "(tokenizer, padding=padding, max_length=\n data_args.max_seq_length, 
pad_to_multiple_of=8 if training_args.fp16 else\n None, use_2d_attn_mask=model_args.model_type == 'skimmingmask')\n", (26176, 26362), False, 'from skim.data import DataCollatorForTokenClassification\n'), ((27832, 28095), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'training_args', 'train_dataset': '(train_dataset if training_args.do_train else None)', 'eval_dataset': '(eval_dataset if training_args.do_eval else None)', 'tokenizer': 'tokenizer', 'data_collator': 'data_collator', 'compute_metrics': 'compute_metrics'}), '(model=model, args=training_args, train_dataset=train_dataset if\n training_args.do_train else None, eval_dataset=eval_dataset if\n training_args.do_eval else None, tokenizer=tokenizer, data_collator=\n data_collator, compute_metrics=compute_metrics)\n', (27839, 28095), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((6906, 6945), 'os.path.isdir', 'os.path.isdir', (['training_args.output_dir'], {}), '(training_args.output_dir)\n', (6919, 6945), False, 'import os\n'), ((7043, 7088), 'transformers.trainer_utils.get_last_checkpoint', 'get_last_checkpoint', (['training_args.output_dir'], {}), '(training_args.output_dir)\n', (7062, 7088), False, 'from transformers.trainer_utils import is_main_process, get_last_checkpoint\n'), ((8415, 8462), 'transformers.utils.logging.set_verbosity_info', 'transformers.utils.logging.set_verbosity_info', ([], {}), '()\n', (8460, 8462), False, 'import transformers\n'), ((8471, 8522), 'transformers.utils.logging.enable_default_handler', 'transformers.utils.logging.enable_default_handler', ([], {}), '()\n', (8520, 8522), False, 'import transformers\n'), ((8531, 8582), 'transformers.utils.logging.enable_explicit_format', 'transformers.utils.logging.enable_explicit_format', ([], {}), '()\n', (8580, 8582), False, 'import 
transformers\n'), ((8765, 8817), 'os.path.abspath', 'os.path.abspath', (['skim.data.datasets.docbank.__file__'], {}), '(skim.data.datasets.docbank.__file__)\n', (8780, 8817), False, 'import os\n'), ((11002, 11163), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['(model_args.config_name if model_args.config_name else model_args.\n model_name_or_path)'], {'num_labels': 'num_labels'}), '(model_args.config_name if model_args.config_name\n else model_args.model_name_or_path, num_labels=num_labels, **config_kwargs\n )\n', (11028, 11163), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((26486, 26516), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(2)'}), '(predictions, axis=2)\n', (26495, 26516), True, 'import numpy as np\n'), ((27191, 27218), 'numpy.unique', 'np.unique', (['flat_true_labels'], {}), '(flat_true_labels)\n', (27200, 27218), True, 'import numpy as np\n'), ((27237, 27364), 'sklearn.metrics.classification_report', 'classification_report', (['flat_true_labels', 'flat_true_predictions'], {'labels': 'labels_to_detect', 'output_dict': '(True)', 'zero_division': '(0)'}), '(flat_true_labels, flat_true_predictions, labels=\n labels_to_detect, output_dict=True, zero_division=0)\n', (27258, 27364), False, 'from sklearn.metrics import classification_report\n'), ((29379, 29409), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(2)'}), '(predictions, axis=2)\n', (29388, 29409), True, 'import numpy as np\n'), ((29797, 29859), 'os.path.join', 'os.path.join', (['training_args.output_dir', '"""test_predictions.txt"""'], {}), "(training_args.output_dir, 'test_predictions.txt')\n", (29809, 29859), False, 'import os\n'), ((7913, 7954), 'transformers.trainer_utils.is_main_process', 'is_main_process', (['training_args.local_rank'], {}), 
'(training_args.local_rank)\n', (7928, 7954), False, 'from transformers.trainer_utils import is_main_process, get_last_checkpoint\n'), ((12976, 13030), 'torch.nn.Linear', 'nn.Linear', (['model.config.hidden_size', 'config.num_labels'], {}), '(model.config.hidden_size, config.num_labels)\n', (12985, 13030), False, 'from torch import nn\n'), ((15734, 15792), 'transformers.AutoModelForTokenClassification.from_config', 'AutoModelForTokenClassification.from_config', ([], {'config': 'config'}), '(config=config)\n', (15777, 15792), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((20455, 20506), 'transformers.AutoModelForTokenClassification.from_config', 'AutoModelForTokenClassification.from_config', (['config'], {}), '(config)\n', (20498, 20506), False, 'from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoModelForMaskedLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed\n'), ((6714, 6742), 'os.path.abspath', 'os.path.abspath', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (6729, 6742), False, 'import os\n'), ((7835, 7868), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (7856, 7868), False, 'import logging\n'), ((15830, 15845), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15843, 15845), False, 'import torch\n'), ((20521, 20536), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20534, 20536), False, 'import torch\n'), ((7132, 7168), 'os.listdir', 'os.listdir', (['training_args.output_dir'], {}), '(training_args.output_dir)\n', (7142, 7168), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation, Input, Conv2D
from keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Concatenate, LeakyReLU, GlobalMaxPooling2D, Reshape
from keras.layers import SimpleRNN
from keras import initializers
from keras.optimizers import RMSprop
from keras import backend as K
import numpy as np
from keras.models import Model as KerasModel
# Training hyperparameters.
batch_size = 32
num_classes = 10
epochs = 2
# hidden_units = 100
hidden_units = 10
learning_rate = 1e-6
clip_norm = 1.0  # NOTE(review): defined but never passed to the optimizer below.
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Flatten each 28x28 image into a 784-step sequence of single pixel values.
x_train = x_train.reshape(x_train.shape[0], -1, 1)
x_test = x_test.reshape(x_test.shape[0], -1, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale pixel values into [0, 1].
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
input_shape = (256,256,3)
print('Evaluate IRNN...')
# Functional-API graph: conv feature extractor followed by an IRNN
# (SimpleRNN with identity recurrent init, per Le et al.).
inputs = Input(shape=(256, 256, 3))
features = Conv2D(16, (5, 5), padding='same', activation='relu')(inputs)
features = BatchNormalization()(features)
features = MaxPooling2D(pool_size=(2, 2), padding='same')(features)
# Collapse the spatial grid into a sequence of 16-channel steps for the RNN.
sequence = Reshape((-1, 16))(features)
rnn_out = SimpleRNN(
    hidden_units,
    kernel_initializer=initializers.RandomNormal(stddev=0.001),
    recurrent_initializer=initializers.Identity(gain=1.0),
    activation='relu',
    input_shape=(3, 128 * 128, 16),
)(sequence)

model = KerasModel(inputs=inputs, outputs=rnn_out)
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])
# Build one K.function per layer so intermediate activations can be
# evaluated for a single input tensor.
inp = model.input                                           # input placeholder
outputs = [layer.output for layer in model.layers[1:]]          # all layer outputs
functors = [K.function([inp], [out]) for out in outputs]
# Probe the network with a single random image-shaped input.
test = np.random.random(input_shape)[np.newaxis,...]
layer_outs = [func([test]) for func in functors]
# print (layer_outs)
# NOTE(review): this prints the symbolic output tensor, not the computed
# activations; `layer_outs[-1]` was likely intended -- confirm.
print(outputs[-1])
# model.fit(x_train, y_train,
#           batch_size=batch_size,
#           epochs=epochs,
#           verbose=1,
#           validation_data=(x_test, y_test))
# scores = model.evaluate(x_test, y_test, verbose=0)
# print('IRNN test score:', scores[0])
# print('IRNN test accuracy:', scores[1]) | [
"keras.layers.Conv2D",
"keras.backend.function",
"keras.datasets.mnist.load_data",
"keras.layers.MaxPooling2D",
"numpy.random.random",
"keras.initializers.Identity",
"keras.utils.to_categorical",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.Reshape",
"keras.layers.BatchNormalization... | [((761, 778), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (776, 778), False, 'from keras.datasets import mnist\n'), ((1157, 1205), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (1183, 1205), False, 'import keras\n'), ((1215, 1262), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (1241, 1262), False, 'import keras\n'), ((1702, 1728), 'keras.layers.Input', 'Input', ([], {'shape': '(256, 256, 3)'}), '(shape=(256, 256, 3))\n', (1707, 1728), False, 'from keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Concatenate, LeakyReLU, GlobalMaxPooling2D, Reshape\n'), ((2448, 2479), 'keras.models.Model', 'KerasModel', ([], {'inputs': 'x', 'outputs': 'y'}), '(inputs=x, outputs=y)\n', (2458, 2479), True, 'from keras.models import Model as KerasModel\n'), ((2494, 2519), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2501, 2519), False, 'from keras.optimizers import RMSprop\n'), ((1736, 1789), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(5, 5)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(16, (5, 5), padding='same', activation='relu')\n", (1742, 1789), False, 'from keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Concatenate, LeakyReLU, GlobalMaxPooling2D, Reshape\n'), ((1800, 1820), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1818, 1820), False, 'from keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Concatenate, LeakyReLU, GlobalMaxPooling2D, Reshape\n'), ((1830, 1876), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (1842, 1876), False, 'from keras.layers 
import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Concatenate, LeakyReLU, GlobalMaxPooling2D, Reshape\n'), ((1906, 1923), 'keras.layers.Reshape', 'Reshape', (['(-1, 16)'], {}), '((-1, 16))\n', (1913, 1923), False, 'from keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Concatenate, LeakyReLU, GlobalMaxPooling2D, Reshape\n'), ((2814, 2838), 'keras.backend.function', 'K.function', (['[inp]', '[out]'], {}), '([inp], [out])\n', (2824, 2838), True, 'from keras import backend as K\n'), ((2868, 2897), 'numpy.random.random', 'np.random.random', (['input_shape'], {}), '(input_shape)\n', (2884, 2897), True, 'import numpy as np\n'), ((1987, 2026), 'keras.initializers.RandomNormal', 'initializers.RandomNormal', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (2012, 2026), False, 'from keras import initializers\n'), ((2062, 2093), 'keras.initializers.Identity', 'initializers.Identity', ([], {'gain': '(1.0)'}), '(gain=1.0)\n', (2083, 2093), False, 'from keras import initializers\n')] |
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow_graphics.math.interpolation import bspline
def get_trajectories(dataset):
    """Collect agent future trajectories in each agent's own (ego) frame.

    For every batch, the 80 future (x, y) states of each track are
    translated to the track's current position and rotated by -yaw, then
    everything is concatenated, saved to disk as .npy files, and returned
    as numpy arrays: (trajectories, avails, object_types).
    """
    all_states = []
    all_valid = []
    all_types = []
    for step, batch in enumerate(dataset):
        future = tf.squeeze(batch['gt_future_states'], axis=1)[:, 11:, :2]
        valid = tf.squeeze(batch['gt_future_is_valid'], axis=1)[:, 11:]
        cur_x = tf.squeeze(batch['x'], axis=1)
        cur_y = tf.squeeze(batch['y'], axis=1)
        heading = tf.squeeze(batch['yaw'], axis=1)
        cos_h = tf.math.cos(heading)
        sin_h = tf.math.sin(heading)
        obj_type = tf.squeeze(batch['object_type'], axis=1)
        # Translate into the agent's frame, then rotate by -yaw.
        dx = future[:, :, 0] - cur_x  # (B, 80)
        dy = future[:, :, 1] - cur_y  # (B, 80)
        ego_x = cos_h * dx + sin_h * dy
        ego_y = -sin_h * dx + cos_h * dy
        all_states.append(tf.stack([ego_x, ego_y], axis=-1))
        all_valid.append(valid)
        all_types.append(obj_type)
        # Progress heartbeat every 1000 batches.
        if step % 1000 == 0:
            print(step)
    trajectories = tf.concat(all_states, axis=0).numpy()
    avails = tf.concat(all_valid, axis=0).numpy()
    object_types = tf.concat(all_types, axis=0).numpy()
    np.save("drive/MyDrive/Motion/trajectories.npy", trajectories)
    np.save("drive/MyDrive/Motion/avails.npy", avails)
    np.save("drive/MyDrive/Motion/object_types.npy", object_types)
    return trajectories, avails, object_types
def cluster(trajectories, avails, K = 8, num_iters = 30):
    """Masked k-means over trajectories.

    Flattens (B, num, 2) trajectories to (B, 2*num), duplicates the
    per-point availability mask to cover both coordinates, and alternates
    m_step / e_step for `num_iters` rounds.

    Returns (assignments, centroids, flattened trajectories, doubled avails).
    """
    num = trajectories.shape[1]
    flat = trajectories.copy().reshape([-1, 2 * num])
    # Duplicate the mask so each (x, y) pair shares one availability bit.
    mask = avails.reshape([-1, num, 1])
    mask = np.concatenate((mask, mask), axis=2).reshape([-1, 2 * num])
    # Seed the K centroids from every 17th trajectory.
    centroids = flat.copy()[0:K * 17:17, :]
    for _ in range(num_iters):
        assignments = m_step(flat, mask, centroids)
        e_step(flat, mask, centroids, assignments)
    return assignments, centroids, flat, mask
def chunked_cluster(trajectories, avails, initial_centroids = None, K = 8, num_iters = 30, chunk_size=250000):
    """Masked k-means whose assignment step runs in memory-friendly chunks.

    Same contract as `cluster`, but m_step is applied to at most
    `chunk_size` rows at a time and centroids may be warm-started via
    `initial_centroids`.
    """
    num = int(trajectories.shape[1])
    flat = trajectories.copy().reshape([-1, 2 * num])
    # Duplicate the mask so each (x, y) pair shares one availability bit.
    mask = avails.reshape([-1, num, 1])
    mask = np.concatenate((mask, mask), axis=2).reshape([-1, 2 * num])
    if initial_centroids is None:
        # Seed the K centroids from every 17th trajectory.
        centroids = flat.copy()[0:K * 17:17, :]
    else:
        centroids = initial_centroids.copy()
    total = len(flat)
    for iteration in range(num_iters):
        print(iteration)
        chunk_assignments = []
        for start in range(0, total, chunk_size):
            stop = min(start + chunk_size, total)
            chunk_assignments.append(m_step(flat[start:stop], mask[start:stop], centroids))
        assignments = np.concatenate(chunk_assignments, axis=0)
        e_step(flat, mask, centroids, assignments)
    return assignments, centroids, flat, mask
def m_step(trajectories, avails, centroids):
    """Assignment step of masked k-means.

    Parameters:
        trajectories: nparray of shape (B, 2*num), flattened trajectories
        avails: nparray of shape (B, 2*num), 1 where a coordinate is valid
        centroids: nparray of shape (K, 2*num)
    Returns:
        assignments: nparray of shape (B,) -- nearest-centroid index per row
    """
    K = len(centroids)
    num = trajectories.shape[1] // 2
    # Sanity check kept from the original implementation.
    assert num != 160, "num is 160"
    rows = trajectories.reshape([-1, 1, 2 * num])
    cents = centroids.reshape([1, K, 2 * num])
    valid = avails.reshape([-1, 1, 2 * num])
    # Squared distance counting only the valid coordinates.  (B, K)
    distance = (((rows - cents) ** 2) * valid).sum(axis=2)
    assignments = distance.argmin(axis=1)  # (B,)
    print('total cost:', distance.min(axis=1).astype(np.float64).sum())
    return assignments
def e_step(trajectories, avails, centroids, assignments, K = 8):
    """Update step of masked k-means (modifies `centroids` in place).

    Parameters:
        trajectories: nparray of shape (B, 2*num)
        avails: nparray of shape (B, 2*num) validity mask
        centroids: nparray of shape (K, 2*num) -- overwritten in place
        assignments: nparray of shape (B,) cluster index per trajectory
        K: unused; the cluster count is taken from len(centroids)
    Returns:
        None
    """
    for c in range(len(centroids)):
        members = assignments == c
        member_points = trajectories[members]
        member_mask = avails[members]
        weighted_sum = (member_points * member_mask).sum(axis=0)
        # Epsilon keeps empty clusters / never-valid coordinates finite.
        coverage = member_mask.sum(axis=0) + 1e-6
        centroids[c] = weighted_sum / coverage
def visualize_clusters(assignments, centroids, avails):
    """Render each centroid trajectory on its own 224x448 canvas.

    Each centroid is drawn over the union of its members' availability
    mask, scaled x2 and shifted by 112 into pixel coordinates.
    """
    palette = [(255, 0, 0), (255, 255, 0), (255, 255, 255), (0, 255, 255),
               (0, 255, 0), (0, 0, 255), (255, 0, 255), (255, 255, 100)]
    for idx, centroid in enumerate(centroids):
        members = np.where(assignments == idx)[0]
        # A coordinate is drawn if any member has it available.
        union_avails = np.any(avails[members], axis=0)
        print(f"the {idx}th cluster has this many members:{len(members)}")
        pts = centroid[union_avails].reshape([-1, 2]).astype(np.int64) * 2 + 112
        canvas = np.zeros((224, 448, 3))
        cv2.polylines(canvas, [pts], False, color=palette[idx % 8])
        plt.imshow(canvas / 255)
        plt.show()
def visualize_trajectories(trajectories, avails, indices):
    """Overlay the selected trajectories on one shared 224x448 canvas.

    `trajectories` has shape (B, 160); only the coordinates flagged in
    `avails` are drawn, scaled x2 and shifted by 112 into pixel space.
    """
    palette = [(255, 0, 0), (255, 255, 0), (255, 255, 255), (0, 255, 255),
               (0, 255, 0), (0, 0, 255), (255, 0, 255), (255, 255, 100)]
    canvas = np.zeros((224, 448, 3))
    for idx in indices:
        visible = trajectories[idx][avails[idx]]
        pts = 2 * visible.reshape([1, -1, 2]).astype(np.int64) + 112
        cv2.polylines(canvas, pts, False, color=palette[idx % 8])
    plt.imshow(canvas / 255)
    plt.show()
def inspect_trajectory(dataset, index, batch_size = 32):
    """Render one track's future trajectory (in its own ego frame).

    Walks `dataset` to the batch containing `index`, transforms the
    track's 80 future (x, y) states into its yaw-aligned frame, and draws
    the valid points on a blank 224x448 canvas.

    Parameters:
        dataset: iterable of batches with the same keys as get_trajectories
        index: global track index (batch_index * batch_size + offset)
        batch_size: number of tracks per batch, default 32
    """
    batch_index = index // batch_size
    index_within_batch = index % batch_size
    for i, batch in enumerate(dataset):
        if i < batch_index:
            continue
        future_states = tf.squeeze(batch['gt_future_states'], axis=1)[:, 11:, :2]
        future_is_valid = tf.squeeze(batch['gt_future_is_valid'], axis=1)[:, 11:]  # (B, 80)
        x = tf.squeeze(batch['x'], axis=1)
        y = tf.squeeze(batch['y'], axis=1)
        yaw = tf.squeeze(batch['yaw'], axis=1)
        c = tf.math.cos(yaw)
        s = tf.math.sin(yaw)
        # Translate to the agent origin, then rotate by -yaw (ego frame).
        future_x_hat = future_states[:, :, 0] - x  # (B, 80)
        future_y_hat = future_states[:, :, 1] - y  # (B, 80)
        future_ego_x = c * future_x_hat + s * future_y_hat
        future_ego_y = -s * future_x_hat + c * future_y_hat
        future_states = tf.stack([future_ego_x, future_ego_y], axis=-1)  # (B, 80, 2)
        trajectory = future_states[index_within_batch].numpy()
        avails = future_is_valid[index_within_batch].numpy()
        trajectory = (2.5 * trajectory[avails]).astype(np.int64) + 112
        # BUG FIX: the original fetched batch['image'], converted it to
        # numpy, then immediately overwrote it with np.zeros -- the fetched
        # image was never used. The dead stores are removed; drawing still
        # happens on a blank canvas exactly as before. If drawing on the
        # camera image was the real intent, replace np.zeros with it.
        image = np.zeros((224, 448, 3))
        cv2.polylines(image, [trajectory], False, color=(0, 255, 0))
        plt.imshow(image / 255)
        plt.show()
        break
def smooth(trajectories, avails, centroids, assignments):
    """
    Smooth each cluster centroid by fitting a small spline model to the
    cluster's member trajectories.

    Arguments:
        trajectories: nparray of shape (X, 80, 2)
        avails: nparray of shape (X, 80)
        centroids: nparray of shape (n, 160)
        assignments: nparray of shape (X,)
    Returns:
        new_centroids: nparray of shape (n, 160)
    """
    n = len(centroids)
    new_centroids = np.zeros((n, 160))
    histories = []
    for i in range(n):
        print(i)
        # Initialize 8 control knots by sampling every 10th centroid point.
        initial_knots = tf.convert_to_tensor(centroids[i].reshape([80, 2])[9::10])
        model = get_cluster_model(initial_knots)
        opt = tf.keras.optimizers.SGD(learning_rate=10)
        # NOTE(review): `cluster_loss` is defined elsewhere in this file.
        model.compile(opt, loss=cluster_loss)
        current_trajectories = trajectories[assignments==i]
        current_avails = avails[assignments==i]
        # Pack targets with a duplicated mask so the loss can ignore
        # invalid points -- exact layout depends on cluster_loss; confirm.
        output = np.stack([current_trajectories, np.stack([current_avails, current_avails], axis=-1)], axis=1)
        num_examples = len(current_trajectories)
        # Full-batch fit per cluster; the input is a dummy zero vector, so
        # presumably only the model's internal knot weights are learned.
        history = model.fit(x = np.zeros((num_examples,)), y=output, batch_size=num_examples, epochs=100, verbose=0)
        histories.append(history)
        print("loss", history.history["loss"][-1])
        # Evaluate the fitted model once to read off the smoothed curve.
        current_centroid = model(np.array([0])).numpy()
        new_centroids[i] = current_centroid.reshape([160,])
    visualize_centroids(new_centroids)
    return new_centroids
def cluster_and_get_all_avails(filtered_trajectories, filtered_avails, K, num_iters, chunk_size):
    """
    Cluster the filtered trajectories, persist the results, and collect the
    availability masks of each cluster's members.

    Returns:
        assignments_K: cluster index per trajectory
        centroids_K: cluster centroids
        trajectories_K, avails_K: the clustered data
        all_avails_K: python list of K nparrays, avails of each cluster's members
    """
    assignments_K, centroids_K, trajectories_K, avails_K = chunked_cluster(
        filtered_trajectories, filtered_avails, K=K, num_iters=num_iters, chunk_size=chunk_size)
    np.save("drive/MyDrive/Motion/clusters/filtered_veh_64.npy", centroids_K)
    np.save("drive/MyDrive/Motion/clusters/filtered_assignments_64.npy", assignments_K)
    all_avails_K = []
    for i in range(K):
        all_avails_K.append(avails_K[np.where(assignments_K == i)])
    # BUG FIX: previously returned the undefined name `all_avails`, which
    # raised NameError at runtime; return the list built above instead.
    return assignments_K, centroids_K, trajectories_K, avails_K, all_avails_K
def visualize_centroids(centroids, all_avails=None):
    """
    Render each centroid as a polyline on a blank image.

    Call Arguments:
        centroids: (K, 160)
        all_avails: python list of nparrays of shape (B, 80); when given, each
            centroid is masked to timesteps that at least one member has valid.
    """
    K = len(centroids)
    num = centroids.shape[1]//2
    for i in range(K):
        # BUG FIX: compare against None by identity. `all_avails != None` is
        # non-idiomatic and misbehaves (broadcasts) if an ndarray is passed.
        if all_avails is not None:
            avails = np.any(all_avails[i], axis=0)
            centroid = (2.5*centroids[i].reshape([num, 2])[avails]).astype(np.int32) + 112
        else:
            centroid = (2.5*centroids[i].reshape([num, 2])).astype(np.int32) + 112
        print(i)
        image = np.zeros((224, 448, 3))
        cv2.polylines(image, [centroid], False, (255, 255, 255))
        # Mark every 4th point so spacing along the trajectory is visible.
        for pt in centroid[::4]:
            cv2.circle(image, (pt[0], pt[1]), 1, (255, 0, 0))
        plt.figure(figsize=(10, 20))
        plt.imshow(image/255)
        plt.show()
def chunked_m_step(trajectories, avails, centroids, chunk_size=125000):
    """
    Run m_step over the whole dataset in fixed-size chunks to bound memory.

    Arguments:
        trajectories: nparray reshapeable to (N, 160)
        avails: nparray reshapeable to (N, 80)
        centroids: cluster centroids passed through to m_step
        chunk_size: rows processed per m_step call
    Returns:
        assignments: concatenation of the per-chunk m_step outputs
    """
    total = len(trajectories)
    flat_traj = trajectories.copy().reshape([-1, 160])
    # Duplicate each per-timestep availability flag so it masks both x and y.
    expanded = avails.reshape([-1, 80, 1])
    flat_avails = np.concatenate((expanded, expanded), axis=2).reshape([-1, 160])
    chunk_results = []
    for start in range(0, total, chunk_size):
        stop = min(start + chunk_size, total)
        chunk_results.append(m_step(flat_traj[start:stop], flat_avails[start:stop], centroids))
    return np.concatenate(chunk_results, axis=0)
def get_cluster_model(initial_knots):
  """
  Build a tiny Keras model whose only trainable state is an offset on a set
  of b-spline knots; the output is the 80-point trajectory sampled from the
  resulting cubic spline.

  initial_knots: tensor of shape (8, 2)
  Returns: tf.keras.Model mapping a dummy scalar input to shape (-1, 80, 2)
  """
  # Dummy scalar input: it only exists so Keras has an input driving the
  # trainable Dense layer below; its value is ignored.
  dummy_input = tf.keras.layers.Input(shape = (1,))
  # 16 = 2 coords * 8 knots of learned offset.
  knots = tf.keras.layers.Dense(16)(dummy_input)
  knots = tf.reshape(knots, (-1, 1, 2, 8)) # (B, 1, 2, 8)
  initial_knots = initial_knots[tf.newaxis, tf.newaxis, :, :]
  # Add the fixed initial knots (transposed to match the offset layout).
  knots = knots + tf.transpose(initial_knots, [0, 1, 3, 2])
  # Degree-3 spline over 8 knots -> parameter range [0, 8-3); sample it at
  # 80 evenly spaced positions.
  max_pos = 8 - 3
  positions = tf.expand_dims(tf.range(start = 0.0, limit = max_pos, delta = max_pos/80, dtype= knots.dtype), axis = -1)
  spline = bspline.interpolate(knots, positions, 3, False)
  spline = tf.squeeze(spline, axis = 1)
  pred = tf.transpose(spline, perm = [1,2,0,3]) # (B, K, 80, 2)
  pred = tf.reshape(pred, [-1, 80, 2])
  model = tf.keras.Model(inputs=[dummy_input], outputs =[pred])
  return model
def cluster_loss(y_true, y_pred):
    """
    Availability-masked mean squared error.

    y_true packs two channels along axis 1: [:, 0] is the target trajectory
    and [:, 1] is the availability mask weighting each squared error.
    """
    squared_error = (y_true[:, 0] - y_pred) ** 2
    mask = y_true[:, 1]
    return tf.reduce_mean(squared_error * mask)
def show_trajectory(trajectory):
  """
  Draw a single trajectory (polyline plus every point) on a blank image.

  trajectory: of shape (80, 2) or (160) or (1, 80, 2)
  """
  image = np.zeros((224, 448, 3))
  # Scale to pixels and shift to image center, matching the sibling helpers.
  pts = (2.5*trajectory.reshape([80, 2])).astype(np.int32) + 112
  # BUG FIX: cv2.polylines expects a *list* of point arrays (as the sibling
  # helpers pass); the bare (80, 2) array is misinterpreted/rejected by OpenCV.
  cv2.polylines(image, [pts], False, (255, 255, 255))
  for pt in pts:
    cv2.circle(image, (pt[0], pt[1]), 1, (255, 0, 0))
  plt.figure(figsize=(10, 20))
  plt.imshow(image)
plt.show() | [
"tensorflow.transpose",
"tensorflow.math.cos",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.reduce_mean",
"numpy.save",
"matplotlib.pyplot.imshow",
"tensorflow.keras.layers.Input",
"numpy.where",
"tensorflow.keras.optimizers.SGD",
"tensorflow.concat",
"numpy.stack",
"numpy.con... | [((1258, 1289), 'tensorflow.concat', 'tf.concat', (['trajectories'], {'axis': '(0)'}), '(trajectories, axis=0)\n', (1267, 1289), True, 'import tensorflow as tf\n'), ((1303, 1328), 'tensorflow.concat', 'tf.concat', (['avails'], {'axis': '(0)'}), '(avails, axis=0)\n', (1312, 1328), True, 'import tensorflow as tf\n'), ((1348, 1379), 'tensorflow.concat', 'tf.concat', (['object_types'], {'axis': '(0)'}), '(object_types, axis=0)\n', (1357, 1379), True, 'import tensorflow as tf\n'), ((1486, 1548), 'numpy.save', 'np.save', (['"""drive/MyDrive/Motion/trajectories.npy"""', 'trajectories'], {}), "('drive/MyDrive/Motion/trajectories.npy', trajectories)\n", (1493, 1548), True, 'import numpy as np\n'), ((1551, 1601), 'numpy.save', 'np.save', (['"""drive/MyDrive/Motion/avails.npy"""', 'avails'], {}), "('drive/MyDrive/Motion/avails.npy', avails)\n", (1558, 1601), True, 'import numpy as np\n'), ((1604, 1666), 'numpy.save', 'np.save', (['"""drive/MyDrive/Motion/object_types.npy"""', 'object_types'], {}), "('drive/MyDrive/Motion/object_types.npy', object_types)\n", (1611, 1666), True, 'import numpy as np\n'), ((1909, 1949), 'numpy.concatenate', 'np.concatenate', (['(avails, avails)'], {'axis': '(2)'}), '((avails, avails), axis=2)\n', (1923, 1949), True, 'import numpy as np\n'), ((2511, 2551), 'numpy.concatenate', 'np.concatenate', (['(avails, avails)'], {'axis': '(2)'}), '((avails, avails), axis=2)\n', (2525, 2551), True, 'import numpy as np\n'), ((3767, 3791), 'numpy.sum', 'np.sum', (['distance'], {'axis': '(2)'}), '(distance, axis=2)\n', (3773, 3791), True, 'import numpy as np\n'), ((3819, 3846), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (3828, 3846), True, 'import numpy as np\n'), ((5692, 5715), 'numpy.zeros', 'np.zeros', (['(224, 448, 3)'], {}), '((224, 448, 3))\n', (5700, 5715), True, 'import numpy as np\n'), ((6028, 6051), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(image / 255)'], {}), '(image / 255)\n', (6038, 
6051), True, 'import matplotlib.pyplot as plt\n'), ((6052, 6062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6060, 6062), True, 'import matplotlib.pyplot as plt\n'), ((7803, 7821), 'numpy.zeros', 'np.zeros', (['(n, 160)'], {}), '((n, 160))\n', (7811, 7821), True, 'import numpy as np\n'), ((8964, 9037), 'numpy.save', 'np.save', (['"""drive/MyDrive/Motion/clusters/filtered_veh_64.npy"""', 'centroids_K'], {}), "('drive/MyDrive/Motion/clusters/filtered_veh_64.npy', centroids_K)\n", (8971, 9037), True, 'import numpy as np\n'), ((9040, 9127), 'numpy.save', 'np.save', (['"""drive/MyDrive/Motion/clusters/filtered_assignments_64.npy"""', 'assignments_K'], {}), "('drive/MyDrive/Motion/clusters/filtered_assignments_64.npy',\n assignments_K)\n", (9047, 9127), True, 'import numpy as np\n'), ((10260, 10300), 'numpy.concatenate', 'np.concatenate', (['(avails, avails)'], {'axis': '(2)'}), '((avails, avails), axis=2)\n', (10274, 10300), True, 'import numpy as np\n'), ((10526, 10566), 'numpy.concatenate', 'np.concatenate', (['assignments_list'], {'axis': '(0)'}), '(assignments_list, axis=0)\n', (10540, 10566), True, 'import numpy as np\n'), ((10705, 10738), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (10726, 10738), True, 'import tensorflow as tf\n'), ((10805, 10837), 'tensorflow.reshape', 'tf.reshape', (['knots', '(-1, 1, 2, 8)'], {}), '(knots, (-1, 1, 2, 8))\n', (10815, 10837), True, 'import tensorflow as tf\n'), ((11134, 11181), 'tensorflow_graphics.math.interpolation.bspline.interpolate', 'bspline.interpolate', (['knots', 'positions', '(3)', '(False)'], {}), '(knots, positions, 3, False)\n', (11153, 11181), False, 'from tensorflow_graphics.math.interpolation import bspline\n'), ((11195, 11221), 'tensorflow.squeeze', 'tf.squeeze', (['spline'], {'axis': '(1)'}), '(spline, axis=1)\n', (11205, 11221), True, 'import tensorflow as tf\n'), ((11235, 11274), 'tensorflow.transpose', 'tf.transpose', (['spline'], 
{'perm': '[1, 2, 0, 3]'}), '(spline, perm=[1, 2, 0, 3])\n', (11247, 11274), True, 'import tensorflow as tf\n'), ((11301, 11330), 'tensorflow.reshape', 'tf.reshape', (['pred', '[-1, 80, 2]'], {}), '(pred, [-1, 80, 2])\n', (11311, 11330), True, 'import tensorflow as tf\n'), ((11343, 11395), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[dummy_input]', 'outputs': '[pred]'}), '(inputs=[dummy_input], outputs=[pred])\n', (11357, 11395), True, 'import tensorflow as tf\n'), ((11458, 11517), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((y_true[:, 0] - y_pred) ** 2 * y_true[:, 1])'], {}), '((y_true[:, 0] - y_pred) ** 2 * y_true[:, 1])\n', (11472, 11517), True, 'import tensorflow as tf\n'), ((11628, 11651), 'numpy.zeros', 'np.zeros', (['(224, 448, 3)'], {}), '((224, 448, 3))\n', (11636, 11651), True, 'import numpy as np\n'), ((11719, 11768), 'cv2.polylines', 'cv2.polylines', (['image', 'pts', '(False)', '(255, 255, 255)'], {}), '(image, pts, False, (255, 255, 255))\n', (11732, 11768), False, 'import cv2\n'), ((11842, 11870), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 20)'}), '(figsize=(10, 20))\n', (11852, 11870), True, 'import matplotlib.pyplot as plt\n'), ((11873, 11890), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (11883, 11890), True, 'import matplotlib.pyplot as plt\n'), ((11893, 11903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11901, 11903), True, 'import matplotlib.pyplot as plt\n'), ((499, 520), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (509, 520), True, 'import tensorflow as tf\n'), ((531, 552), 'tensorflow.squeeze', 'tf.squeeze', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (541, 552), True, 'import tensorflow as tf\n'), ((565, 588), 'tensorflow.squeeze', 'tf.squeeze', (['yaw'], {'axis': '(1)'}), '(yaw, axis=1)\n', (575, 588), True, 'import tensorflow as tf\n'), ((599, 615), 'tensorflow.math.cos', 'tf.math.cos', (['yaw'], {}), '(yaw)\n', (610, 615), 
True, 'import tensorflow as tf\n'), ((624, 640), 'tensorflow.math.sin', 'tf.math.sin', (['yaw'], {}), '(yaw)\n', (635, 640), True, 'import tensorflow as tf\n'), ((659, 699), 'tensorflow.squeeze', 'tf.squeeze', (["batch['object_type']"], {'axis': '(1)'}), "(batch['object_type'], axis=1)\n", (669, 699), True, 'import tensorflow as tf\n'), ((1038, 1085), 'tensorflow.stack', 'tf.stack', (['[future_ego_x, future_ego_y]'], {'axis': '(-1)'}), '([future_ego_x, future_ego_y], axis=-1)\n', (1046, 1085), True, 'import tensorflow as tf\n'), ((3016, 3056), 'numpy.concatenate', 'np.concatenate', (['assignments_list'], {'axis': '(0)'}), '(assignments_list, axis=0)\n', (3030, 3056), True, 'import numpy as np\n'), ((4380, 4406), 'numpy.where', 'np.where', (['(assignments == i)'], {}), '(assignments == i)\n', (4388, 4406), True, 'import numpy as np\n'), ((4534, 4585), 'numpy.sum', 'np.sum', (['(member_trajectories * member_avails)'], {'axis': '(0)'}), '(member_trajectories * member_avails, axis=0)\n', (4540, 4585), True, 'import numpy as np\n'), ((5055, 5086), 'numpy.any', 'np.any', (['avails[indices]'], {'axis': '(0)'}), '(avails[indices], axis=0)\n', (5061, 5086), True, 'import numpy as np\n'), ((5259, 5282), 'numpy.zeros', 'np.zeros', (['(224, 448, 3)'], {}), '((224, 448, 3))\n', (5267, 5282), True, 'import numpy as np\n'), ((5287, 5349), 'cv2.polylines', 'cv2.polylines', (['image', '[trajectory]', '(False)'], {'color': 'colors[i % 8]'}), '(image, [trajectory], False, color=colors[i % 8])\n', (5300, 5349), False, 'import cv2\n'), ((5358, 5381), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(image / 255)'], {}), '(image / 255)\n', (5368, 5381), True, 'import matplotlib.pyplot as plt\n'), ((5384, 5394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5392, 5394), True, 'import matplotlib.pyplot as plt\n'), ((5953, 6023), 'cv2.polylines', 'cv2.polylines', (['image', 'track_trajectory', '(False)'], {'color': 'colors[index % 8]'}), '(image, track_trajectory, False, 
color=colors[index % 8])\n', (5966, 6023), False, 'import cv2\n'), ((6560, 6581), 'tensorflow.squeeze', 'tf.squeeze', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (6570, 6581), True, 'import tensorflow as tf\n'), ((6592, 6613), 'tensorflow.squeeze', 'tf.squeeze', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (6602, 6613), True, 'import tensorflow as tf\n'), ((6626, 6649), 'tensorflow.squeeze', 'tf.squeeze', (['yaw'], {'axis': '(1)'}), '(yaw, axis=1)\n', (6636, 6649), True, 'import tensorflow as tf\n'), ((6660, 6676), 'tensorflow.math.cos', 'tf.math.cos', (['yaw'], {}), '(yaw)\n', (6671, 6676), True, 'import tensorflow as tf\n'), ((6685, 6701), 'tensorflow.math.sin', 'tf.math.sin', (['yaw'], {}), '(yaw)\n', (6696, 6701), True, 'import tensorflow as tf\n'), ((7038, 7085), 'tensorflow.stack', 'tf.stack', (['[future_ego_x, future_ego_y]'], {'axis': '(-1)'}), '([future_ego_x, future_ego_y], axis=-1)\n', (7046, 7085), True, 'import tensorflow as tf\n'), ((7320, 7343), 'numpy.zeros', 'np.zeros', (['(224, 448, 3)'], {}), '((224, 448, 3))\n', (7328, 7343), True, 'import numpy as np\n'), ((7346, 7406), 'cv2.polylines', 'cv2.polylines', (['image', '[trajectory]', '(False)'], {'color': '(0, 255, 0)'}), '(image, [trajectory], False, color=(0, 255, 0))\n', (7359, 7406), False, 'import cv2\n'), ((7411, 7434), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(image / 255)'], {}), '(image / 255)\n', (7421, 7434), True, 'import matplotlib.pyplot as plt\n'), ((7437, 7447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7445, 7447), True, 'import matplotlib.pyplot as plt\n'), ((8007, 8048), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(10)'}), '(learning_rate=10)\n', (8030, 8048), True, 'import tensorflow as tf\n'), ((9813, 9836), 'numpy.zeros', 'np.zeros', (['(224, 448, 3)'], {}), '((224, 448, 3))\n', (9821, 9836), True, 'import numpy as np\n'), ((9841, 9897), 'cv2.polylines', 'cv2.polylines', (['image', '[centroid]', '(False)', '(255, 255, 
255)'], {}), '(image, [centroid], False, (255, 255, 255))\n', (9854, 9897), False, 'import cv2\n'), ((9987, 10015), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 20)'}), '(figsize=(10, 20))\n', (9997, 10015), True, 'import matplotlib.pyplot as plt\n'), ((10020, 10043), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(image / 255)'], {}), '(image / 255)\n', (10030, 10043), True, 'import matplotlib.pyplot as plt\n'), ((10046, 10056), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10054, 10056), True, 'import matplotlib.pyplot as plt\n'), ((10754, 10779), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(16)'], {}), '(16)\n', (10775, 10779), True, 'import tensorflow as tf\n'), ((10937, 10978), 'tensorflow.transpose', 'tf.transpose', (['initial_knots', '[0, 1, 3, 2]'], {}), '(initial_knots, [0, 1, 3, 2])\n', (10949, 10978), True, 'import tensorflow as tf\n'), ((11030, 11103), 'tensorflow.range', 'tf.range', ([], {'start': '(0.0)', 'limit': 'max_pos', 'delta': '(max_pos / 80)', 'dtype': 'knots.dtype'}), '(start=0.0, limit=max_pos, delta=max_pos / 80, dtype=knots.dtype)\n', (11038, 11103), True, 'import tensorflow as tf\n'), ((11790, 11839), 'cv2.circle', 'cv2.circle', (['image', '(pt[0], pt[1])', '(1)', '(255, 0, 0)'], {}), '(image, (pt[0], pt[1]), 1, (255, 0, 0))\n', (11800, 11839), False, 'import cv2\n'), ((290, 335), 'tensorflow.squeeze', 'tf.squeeze', (["batch['gt_future_states']"], {'axis': '(1)'}), "(batch['gt_future_states'], axis=1)\n", (300, 335), True, 'import tensorflow as tf\n'), ((372, 419), 'tensorflow.squeeze', 'tf.squeeze', (["batch['gt_future_is_valid']"], {'axis': '(1)'}), "(batch['gt_future_is_valid'], axis=1)\n", (382, 419), True, 'import tensorflow as tf\n'), ((4612, 4641), 'numpy.sum', 'np.sum', (['member_avails'], {'axis': '(0)'}), '(member_avails, axis=0)\n', (4618, 4641), True, 'import numpy as np\n'), ((5003, 5029), 'numpy.where', 'np.where', (['(assignments == i)'], {}), '(assignments == i)\n', (5011, 
5029), True, 'import numpy as np\n'), ((6341, 6386), 'tensorflow.squeeze', 'tf.squeeze', (["batch['gt_future_states']"], {'axis': '(1)'}), "(batch['gt_future_states'], axis=1)\n", (6351, 6386), True, 'import tensorflow as tf\n'), ((6423, 6470), 'tensorflow.squeeze', 'tf.squeeze', (["batch['gt_future_is_valid']"], {'axis': '(1)'}), "(batch['gt_future_is_valid'], axis=1)\n", (6433, 6470), True, 'import tensorflow as tf\n'), ((9586, 9615), 'numpy.any', 'np.any', (['all_avails[i]'], {'axis': '(0)'}), '(all_avails[i], axis=0)\n', (9592, 9615), True, 'import numpy as np\n'), ((9933, 9982), 'cv2.circle', 'cv2.circle', (['image', '(pt[0], pt[1])', '(1)', '(255, 0, 0)'], {}), '(image, (pt[0], pt[1]), 1, (255, 0, 0))\n', (9943, 9982), False, 'import cv2\n'), ((8236, 8287), 'numpy.stack', 'np.stack', (['[current_avails, current_avails]'], {'axis': '(-1)'}), '([current_avails, current_avails], axis=-1)\n', (8244, 8287), True, 'import numpy as np\n'), ((8371, 8396), 'numpy.zeros', 'np.zeros', (['(num_examples,)'], {}), '((num_examples,))\n', (8379, 8396), True, 'import numpy as np\n'), ((9198, 9226), 'numpy.where', 'np.where', (['(assignments_K == i)'], {}), '(assignments_K == i)\n', (9206, 9226), True, 'import numpy as np\n'), ((3886, 3910), 'numpy.min', 'np.min', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (3892, 3910), True, 'import numpy as np\n'), ((8562, 8575), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (8570, 8575), True, 'import numpy as np\n')] |
import pandas as pd
import os
import numpy as np
import argparse
import warnings

# Compute the expected Bayes ratio and Brier scores for two variables of a
# results dataframe, using a 2-D histogram over (xvar, yvar).
parser = argparse.ArgumentParser('Bayes ratio and Brier score for histogram of two variables')
parser.add_argument('file', type=str,
        metavar='DF',
        help='Location where pkl file saved')
parser.add_argument('--nbins', type=int, default=100)
parser.add_argument('--yvar', type=str, default='model_entropy')
parser.add_argument('--xvar', type=str, default='rank')
parser.add_argument('--xbins', type=float, default=[], nargs='*')
parser.add_argument('--ybins', type=float, default=[], nargs='*')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--eps', type=float, default=0)
parser.add_argument('--K', type=int, default=10)
parser.add_argument('--exclude', type=int, default=[], nargs='*')
parser.set_defaults(show=True)
parser.set_defaults(save=False)
args = parser.parse_args()
np.random.seed(args.seed)
from common import labdict
print('X: %s, Y: %s'%(args.xvar, args.yvar))
df = pd.read_pickle(args.file)
# BUG FIX: DataFrame.drop returns a new frame; the result was previously
# discarded, so --exclude had no effect. reset_index keeps the positional
# label indexing (X_[Ix] below) valid after rows are dropped.
df = df.drop(args.exclude).reset_index(drop=True)
Nsamples = len(df)
K = args.K
N = len(df)
Ix = np.random.permutation(N)
X_ = df[args.xvar]
Y_ = df[args.yvar]
EBR1 = []
EBR5 = []
# Remnants of an earlier K-fold loop, kept for reference:
#n = N//K
#ix = Ix[n*i:n*(i+1)]
#X = np.delete(X_.to_numpy(), ix)
#Y = np.delete(Y_.to_numpy(), ix)
X = X_[Ix]
Y = Y_[Ix]
Nbins = args.nbins
# Bin each variable: quantile bins by default, explicit edges if provided.
if len(args.ybins)==0:
    Yc, Ybins = pd.qcut(Y,Nbins,retbins=True,duplicates='drop')
else:
    Yc, Ybins = pd.cut(Y,args.ybins,retbins=True, duplicates='drop', right=False)
if len(args.xbins)==0:
    Xc, Xbins = pd.qcut(X,Nbins,retbins=True,duplicates='drop')
else:
    Xc, Xbins = pd.cut(X,args.xbins,retbins=True,duplicates='drop', right=False)
#Yvc = Yc.value_counts(sort=False)
#Xvc = Xc.value_counts(sort=False)
# Joint histogram normalized to a probability table P(x-bin, y-bin).
H, xe, ye = np.histogram2d(X, Y, bins=[Xbins, Ybins])
P = H/np.sum(H)
Ptop1 = df['top1'].sum()/len(df)
Ptop5 = df['top5'].sum()/len(df)
Otop1 = Ptop1/(1-Ptop1)
Otop5 = Ptop5/(1-Ptop5)
Py = P.sum(axis=0)
# Conditional P(top1 | y-bin): mass in the x-bin whose left edge is 0.
Ptop1xbins = P[Xbins[:-1]==0,:].reshape(-1)/Py
Brier1 = Ptop1xbins*(Ptop1xbins - 1)**2 + (1-Ptop1xbins)*Ptop1xbins**2
ix = np.arange(len(Ptop1xbins))
# Flatten saturated tails (probability exactly 1 or 0) by averaging them,
# so the odds ratio below stays finite.
ix1 = Ptop1xbins==1
try:
    lb = np.max(ix[ix1])+1
except ValueError as e:
    lb = 0
Ptop1xbins[0:(lb+1)] = np.sum(Ptop1xbins[0:(lb+1)])/(lb+1)
ix0 = Ptop1xbins==0
try:
    ub = np.min(ix[ix0])
except ValueError as e:
    ub = len(Ptop1xbins)
# NOTE(review): the upper tail divides by (len-ub+1) while the slice has
# (len-ub) elements -- looks like an off-by-one vs. the lower tail; confirm.
Ptop1xbins[ub:] = np.sum(Ptop1xbins[ub:])/(len(Ptop1xbins)-ub+1)
Otop1xbins = Ptop1xbins/(1-Ptop1xbins+args.eps)
BR1 = Otop1xbins/Otop1
# Same computation for top-5: mass in x-bins with left edge < 5.
Ptop5xbins = P[Xbins[:-1]<5,:].sum(axis=0)/Py
Brier5 = Ptop5xbins*(Ptop5xbins - 1)**2 + (1-Ptop5xbins)*Ptop5xbins**2
ix5 = Ptop5xbins==1
try:
    lb = np.max(ix[ix5])+1
except ValueError as e:
    lb = 0
Ptop5xbins[0:(lb+1)] = np.sum(Ptop5xbins[0:(lb+1)])/(lb+1)
ix0 = Ptop5xbins==0
try:
    ub = np.min(ix[ix0])
except ValueError as e:
    ub = len(Ptop5xbins)
Ptop5xbins[ub:] = np.sum(Ptop5xbins[ub:])/(len(Ptop5xbins)-ub+1)
Otop5xbins = Ptop5xbins/(1-Ptop5xbins+args.eps)
BR5 = Otop5xbins/Otop5
# Symmetrize the Bayes ratios so values below 1 count as much as above 1.
BR1 = np.max([BR1,1/BR1],axis=0)
BR5 = np.max([BR5,1/BR5],axis=0)
EBR1.append(np.sum(Py*BR1))
EBR5.append(np.sum(Py*BR5))
print('E[Bayes ratio, top1] = %.3f'%np.mean(EBR1))
print('E[Bayes ratio, top5] = %.3f'%np.mean(EBR5))
print('\nBrier, top1 = %.3f'%np.sum(Py*Brier1))
print('Brier, top5 = %.3f'%np.sum(Py*Brier5))
| [
"pandas.read_pickle",
"numpy.mean",
"argparse.ArgumentParser",
"pandas.qcut",
"pandas.cut",
"numpy.max",
"numpy.sum",
"numpy.random.seed",
"numpy.min",
"numpy.histogram2d",
"numpy.random.permutation"
] | [((92, 182), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bayes ratio and Brier score for histogram of two variables"""'], {}), "(\n 'Bayes ratio and Brier score for histogram of two variables')\n", (115, 182), False, 'import argparse\n'), ((906, 931), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (920, 931), True, 'import numpy as np\n'), ((1013, 1038), 'pandas.read_pickle', 'pd.read_pickle', (['args.file'], {}), '(args.file)\n', (1027, 1038), True, 'import pandas as pd\n'), ((1110, 1134), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (1131, 1134), True, 'import numpy as np\n'), ((1773, 1814), 'numpy.histogram2d', 'np.histogram2d', (['X', 'Y'], {'bins': '[Xbins, Ybins]'}), '(X, Y, bins=[Xbins, Ybins])\n', (1787, 1814), True, 'import numpy as np\n'), ((3013, 3043), 'numpy.max', 'np.max', (['[BR1, 1 / BR1]'], {'axis': '(0)'}), '([BR1, 1 / BR1], axis=0)\n', (3019, 3043), True, 'import numpy as np\n'), ((3046, 3076), 'numpy.max', 'np.max', (['[BR5, 1 / BR5]'], {'axis': '(0)'}), '([BR5, 1 / BR5], axis=0)\n', (3052, 3076), True, 'import numpy as np\n'), ((1377, 1427), 'pandas.qcut', 'pd.qcut', (['Y', 'Nbins'], {'retbins': '(True)', 'duplicates': '"""drop"""'}), "(Y, Nbins, retbins=True, duplicates='drop')\n", (1384, 1427), True, 'import pandas as pd\n'), ((1447, 1514), 'pandas.cut', 'pd.cut', (['Y', 'args.ybins'], {'retbins': '(True)', 'duplicates': '"""drop"""', 'right': '(False)'}), "(Y, args.ybins, retbins=True, duplicates='drop', right=False)\n", (1453, 1514), True, 'import pandas as pd\n'), ((1553, 1603), 'pandas.qcut', 'pd.qcut', (['X', 'Nbins'], {'retbins': '(True)', 'duplicates': '"""drop"""'}), "(X, Nbins, retbins=True, duplicates='drop')\n", (1560, 1603), True, 'import pandas as pd\n'), ((1623, 1690), 'pandas.cut', 'pd.cut', (['X', 'args.xbins'], {'retbins': '(True)', 'duplicates': '"""drop"""', 'right': '(False)'}), "(X, args.xbins, retbins=True, duplicates='drop', right=False)\n", 
(1629, 1690), True, 'import pandas as pd\n'), ((1822, 1831), 'numpy.sum', 'np.sum', (['H'], {}), '(H)\n', (1828, 1831), True, 'import numpy as np\n'), ((2231, 2259), 'numpy.sum', 'np.sum', (['Ptop1xbins[0:lb + 1]'], {}), '(Ptop1xbins[0:lb + 1])\n', (2237, 2259), True, 'import numpy as np\n'), ((2302, 2317), 'numpy.min', 'np.min', (['ix[ix0]'], {}), '(ix[ix0])\n', (2308, 2317), True, 'import numpy as np\n'), ((2385, 2408), 'numpy.sum', 'np.sum', (['Ptop1xbins[ub:]'], {}), '(Ptop1xbins[ub:])\n', (2391, 2408), True, 'import numpy as np\n'), ((2733, 2761), 'numpy.sum', 'np.sum', (['Ptop5xbins[0:lb + 1]'], {}), '(Ptop5xbins[0:lb + 1])\n', (2739, 2761), True, 'import numpy as np\n'), ((2804, 2819), 'numpy.min', 'np.min', (['ix[ix0]'], {}), '(ix[ix0])\n', (2810, 2819), True, 'import numpy as np\n'), ((2887, 2910), 'numpy.sum', 'np.sum', (['Ptop5xbins[ub:]'], {}), '(Ptop5xbins[ub:])\n', (2893, 2910), True, 'import numpy as np\n'), ((3085, 3101), 'numpy.sum', 'np.sum', (['(Py * BR1)'], {}), '(Py * BR1)\n', (3091, 3101), True, 'import numpy as np\n'), ((3113, 3129), 'numpy.sum', 'np.sum', (['(Py * BR5)'], {}), '(Py * BR5)\n', (3119, 3129), True, 'import numpy as np\n'), ((2155, 2170), 'numpy.max', 'np.max', (['ix[ix1]'], {}), '(ix[ix1])\n', (2161, 2170), True, 'import numpy as np\n'), ((2657, 2672), 'numpy.max', 'np.max', (['ix[ix5]'], {}), '(ix[ix5])\n', (2663, 2672), True, 'import numpy as np\n'), ((3166, 3179), 'numpy.mean', 'np.mean', (['EBR1'], {}), '(EBR1)\n', (3173, 3179), True, 'import numpy as np\n'), ((3217, 3230), 'numpy.mean', 'np.mean', (['EBR5'], {}), '(EBR5)\n', (3224, 3230), True, 'import numpy as np\n'), ((3261, 3280), 'numpy.sum', 'np.sum', (['(Py * Brier1)'], {}), '(Py * Brier1)\n', (3267, 3280), True, 'import numpy as np\n'), ((3307, 3326), 'numpy.sum', 'np.sum', (['(Py * Brier5)'], {}), '(Py * Brier5)\n', (3313, 3326), True, 'import numpy as np\n')] |
# Demonstration figure: render Gaussian noise as an image with a colorbar.
from pgfutils import save, setup_figure

# setup_figure() must run before pyplot is imported so pgfutils can put its
# backend / figure-size configuration in place first.
setup_figure(width=0.95, height=0.4)

from matplotlib import pyplot as plt
import numpy as np

samples = np.random.randn(512, 256)
plt.imshow(samples, interpolation="nearest", aspect="auto")
plt.colorbar()
save()
| [
"pgfutils.setup_figure",
"matplotlib.pyplot.imshow",
"pgfutils.save",
"matplotlib.pyplot.colorbar",
"numpy.random.randn"
] | [((42, 78), 'pgfutils.setup_figure', 'setup_figure', ([], {'width': '(0.95)', 'height': '(0.4)'}), '(width=0.95, height=0.4)\n', (54, 78), False, 'from pgfutils import save, setup_figure\n'), ((146, 171), 'numpy.random.randn', 'np.random.randn', (['(512)', '(256)'], {}), '(512, 256)\n', (161, 171), True, 'import numpy as np\n'), ((172, 229), 'matplotlib.pyplot.imshow', 'plt.imshow', (['noise'], {'interpolation': '"""nearest"""', 'aspect': '"""auto"""'}), "(noise, interpolation='nearest', aspect='auto')\n", (182, 229), True, 'from matplotlib import pyplot as plt\n'), ((230, 244), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (242, 244), True, 'from matplotlib import pyplot as plt\n'), ((246, 252), 'pgfutils.save', 'save', ([], {}), '()\n', (250, 252), False, 'from pgfutils import save, setup_figure\n')] |
from geoalchemy2 import Geography, Geometry
from pytz import timezone
from shapely import wkb
from sqlalchemy import (
Column,
Integer, BigInteger,
String,
Boolean,
DateTime,
ForeignKey,
UniqueConstraint,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import deferred, relationship
from sqlalchemy.ext.declarative import declarative_base
from typing import List, Optional
import datetime
import numpy
import statistics
Base = declarative_base()
class Source(Base):
    """
    A specific source data may come from.
    E.g. NEXRAD L2, GFS, NAM, HRRR
    """
    __tablename__ = "source"
    id = Column(Integer, primary_key=True)
    short_name = Column(String(8), unique=True)
    name = Column(String(128), unique=True)
    src_url = Column(String(1024))
    last_updated = Column(DateTime)
    # `fields` is attached via backref from SourceField.source
    def serialize(self):
        """Return a JSON-friendly dict of this source's scalar columns."""
        return {
            attr: getattr(self, attr)
            for attr in ("id", "short_name", "name", "src_url", "last_updated")
        }
    def __repr__(self):
        return f"<Source id={self.id} short_name='{self.short_name}'>"
class Metric(Base):
    """
    A metric that various source fields can have values for.
    E.g. temperature, precipitation, visibility
    """
    __tablename__ = "metric"
    id = Column(Integer, primary_key=True)
    name = Column(String(128), unique=True)
    units = Column(String(16))
    # Intermediate metrics aren't displayed to the end user; they only exist
    # to derive other metrics from.
    intermediate = Column(Boolean, nullable=False, default=False)
    def serialize(self):
        """Return a JSON-friendly dict of this metric's public columns."""
        return {key: getattr(self, key) for key in ("id", "name", "units")}
    def __repr__(self):
        return f"<Metric id={self.id} name='{self.name}'>"
class SourceField(Base):
    """
    A specific field inside of a source.
    E.g. Composite reflectivity @ entire atmosphere, 2m temps, visibility @ ground
    """
    __tablename__ = "source_field"
    # A given (source, metric) pair may only appear once.
    __table_args__ = (
        UniqueConstraint('source_id', 'metric_id'),
    )
    id = Column(Integer, primary_key=True)
    source_id = Column(Integer, ForeignKey('source.id'))
    metric_id = Column(Integer, ForeignKey('metric.id'))
    projection_id = Column(Integer, ForeignKey('projection.id'))
    idx_short_name = Column(String(15)) # e.g. TMP, VIS
    idx_level = Column(String(255)) # e.g. surface, 2 m above ground
    selectors = Column(JSONB) # e.g. {'name': 'Temperature', 'typeOfLevel': 'surface'}. NULL means this field won't be ingested directly
    source = relationship('Source', backref='fields', lazy='joined')
    projection = relationship('Projection')
    metric = relationship('Metric', backref='fields', lazy='joined')
    def serialize(self):
        """Return a JSON-friendly dict of this field's identifying columns."""
        return {
            "id": self.id,
            "source_id": self.source_id,
            "metric_id": self.metric_id,
        }
    def __repr__(self):
        return f"<SourceField id={self.id} short_name='{self.idx_short_name}'>"
class Location(Base):
    """
    A specific location that we have a lat/lon for.
    """
    __tablename__ = "location"
    id = Column(Integer, primary_key=True)
    location = Column(Geography('Point,4326'))
    name = Column(String(512))
    population = Column(Integer)
    def get_coords(self):
        """
        Decode the WKB geography column into plain coordinates.
        :return: lon, lat
        """
        point = wkb.loads(bytes(self.location.data))
        return point.x, point.y
    def serialize(self):
        """Return a JSON-friendly dict including decoded lon/lat."""
        lon, lat = self.get_coords()
        return {
            "id": self.id,
            "name": self.name,
            "lon": lon,
            "lat": lat,
        }
    def __repr__(self):
        return f"<Location id={self.id} name='{self.name}'>"
class Timezone(Base):
    """
    A timezone name and associated geometry.
    """
    __tablename__ = "timezone"
    # Zone identifier; pytz resolves it below, so it must be a name pytz
    # recognizes (presumably IANA-style, e.g. 'America/New_York' -- confirm).
    name = Column(String(512), primary_key=True)
    # deferred: the multipolygon can be large, so only load it when accessed.
    geom = deferred(Column(Geometry('MULTIPOLYGON')))
    def utc_offset(self, dt):
        """Return the UTC offset of this zone at datetime `dt`, via pytz."""
        return timezone(self.name).utcoffset(dt)
class Projection(Base):
    """
    Table that holds data about the projection a given ingested file uses.
    """
    __tablename__ = "projection"
    id = Column(Integer, primary_key=True)
    params = Column(JSONB)
    n_x = Column(Integer)
    n_y = Column(Integer)
    ll_hash = Column(BigInteger)
    # Lat/lon grids can be large, so load them lazily only when accessed.
    lats = deferred(Column(JSONB))
    lons = deferred(Column(JSONB))
    def shape(self):
        """Return the grid shape as (n_y, n_x)."""
        return self.n_y, self.n_x
class FileMeta(Base):
    """
    Table that holds metadata about denormalized data in a given file.
    Each file can hold any data (different fields, different sources even) as long
    as it has a single projection.
    """
    __tablename__ = "file_meta"
    file_name = Column(String(4096), primary_key=True)
    projection_id = Column(Integer, ForeignKey('projection.id'))
    # Row creation time; note datetime.utcnow produces naive UTC timestamps.
    ctime = Column(DateTime, default=datetime.datetime.utcnow)
    # Size of the data stored per location -- presumably bytes; confirm
    # against the ingest/reader code that uses it.
    loc_size = Column(Integer, nullable=False)
    projection = relationship('Projection')
class FileBandMeta(Base):
    """
    Table that holds data about specific runs of denormalized data in the given file.
    """
    __tablename__ = "file_band_meta"
    # TODO: on delete of file meta, delete these
    # PKs: a band is identified by its containing file plus its offset.
    file_name = Column(String, ForeignKey('file_meta.file_name'), primary_key=True)
    offset = Column(Integer, primary_key=True) # offset within a (x,y) chunk, _not_ offset in the entire file
    # Metadata used to seek into the file
    vals_per_loc = Column(Integer)
    # Metadata describing what this band holds and for when.
    source_field_id = Column(Integer, ForeignKey('source_field.id'))
    valid_time = Column(DateTime)
    run_time = Column(DateTime)
    file_meta = relationship('FileMeta', backref='bands', lazy='joined')
    source_field = relationship('SourceField', lazy='joined')
class DataPointSet(object):
    """
    Non-db object which holds values and metadata for a given data point (loc, time).
    """
    values: List[float]
    metric_id: int
    valid_time: datetime.datetime
    source_field_id: Optional[int]
    run_time: Optional[datetime.datetime]
    derived: bool
    synthesized: bool

    def __init__(
            self,
            values: List[float],
            metric_id: int,
            valid_time: datetime.datetime,
            source_field_id: Optional[int] = None,
            run_time: Optional[datetime.datetime] = None,
            derived: bool = False,
            synthesized: bool = False):
        # Required metadata
        self.values = values
        self.metric_id = metric_id
        self.valid_time = valid_time
        # Optional provenance information
        self.source_field_id = source_field_id
        self.run_time = run_time
        self.derived = derived
        self.synthesized = synthesized

    def __repr__(self):
        return (
            f"<DataPointSet metric_id={self.metric_id}"
            f" valid_time={self.valid_time}"
            f" source_field_id={self.source_field_id}"
            f" derived={self.derived}"
            f" synthesized={self.synthesized}>"
        )

    def _fraction_within_stddev(self, center: float) -> float:
        """Fraction of values strictly within one std deviation of `center`."""
        arr = numpy.array(self.values)
        within = numpy.abs(arr - center) < numpy.std(arr)
        return within.sum() / len(arr)

    def min(self) -> float:
        """Smallest of the stored values."""
        return min(self.values)

    def max(self) -> float:
        """Largest of the stored values."""
        return max(self.values)

    def median(self) -> float:
        """Median of the stored values."""
        return statistics.median(self.values)

    def median_confidence(self) -> float:
        """Fraction of values within one std deviation of the median."""
        return self._fraction_within_stddev(self.median())

    def mean(self) -> float:
        """Arithmetic mean of the stored values."""
        return statistics.mean(self.values)

    def mean_confidence(self) -> float:
        """Fraction of values within one std deviation of the mean."""
        return self._fraction_within_stddev(self.mean())
| [
"statistics.mean",
"sqlalchemy.orm.relationship",
"geoalchemy2.Geography",
"pytz.timezone",
"sqlalchemy.ForeignKey",
"sqlalchemy.UniqueConstraint",
"sqlalchemy.String",
"statistics.median",
"numpy.array",
"geoalchemy2.Geometry",
"sqlalchemy.ext.declarative.declarative_base",
"numpy.std",
"sq... | [((481, 499), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (497, 499), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((654, 687), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (660, 687), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((834, 850), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (840, 850), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((1399, 1432), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1405, 1432), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((1633, 1679), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)', 'default': '(False)'}), '(Boolean, nullable=False, default=False)\n', (1639, 1679), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2201, 2234), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2207, 2234), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2558, 2571), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (2564, 2571), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2694, 2749), 'sqlalchemy.orm.relationship', 'relationship', (['"""Source"""'], {'backref': '"""fields"""', 'lazy': '"""joined"""'}), "('Source', backref='fields', lazy='joined')\n", (2706, 2749), False, 'from sqlalchemy.orm import deferred, relationship\n'), ((2767, 2793), 'sqlalchemy.orm.relationship', 'relationship', 
(['"""Projection"""'], {}), "('Projection')\n", (2779, 2793), False, 'from sqlalchemy.orm import deferred, relationship\n'), ((2807, 2862), 'sqlalchemy.orm.relationship', 'relationship', (['"""Metric"""'], {'backref': '"""fields"""', 'lazy': '"""joined"""'}), "('Metric', backref='fields', lazy='joined')\n", (2819, 2862), False, 'from sqlalchemy.orm import deferred, relationship\n'), ((3263, 3296), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3269, 3296), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((3392, 3407), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (3398, 3407), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4323, 4356), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (4329, 4356), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4370, 4383), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (4376, 4383), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4394, 4409), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (4400, 4409), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4420, 4435), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (4426, 4435), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4450, 4468), 'sqlalchemy.Column', 'Column', (['BigInteger'], {}), '(BigInteger)\n', (4456, 4468), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4991, 5041), 
'sqlalchemy.Column', 'Column', (['DateTime'], {'default': 'datetime.datetime.utcnow'}), '(DateTime, default=datetime.datetime.utcnow)\n', (4997, 5041), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5057, 5088), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (5063, 5088), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5107, 5133), 'sqlalchemy.orm.relationship', 'relationship', (['"""Projection"""'], {}), "('Projection')\n", (5119, 5133), False, 'from sqlalchemy.orm import deferred, relationship\n'), ((5458, 5491), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (5464, 5491), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5618, 5633), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (5624, 5633), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5736, 5752), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (5742, 5752), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5768, 5784), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (5774, 5784), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5802, 5858), 'sqlalchemy.orm.relationship', 'relationship', (['"""FileMeta"""'], {'backref': '"""bands"""', 'lazy': '"""joined"""'}), "('FileMeta', backref='bands', lazy='joined')\n", (5814, 5858), False, 'from sqlalchemy.orm import deferred, relationship\n'), ((5878, 5920), 'sqlalchemy.orm.relationship', 'relationship', (['"""SourceField"""'], {'lazy': '"""joined"""'}), 
"('SourceField', lazy='joined')\n", (5890, 5920), False, 'from sqlalchemy.orm import deferred, relationship\n'), ((712, 721), 'sqlalchemy.String', 'String', (['(8)'], {}), '(8)\n', (718, 721), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((754, 765), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (760, 765), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((801, 813), 'sqlalchemy.String', 'String', (['(1024)'], {}), '(1024)\n', (807, 813), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((1451, 1462), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (1457, 1462), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((1496, 1506), 'sqlalchemy.String', 'String', (['(16)'], {}), '(16)\n', (1502, 1506), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2141, 2183), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""source_id"""', '"""metric_id"""'], {}), "('source_id', 'metric_id')\n", (2157, 2183), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2267, 2290), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""source.id"""'], {}), "('source.id')\n", (2277, 2290), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2324, 2347), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""metric.id"""'], {}), "('metric.id')\n", (2334, 2347), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2385, 2412), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""projection.id"""'], {}), "('projection.id')\n", 
(2395, 2412), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2443, 2453), 'sqlalchemy.String', 'String', (['(15)'], {}), '(15)\n', (2449, 2453), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((2495, 2506), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (2501, 2506), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((3319, 3342), 'geoalchemy2.Geography', 'Geography', (['"""Point,4326"""'], {}), "('Point,4326')\n", (3328, 3342), False, 'from geoalchemy2 import Geography, Geometry\n'), ((3362, 3373), 'sqlalchemy.String', 'String', (['(512)'], {}), '(512)\n', (3368, 3373), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((3998, 4009), 'sqlalchemy.String', 'String', (['(512)'], {}), '(512)\n', (4004, 4009), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4489, 4502), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (4495, 4502), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4524, 4537), 'sqlalchemy.Column', 'Column', (['JSONB'], {}), '(JSONB)\n', (4530, 4537), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4882, 4894), 'sqlalchemy.String', 'String', (['(4096)'], {}), '(4096)\n', (4888, 4894), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((4950, 4977), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""projection.id"""'], {}), "('projection.id')\n", (4960, 4977), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, 
UniqueConstraint\n'), ((5392, 5425), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""file_meta.file_name"""'], {}), "('file_meta.file_name')\n", (5402, 5425), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((5688, 5717), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""source_field.id"""'], {}), "('source_field.id')\n", (5698, 5717), False, 'from sqlalchemy import Column, Integer, BigInteger, String, Boolean, DateTime, ForeignKey, UniqueConstraint\n'), ((7223, 7253), 'statistics.median', 'statistics.median', (['self.values'], {}), '(self.values)\n', (7240, 7253), False, 'import statistics\n'), ((7312, 7336), 'numpy.array', 'numpy.array', (['self.values'], {}), '(self.values)\n', (7323, 7336), False, 'import numpy\n'), ((7503, 7531), 'statistics.mean', 'statistics.mean', (['self.values'], {}), '(self.values)\n', (7518, 7531), False, 'import statistics\n'), ((7588, 7612), 'numpy.array', 'numpy.array', (['self.values'], {}), '(self.values)\n', (7599, 7612), False, 'import numpy\n'), ((4056, 4080), 'geoalchemy2.Geometry', 'Geometry', (['"""MULTIPOLYGON"""'], {}), "('MULTIPOLYGON')\n", (4064, 4080), False, 'from geoalchemy2 import Geography, Geometry\n'), ((4129, 4148), 'pytz.timezone', 'timezone', (['self.name'], {}), '(self.name)\n', (4137, 4148), False, 'from pytz import timezone\n'), ((7392, 7407), 'numpy.std', 'numpy.std', (['vals'], {}), '(vals)\n', (7401, 7407), False, 'import numpy\n'), ((7666, 7681), 'numpy.std', 'numpy.std', (['vals'], {}), '(vals)\n', (7675, 7681), False, 'import numpy\n')] |
#!/usr/bin/python
# MIT License
#
# Copyright (c) 2021 <NAME>, ASL, ETH Zurich, Switzerland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rosbag
import rospy
import numpy as np
from datetime import datetime
def detect_circles(input_bag, out_file, trajectory_topic):
    """Detect circle segments in a trajectory bag and write them to a CSV.

    Reads trajectory messages from `input_bag` on `trajectory_topic`, finds
    runs of consecutive identical segment times (these are the circle
    segments), and writes one row per circle to `out_file` with start/stop
    expressed in radar time (UTC seconds since midnight).
    """
    # Get trajectory start time and segment times from bag.
    bag = rosbag.Bag(input_bag)
    segment_times = []
    t = rospy.Time(0)
    try:
        for topic, msg, t in bag.read_messages(topics=[trajectory_topic]):
            t = msg.header.stamp
            print('Start time: %d.%d' %(t.secs, t.nsecs))
            for segment in msg.segments:
                segment_times.append(segment.segment_time)
    finally:
        # The bag handle was previously leaked; close it explicitly.
        bag.close()

    # Search for consecutive identical segment times. These are the circle segments!
    diff = np.diff(segment_times)
    change = diff == rospy.Duration(0)
    start = []
    stop = []
    # NOTE(review): the loop stops two entries before the end, so a circle at
    # the very end of the trajectory would be missed -- confirm this is intended.
    for idx in range(0, len(segment_times) - 2):
        t = t + segment_times[idx]
        if not change[idx] and change[idx + 1]:
            start.append(t)
        elif change[idx] and not change[idx + 1]:
            stop.append(t + segment_times[idx])

    # Merge starts and stops into [start, stop] pairs.
    segment_id = [[a, b] for a, b in zip(start, stop)]
    print('Circle times:')
    print(segment_id)

    # Context manager guarantees the CSV is closed even on error.
    with open(out_file, 'w') as f:
        f.write('id,start,stop\n')
        for circle_id, circle in enumerate(segment_id, start=1):
            # Convert UTC to radar time (seconds since midnight)
            start_time = datetime.utcfromtimestamp(circle[0].to_sec())
            start_time = start_time - start_time.replace(hour=0, minute=0, second=0, microsecond=0)
            end_time = datetime.utcfromtimestamp(circle[1].to_sec())
            end_time = end_time - end_time.replace(hour=0, minute=0, second=0, microsecond=0)
            line = '{:02d},{:.6f},{:.6f}\n'.format(circle_id, start_time.total_seconds(), end_time.total_seconds())
            f.write(line)
| [
"rospy.Duration",
"rospy.Time",
"numpy.diff",
"rosbag.Bag"
] | [((1357, 1378), 'rosbag.Bag', 'rosbag.Bag', (['input_bag'], {}), '(input_bag)\n', (1367, 1378), False, 'import rosbag\n'), ((1410, 1423), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (1420, 1423), False, 'import rospy\n'), ((1782, 1804), 'numpy.diff', 'np.diff', (['segment_times'], {}), '(segment_times)\n', (1789, 1804), True, 'import numpy as np\n'), ((1826, 1843), 'rospy.Duration', 'rospy.Duration', (['(0)'], {}), '(0)\n', (1840, 1843), False, 'import rospy\n')] |
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype
from datetime import datetime
from os import mkdir
from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer
import concurrent
# import cProfile
from statistics import mean
# from math import factorial
# from tqdm import tqdm
# from scipy.stats import poisson
# import matplotlib.pyplot as plt
# from numba import jit
'''
This file evaluates the presence of outliers in 3+ dimensions in the openml.org dataset collection
'''
# Fixed seed so repeated evaluation runs are reproducible.
np.random.seed(0)
# Show full dataframes when printing results.
pd.options.display.max_columns = 1000
pd.options.display.max_rows = 1000
pd.options.display.width = 10000
# Multiplier applied to expected-count thresholds: a value combination is
# considered "rare" when its observed fraction falls below DIVISOR times the
# expected fraction (under uniformity and under the marginals).
DIVISOR = 0.25  # todo: loop through different values of this to see how it affects the results.
def flatten(arr):
    """Recursively flatten a nested list into a flat 1d list.

    Replaces the previous level-by-level approach, which relied on a bare
    `except:` to stop, looped forever on strings (a 1-char string iterates to
    itself), and silently returned partially-nested output for mixed nesting
    like [1, [2]]. For the uniformly nested lists used in this file the
    result is identical.
    """
    flat = []
    for item in arr:
        # Only descend into list/tuple containers; everything else (bools,
        # numbers, strings) is treated as an atomic leaf value.
        if isinstance(item, (list, tuple)):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
def is_float(v):
    """Return True when `v` parses as a float but is not a plain digit string.

    Note: strings of digits ("3") report False, while negative integer
    strings ("-3") are not caught by isdigit() and report True.
    """
    text = str(v)
    if text.isdigit():
        return False
    try:
        float(v)
    except ValueError:
        return False
    return True
class CountsOutlierDetector:
    def __init__(self, n_bins=7, max_dimensions=5, results_folder="", results_name="", run_parallel=False):
        """Configure the detector.

        Args:
            n_bins: Number of bins used to discretize numeric columns.
            max_dimensions: Presumably the maximum number of columns examined
                jointly -- not referenced in the code shown here; confirm.
            results_folder: Folder the results CSV is written to (created on
                demand; empty string disables writing).
            results_name: Prefix for the results CSV file name.
            run_parallel: If True, large 3d+ searches use a process pool.
        """
        self.n_bins = n_bins
        self.max_dimensions = max_dimensions
        self.results_folder = results_folder
        self.results_name = results_name
        self.run_parallel = run_parallel

        # Populated during predict(): per-column type codes ('C'/'N') and, for
        # categorical columns, the fitted OrdinalEncoder (None for numeric).
        self.col_types_arr = []
        self.ordinal_encoders_arr = []
def get_col_types_arr(self, X):
col_types_arr = ['N'] * len(X.columns)
for c in range(len(X.columns)):
num_unique = X[X.columns[c]].nunique()
if not is_numeric_dtype(X[X.columns[c]]):
col_types_arr[c] = 'C'
# Even if the values are numeric, if there are few of them, consider them categorical, though if the values
# are all float, the column will be cast to 'N' when collecting the unique values.
elif is_numeric_dtype(X[X.columns[c]]) and num_unique <= 25:
col_types_arr[c] = 'C'
# If there are a large number of categorical columns, re-determine the types with a more strict cutoff
if col_types_arr.count('C') > 50:
col_types_arr = ['N'] * len(X.columns)
for c in range(len(X.columns)):
num_unique = X[X.columns[c]].nunique()
if not is_numeric_dtype(X[X.columns[c]]):
col_types_arr[c] = 'C'
elif is_numeric_dtype(X[X.columns[c]]) and num_unique <= 5:
col_types_arr[c] = 'C'
return col_types_arr
def ordinal_encode(self, X):
# Numpy deals with numeric values much more efficiently than text
self.ordinal_encoders_arr = [None]*len(X.columns)
for i in range(len(X.columns)):
if self.col_types_arr[i] == 'C':
enc = OrdinalEncoder()
self.ordinal_encoders_arr[i] = enc
col_vals = X[X.columns[i]].values.reshape(-1, 1)
X_np = enc.fit_transform(col_vals).astype(int)
X[X.columns[i]] = X_np
return X
def get_col_value(self, col_idx, value_idx):
if self.col_types_arr[col_idx] == "C":
return self.ordinal_encoders_arr[col_idx].inverse_transform([[value_idx]])[0][0]
else:
return f"Bin {value_idx}"
# Using numba appears to give similar performance results
# @jit(nopython=True)
# def get_cond(X_vals, col_idx, val):
# cond = (X_vals[:, col_idx] == val)
# return cond
def predict(self, X):
# todo: rename this -- it doesn't display
def format_outlier_counts(msg, arr):
nonlocal output_msg
unique_counts = sorted(list(set(arr)))
for uc in unique_counts:
output_msg += f"\n{msg}: {uc}: {arr.count(uc):5}"
# Given two columns i and j, gets, for each pair of values, the fraction of the dataset and the row numbers.
def get_2d_fractions(i, j):
two_d_fractions = []
two_d_row_nums = []
for i_val in unique_vals[i]:
i_vals_fractions = []
i_vals_row_nums = []
cond1 = (X_vals[:, i] == i_val)
for j_val in unique_vals[j]:
#rows_both = np.where((X_vals[:, i] == i_val) & (X_vals[:, j] == j_val))
cond2 = (X_vals[:, j] == j_val)
rows_both = np.where(cond1 & cond2)
i_vals_fractions.append(len(rows_both[0]) / num_rows)
i_vals_row_nums.append(rows_both[0])
two_d_fractions.append(i_vals_fractions)
two_d_row_nums.append(i_vals_row_nums)
return two_d_fractions, two_d_row_nums
def get_unique_vals():
nonlocal output_msg
# An element for each column. For categorical columns, lists the unique values. Used to maintain a
# consistent order.
unique_vals = [[]] * num_cols
num_unique_vals = [0] * num_cols
for i in range(num_cols):
uv = X.iloc[:, i].unique()
# If there are many unique values, remove the float values
# todo: set this threshold as a parameter
# todo: need to save the pre-ordinal encoded values to do this.
# todo: or could not do it: then don't need to save the unique values, just the count and can assume it's
# 0 up to that.
#if len(uv) > 25:
# uv = [v for v in uv if not is_float(v)]
col_threshold = (1.0 / len(uv)) * DIVISOR
unique_vals[i] = uv
num_unique_vals[i] = len(uv)
return unique_vals, num_unique_vals
def get_1d_stats():
nonlocal output_msg
# Parallel 2d array to unique_vals. Indicates the fraction of the total dataset for this value in this column.
fractions_1d = [[]] * num_cols
# Parallel 2d array to unique_vals. Boolean value indicates if the value is considered a 1d outlier.
rare_1d_values = [[]] * num_cols
# Integer value for each row in the dataset indicating how many individual columns have values considered
# outliers.
outliers_1d_arr = [0] * num_rows
# Text explanation for each row explaining each 1d outlier.
outliers_explanation_arr = [""] * num_rows
for i in range(num_cols):
col_threshold = (1.0 / num_unique_vals[i]) * DIVISOR
# Array with an element for each unique value in column i. Indicates the fraction of the total dataset held
# by that value.
col_fractions_1d = []
# Array with an element for each unique value in column i. Indicates if that value is considered rare.
col_rare_1d_values = []
for v in unique_vals[i]: # loop through each unique value in the current column.
frac = X.iloc[:, i].tolist().count(v) / num_rows
col_fractions_1d.append(frac)
rare_values_flag = (frac < col_threshold) and (frac < 0.01)
if rare_values_flag:
rows_matching = np.where(X_vals[:, i] == v)
for r in rows_matching[0]:
outliers_1d_arr[r] += 1
outliers_explanation_arr[r] += f'[Column: {X.columns[i]}, ' + \
f'Value: {self.get_col_value(i,v)}, fraction: {frac}]'
col_rare_1d_values.append(rare_values_flag)
fractions_1d[i] = col_fractions_1d
rare_1d_values[i] = col_rare_1d_values
output_msg += f"\n\n1d: num common values: {flatten(rare_1d_values).count(False)}"
output_msg += f"\n1d: num rare values: {flatten(rare_1d_values).count(True)}"
format_outlier_counts("1d: Outlier Counts by score", outliers_1d_arr)
return fractions_1d, rare_1d_values, outliers_1d_arr, outliers_explanation_arr
def get_2d_stats():
nonlocal output_msg
# This returns 2 parallel 4d arrays, fractions_2d and rare_2d_values, with the dimensions: i column,
# j column, value in i column, value in j column
# Each element stores the fraction of the total dataset with this combination of values.
fractions_2d = [] * num_cols
# Each element stores a boolean indicating if this combination of values is considered rare in the 2d sense.
rare_2d_values = [] * num_cols
# Integer value for each row in the dataset indicating how many pairs of columns have combinations considered
# outliers.
outliers_2d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
for i in range(num_cols):
fractions_2d.append([[]] * num_cols)
rare_2d_values.append([[]] * num_cols)
for i in range(num_cols - 1):
#print("2d i: ",i)
for j in range(i + 1, num_cols):
local_fractions, two_d_row_nums = get_2d_fractions(i, j)
fractions_2d[i][j] = local_fractions
# Determine which of these fraction would be considered rare in the 2d sense
i_rare_arr = []
expected_under_uniform = 1.0 / (len(unique_vals[i]) * len(unique_vals[j]))
for i_vals_idx in range(len(fractions_2d[i][j])):
j_rare_arr = []
for j_vals_idx in range(len(fractions_2d[i][j][i_vals_idx])):
current_fraction = fractions_2d[i][j][i_vals_idx][j_vals_idx]
expected_given_marginal = fractions_1d[i][i_vals_idx] * fractions_1d[j][j_vals_idx]
rare_value_flag = (rare_1d_values[i][i_vals_idx] == False) and \
(rare_1d_values[j][j_vals_idx] == False) and \
(current_fraction < (expected_under_uniform * DIVISOR)) and \
(current_fraction < (expected_given_marginal * DIVISOR)) and \
(current_fraction < 0.01)
if rare_value_flag:
row_nums = two_d_row_nums[i_vals_idx][j_vals_idx]
assert len(row_nums) == round(
current_fraction * num_rows), f"len of matching rows: {len(row_nums)}, fraction*num_rows: current_fraction*num_rows: {current_fraction * num_rows}"
for r in row_nums:
outliers_2d_arr[r] += 1
# todo: format this for 3, 4, 5d too
outliers_explanation_arr[r] += f" [[Columns: {X.columns[i]} and " +\
f"{X.columns[j]} Values: {self.get_col_value(i, i_vals_idx)} and " +\
f"{self.get_col_value(j, j_vals_idx)}, Fraction: {current_fraction}]]"
j_rare_arr.append(rare_value_flag)
i_rare_arr.append(j_rare_arr)
rare_2d_values[i][j] = i_rare_arr
out = flatten(rare_2d_values)
output_msg += f"\n\n2d: num common combinations: {out.count(False)}"
output_msg += f"\n2d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("2d: Outlier Counts by score", outliers_2d_arr)
return fractions_2d, rare_2d_values, outliers_2d_arr, outliers_explanation_arr
def get_3d_stats(num_combinations):
nonlocal output_msg
# This returns 2 parallel 6d arrays: fractions_3d and rare_3d_values (with the dimensions: i column, j column,
# k column, value in i column, value in j column, value in the k column), as well as outliers_3d_arr and
# outliers_explanation_arr.
fractions_3d = [[]] * num_cols # todo: not used, though will if go to 4d
rare_3d_values = [[]] * num_cols
outliers_3d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
column_combos_checked = 0
run_parallel_3d = self.run_parallel
if num_combinations < 1_000_000:
run_parallel_3d = False
if run_parallel_3d:
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for i in range(num_cols):
f = executor.submit(process_inner_loop_3d,
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values)
process_arr.append(f)
for f_idx, f in enumerate(process_arr):
rare_arr_for_i, outliers_3d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = f.result()
rare_3d_values[f_idx] = rare_arr_for_i
outliers_3d_arr = [x + y for x, y in zip(outliers_3d_arr, outliers_3d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
#print("outliers_3d_arr_for_i: ", outliers_3d_arr_for_i.count(0), outliers_3d_arr_for_i.count(1))
else:
for i in range(num_cols):
rare_arr_for_i, outliers_3d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = process_inner_loop_3d(
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values
)
rare_3d_values[i] = rare_arr_for_i
outliers_3d_arr = [x + y for x, y in zip(outliers_3d_arr, outliers_3d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
out = flatten(rare_3d_values)
output_msg += f"\n\n3d: num common combinations: {out.count(False)}"
output_msg += f"\n3d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("3d: Outlier Counts by score", outliers_3d_arr)
return fractions_3d, rare_3d_values, outliers_3d_arr, outliers_explanation_arr, column_combos_checked
def get_4d_stats(num_combinations):
nonlocal output_msg
# This returns 2 parallel 8d arrays: fractions_4d and rare_4d_values (with the dimensions: i column, j column,
# k column, m column, value in i column, value in j column, value in the k column, value in the m column),
# as well as outliers_43d_arr and outliers_explanation_arr.
fractions_4d = [[]] * num_cols
rare_4d_values = [[]] * num_cols
outliers_4d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
column_combos_checked = 0
run_parallel_4d = self.run_parallel
if num_combinations < 1_000_000:
run_parallel_4d = False
if run_parallel_4d:
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for i in range(num_cols):
f = executor.submit(process_inner_loop_4d,
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values)
process_arr.append(f)
for f_idx, f in enumerate(process_arr):
rare_arr_for_i, outliers_4d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = f.result()
rare_4d_values[f_idx] = rare_arr_for_i
outliers_4d_arr = [x + y for x, y in zip(outliers_4d_arr, outliers_4d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
else:
for i in range(num_cols):
rare_arr_for_i, outliers_4d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = process_inner_loop_4d(
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values
)
rare_4d_values[i] = rare_arr_for_i
outliers_4d_arr = [x + y for x, y in zip(outliers_4d_arr, outliers_4d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
out = flatten(rare_4d_values)
output_msg += f"\n\n4d: num common combinations: {out.count(False)}"
output_msg += f"\n4d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("4d: Outlier Counts by score", outliers_4d_arr)
return fractions_4d, rare_4d_values, outliers_4d_arr, outliers_explanation_arr, column_combos_checked
def get_5d_stats(num_combinations):
nonlocal output_msg
# todo: update this comment. Make more general, so don't repeat it
# This returns 2 parallel 8d arrays: fractions_5d and rare_5d_values (with the dimensions: i column, j column,
# k column, m column, value in i column, value in j column, value in the k column, value in the m column),
# as well as outliers_5d_arr and outliers_explanation_arr.
fractions_5d = [[]] * num_cols
rare_5d_values = [[]] * num_cols
outliers_5d_arr = [0] * num_rows
outliers_explanation_arr = [""] * num_rows
column_combos_checked = 0
run_parallel_5d = self.run_parallel
if num_combinations < 1_000_000:
run_parallel_5d = False
if run_parallel_5d:
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for i in range(num_cols):
f = executor.submit(process_inner_loop_5d,
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values,
rare_4d_values)
process_arr.append(f)
for f_idx, f in enumerate(process_arr):
rare_arr_for_i, outliers_5d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = f.result()
rare_5d_values[f_idx] = rare_arr_for_i
outliers_5d_arr = [x + y for x, y in zip(outliers_5d_arr, outliers_5d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
else:
for i in range(num_cols):
rare_arr_for_i, outliers_5d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i = process_inner_loop_5d(
i,
X_vals,
num_cols,
num_rows,
unique_vals,
fractions_1d,
rare_1d_values,
rare_2d_values,
rare_3d_values,
rare_4d_values
)
rare_5d_values[i] = rare_arr_for_i
outliers_5d_arr = [x + y for x, y in zip(outliers_5d_arr, outliers_5d_arr_for_i)]
outliers_explanation_arr = [x + y for x, y in zip(outliers_explanation_arr, outliers_explanation_arr_for_i)]
column_combos_checked += column_combos_checked_for_i
out = flatten(rare_5d_values)
output_msg += f"\n\n5d: num common combinations: {out.count(False)}"
output_msg += f"\n5d: num rare combinations: {out.count(True)} (Typically most with zero rows)"
format_outlier_counts("5d: Outlier Counts by score", outliers_5d_arr)
return fractions_5d, rare_5d_values, outliers_5d_arr, outliers_explanation_arr, column_combos_checked
def create_output_csv(outliers_1d_arr,
                      outliers_2d_arr,
                      outliers_3d_arr,
                      outliers_4d_arr,
                      outliers_5d_arr,
                      explanations_1d_arr,
                      explanations_2d_arr,
                      explanations_3d_arr,
                      explanations_4d_arr,
                      explanations_5d_arr):
    """Assemble the per-row outlier results into a dataframe and save it.

    Closure over the enclosing method: reads ``self.results_folder`` and
    ``self.results_name``. When a results folder is configured, the
    dataframe is also written there as a timestamped CSV file.

    Args:
        outliers_*d_arr: per-row outlier counts at each dimensionality.
        explanations_*d_arr: per-row explanation strings at each dimensionality.

    Returns:
        pd.DataFrame: one row per input row, with the count and explanation
        columns plus derived boolean summary columns.
    """
    import os

    if self.results_folder != "":
        try:
            mkdir(self.results_folder)
        except FileExistsError:
            pass  # Folder already exists -- nothing to do.
        except Exception as e:
            print(f"Error creating results folder: {e}")
    # todo: de-encode from the ordinal values in the explanations
    df = pd.DataFrame({"1d Counts": outliers_1d_arr,
                       "2d Counts": outliers_2d_arr,
                       "3d Counts": outliers_3d_arr,
                       "4d Counts": outliers_4d_arr,
                       "5d Counts": outliers_5d_arr,
                       "1d Explanations": explanations_1d_arr,
                       "2d Explanations": explanations_2d_arr,
                       "3d Explanations": explanations_3d_arr,
                       "4d Explanations": explanations_4d_arr,
                       "5d Explanations": explanations_5d_arr
                       })
    df['Any at 1d'] = df['1d Counts'] > 0
    df['Any at 2d'] = df['2d Counts'] > 0
    df['Any at 3d'] = df['3d Counts'] > 0
    df['Any at 4d'] = df['4d Counts'] > 0
    df['Any at 5d'] = df['5d Counts'] > 0
    # BUG FIX: the comparisons below must be parenthesized. In Python ``|``
    # binds tighter than ``>``, so the original
    # ``df['Any up to 1d'] | df['2d Counts'] > 0`` evaluated as
    # ``(bool_col | count_col) > 0`` -- a bitwise OR of booleans with raw
    # counts -- rather than the intended OR of two boolean columns.
    df['Any up to 1d'] = df['1d Counts'] > 0
    df['Any up to 2d'] = df['Any up to 1d'] | (df['2d Counts'] > 0)
    df['Any up to 3d'] = df['Any up to 2d'] | (df['3d Counts'] > 0)
    df['Any up to 4d'] = df['Any up to 3d'] | (df['4d Counts'] > 0)
    df['Any up to 5d'] = df['Any up to 4d'] | (df['5d Counts'] > 0)
    df['Any Scored'] = (df['1d Counts'] + df['2d Counts'] + df['3d Counts'] + df['4d Counts'] + df['5d Counts']) > 0
    if self.results_folder != "":
        now = datetime.now()
        dt_string = now.strftime("%d_%m_%Y_%H_%M_%S")
        # BUG FIX: use os.path.join rather than a hard-coded Windows "\\"
        # separator so the results file is written correctly on any OS.
        file_name = os.path.join(self.results_folder,
                                 self.results_name + "_results_" + dt_string + ".csv")
        df.to_csv(file_name)
    return df
################################
# Start of code
################################
# Bin any numeric columns
self.col_types_arr = self.get_col_types_arr(X)
numeric_col_names = []
for c in range(len(self.col_types_arr)):
if self.col_types_arr[c] == 'N':
numeric_col_names.append(X.columns[c])
# todo: test with k-means as the strategy
est = KBinsDiscretizer(n_bins=self.n_bins, encode='ordinal', strategy='uniform')
if len(numeric_col_names):
X_num = X[numeric_col_names]
Xt = est.fit_transform(X_num)
for num_idx, col_name in enumerate(numeric_col_names):
X[col_name] = Xt[:, num_idx].astype(int)
# Remove any columns with 1 unique value or a very large number of unique values
# todo: make these limits parameters
col_names_arr = []
for c in range(len(X.columns)):
if X[X.columns[c]].nunique() < 2 or X[X.columns[c]].nunique() > 50:
col_names_arr.append(X.columns[c])
X = X.drop(columns=col_names_arr)
num_cols = len(X.columns)
num_rows = len(X)
#output_msg = print_header(dataset_index, dataset_name)
output_msg = f"\nNumber of rows: {num_rows}"
output_msg += f"\nNumber of columns: {num_cols}"
# Create a summary of this run, giving statistics about the outliers found
run_summary_df = pd.DataFrame(columns=[
'Percent Flagged as 1d',
'Percent Flagged as 2d',
'Percent Flagged as 3d',
'Percent Flagged as 4d',
'Percent Flagged as 5d',
'Percent Flagged up to 1d',
'Percent Flagged up to 2d',
'Percent Flagged up to 3d',
'Percent Flagged up to 4d',
'Percent Flagged up to 5d',
'Checked_3d', # False if too many combinations to even check
'Checked_4d',
'Checked_5d',
'3d column combos checked', # Skip column combinations where expected count based on marginal probs is too low.
'4d column combos checked',
'5d column combos checked',
'Percent Flagged'])
if num_cols < 2:
output_msg += "\nLess than two categorical columns found. Cannot determine outliers"
return output_msg, run_summary_df
X = self.ordinal_encode(X)
X_vals = X.values
unique_vals, num_unique_vals, = get_unique_vals()
output_msg += f"\nCardinality of the columns: {num_unique_vals}"
# Determine the 1d stats
fractions_1d, rare_1d_values, outliers_1d_arr, explanations_1d_arr = get_1d_stats()
# Determine the 2d stats
fractions_2d, rare_2d_values, outliers_2d_arr, explanations_2d_arr = get_2d_stats()
# Determine the 3d stats unless there are too many columns and unique values to do so efficiently
checked_3d = False
column_combos_checked_3d = -1
avg_num_unique_vals = mean([len(x) for x in unique_vals])
num_combinations = (num_cols*(num_cols-1)*(num_cols-2)) * pow(avg_num_unique_vals, 3)
if num_combinations > 100_000_000: # todo: set this as a parameter
output_msg += (f"\n\nCannot determine 3d outliers given the number of categorical columns ({num_cols}) and" +
"number of unique values in each.")
outliers_3d_arr = [0] * num_rows
explanations_3d_arr = [""] * num_rows
else:
fractions_3d, rare_3d_values, outliers_3d_arr, explanations_3d_arr, column_combos_checked_3d = \
get_3d_stats(num_combinations=num_combinations)
checked_3d = True
# Determine the 4d stats unless there are too many columns and unique values to do so efficiently
# todo here and above just use pow method
checked_4d = False
column_combos_checked_4d = -1
num_combinations = (num_cols*(num_cols-1)*(num_cols-2)*(num_cols-3)) * pow(avg_num_unique_vals, 4)
outliers_4d_arr = [0] * num_rows
explanations_4d_arr = [""] * num_rows
if num_cols < 4:
output_msg += f"\n\nCannot determine 4d outliers. Too few columns: {num_cols}." # todo: these are printing before the output for 1d, 2d, 3d
elif num_combinations > 100_000_000: # todo: set this as a parameter
output_msg += f"\n\nCannot determine 4d outliers given the number of categorical columns ({num_cols}) and number of unique values in each."
else:
fractions_4d, rare_4d_values, outliers_4d_arr, explanations_4d_arr, column_combos_checked_4d = \
get_4d_stats(num_combinations=num_combinations)
checked_4d = True
# Determine the 5d stats unless there are too many columns and unique values to do so efficiently
checked_5d = False
column_combos_checked_5d = -1
num_combinations = (num_cols*(num_cols-1)*(num_cols-2)*(num_cols-3)*(num_cols-4)) * pow(avg_num_unique_vals, 5)
outliers_5d_arr = [0] * num_rows
explanations_5d_arr = [""] * num_rows
if num_cols < 5:
output_msg += f"\n\nCannot determine 5d outliers. Too few columns: {num_cols}." # todo: these are printing before the output for 1d, 2d, 3d
elif num_combinations > 100_000_000: # todo: set this as a parameter
output_msg += f"\n\nCannot determine 5d outliers given the number of categorical columns ({num_cols}) and number of unique values in each."
else:
fractions_5d, rare_5d_values, outliers_5d_arr, explanations_5d_arr, column_combos_checked_5d = \
get_5d_stats(num_combinations=num_combinations)
checked_5d = True
flagged_rows_df = create_output_csv(
outliers_1d_arr, outliers_2d_arr, outliers_3d_arr, outliers_4d_arr, outliers_5d_arr,
explanations_1d_arr, explanations_2d_arr, explanations_3d_arr, explanations_4d_arr, explanations_5d_arr)
num_rows_scored = list(flagged_rows_df['Any at 1d'] > 0).count(True)
output_msg += f"\n\nNumber of rows flagged as outliers examining 1d: {num_rows_scored}" +\
f" ({round(num_rows_scored*100.0/num_rows,1)}%)"
num_rows_scored = list(flagged_rows_df['Any at 2d'] > 0).count(True)
output_msg += f"\nNumber of rows flagged as outliers examining 2d: {num_rows_scored} " +\
f"({round(num_rows_scored*100.0/num_rows,1)}%)"
num_rows_scored = list(flagged_rows_df['Any at 3d'] > 0).count(True)
output_msg += f"\nNumber of rows flagged as outliers examining 3d: {num_rows_scored} " +\
f"({round(num_rows_scored*100.0/num_rows,1)}%)"
num_rows_scored = list(flagged_rows_df['Any at 4d'] > 0).count(True)
output_msg += f"\nNumber of rows flagged as outliers examining 4d: {num_rows_scored} " +\
f"({round(num_rows_scored*100.0/num_rows,1)}%)"
num_rows_scored = list(flagged_rows_df['Any at 5d'] > 0).count(True)
output_msg += f"\nNumber of rows flagged as outliers examining 5d: {num_rows_scored} " +\
f"({round(num_rows_scored*100.0/num_rows,1)}%)"
# Update run_summary_df
run_summary_df = run_summary_df.append(pd.DataFrame(np.array([[
flagged_rows_df['Any at 1d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any at 2d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any at 3d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any at 4d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any at 5d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any up to 1d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any up to 2d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any up to 3d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any up to 4d'].sum() * 100.0 / num_rows,
flagged_rows_df['Any up to 5d'].sum() * 100.0 / num_rows,
checked_3d,
checked_4d,
checked_5d,
column_combos_checked_3d,
column_combos_checked_4d,
column_combos_checked_5d,
flagged_rows_df['Any Scored'].sum() * 100.0 / num_rows]]),
columns=run_summary_df.columns))
row_explanations = self.output_explanations(flagged_rows_df)
return flagged_rows_df, row_explanations, output_msg, run_summary_df
def output_explanations(self, flagged_rows_df):
    """Collect the explanation strings for every flagged row.

    Args:
        flagged_rows_df: dataframe produced by create_output_csv(); must
            contain an 'Any Scored' boolean column and one
            '<N>d Explanations' column per dimensionality examined.

    Returns:
        pd.DataFrame: one row per flagged record with its original row
        index and the explanation string for each dimensionality
        (1d..5d); dimensionalities beyond self.max_dimensions stay "".
    """
    df_subset = flagged_rows_df[flagged_rows_df['Any Scored']]
    expl_arr = []
    index_arr = list(df_subset.index)
    # BUG FIX: the original reused ``i`` for both the row loop and the
    # dimension loop, shadowing the outer index. Harmless here only
    # because ``for`` reassigns its variable, but a latent trap; the two
    # loops now use distinct names.
    for row_pos in range(len(df_subset)):
        row = df_subset.iloc[row_pos]
        # Slot 0 is the row index; slots 1..5 hold the per-dimension text.
        row_expl = [index_arr[row_pos], "", "", "", "", ""]
        for dim in range(1, self.max_dimensions + 1):
            col_name = f"{dim}d Explanations"
            row_expl[dim] = row[col_name]
        expl_arr.append(row_expl)
    expl_df = pd.DataFrame(expl_arr, columns=['Row Index', '1d Explanations', '2d Explanations', '3d Explanations',
                                             '4d Explanations', '5d Explanations'])
    return expl_df
# These methods are outside the class so can be called as concurrent processes
def process_inner_loop_3d(
        i,
        X_vals,
        num_cols,
        num_rows,
        unique_vals,
        fractions_1d,
        rare_1d_values,
        rare_2d_values):
    """Check every 3d column combination (i, j, k) with i < j < k for rare value triples.

    Runs the inner (j, k) loops for a fixed first column ``i`` so the work
    can be farmed out to one process per column. Value triples already
    flagged as rare at 1d or 2d are skipped so each row is only explained
    at its lowest dimensionality.

    Returns:
        tuple: (rare_arr_for_i, outliers_3d_arr_for_i,
                outliers_explanation_arr_for_i, column_combos_checked_for_i)
    """
    num_unique_vals_i = len(unique_vals[i])
    outliers_3d_arr_for_i = [0] * num_rows
    outliers_explanation_arr_for_i = [""] * num_rows
    column_combos_checked_for_i = 0
    # One (j, k) slot per pair of later columns; comprehensions keep every
    # sub-list independent.
    rare_arr_for_i = [[[] for _ in range(num_cols)] for _ in range(num_cols)]
    for j in range(i + 1, num_cols - 1):
        num_unique_vals_j = len(unique_vals[j])
        for k in range(j + 1, num_cols):
            num_unique_vals_k = len(unique_vals[k])
            # Skip column combos where even a uniform distribution would put
            # too few rows in each cell for "rare" to be meaningful.
            expected_under_uniform = 1.0 / (len(unique_vals[i]) * len(unique_vals[j]) * len(unique_vals[k]))
            expected_count_under_uniform = num_rows * expected_under_uniform
            if expected_count_under_uniform < 10:
                continue
            column_combos_checked_for_i += 1
            # BUG FIX: the original used [[[False]*K]*J for ...], where the
            # ``*J`` makes all J rows share ONE K-list, so setting a flag
            # for one j value bled across every other j value. Nested
            # comprehensions create independent lists.
            local_rare_arr = [[[False] * num_unique_vals_k
                               for _ in range(num_unique_vals_j)]
                              for _ in range(num_unique_vals_i)]
            for i_vals_idx in range(num_unique_vals_i):
                if rare_1d_values[i][i_vals_idx]:
                    continue
                i_val = unique_vals[i][i_vals_idx]
                cond1 = (X_vals[:, i] == i_val)
                for j_vals_idx in range(num_unique_vals_j):
                    if rare_1d_values[j][j_vals_idx]:
                        continue
                    if rare_2d_values[i][j][i_vals_idx][j_vals_idx]:
                        continue
                    j_val = unique_vals[j][j_vals_idx]
                    cond2 = (X_vals[:, j] == j_val)
                    for k_vals_idx in range(num_unique_vals_k):
                        if rare_1d_values[k][k_vals_idx]:
                            continue
                        if rare_2d_values[i][k][i_vals_idx][k_vals_idx]:
                            continue
                        if rare_2d_values[j][k][j_vals_idx][k_vals_idx]:
                            continue
                        k_val = unique_vals[k][k_vals_idx]
                        cond3 = (X_vals[:, k] == k_val)
                        rows_all = np.where(cond1 & cond2 & cond3)
                        current_fraction = len(rows_all[0]) / num_rows
                        three_d_row_nums = rows_all[0]
                        expected_given_marginal = fractions_1d[i][i_vals_idx] * fractions_1d[j][j_vals_idx] * \
                            fractions_1d[k][k_vals_idx]
                        # Rare iff far below both the uniform and the
                        # marginal-probability expectation, and globally rare.
                        rare_value_flag = (current_fraction < (expected_under_uniform * DIVISOR)) and \
                            (current_fraction < (expected_given_marginal * DIVISOR)) and \
                            (current_fraction < 0.01)
                        if rare_value_flag:
                            row_nums = three_d_row_nums  # todo: can remove some variables here
                            assert len(row_nums) == round(current_fraction * num_rows), \
                                f"len of matching rows: {len(row_nums)}, fraction*num_rows: current_fraction*num_rows: {current_fraction * num_rows}"
                            for r in row_nums:
                                # NOTE(review): safe only because each process owns its own arrays.
                                outliers_3d_arr_for_i[r] += 1
                                outliers_explanation_arr_for_i[r] += f" [[[Columns: {i} {j} {k} Values: {i_vals_idx} {j_vals_idx} {k_vals_idx}, Fraction: {current_fraction}]]]"
                        local_rare_arr[i_vals_idx][j_vals_idx][k_vals_idx] = rare_value_flag
            rare_arr_for_i[j][k] = local_rare_arr
    return rare_arr_for_i, outliers_3d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i
def process_inner_loop_4d(
        i,
        X_vals,
        num_cols,
        num_rows,
        unique_vals,
        fractions_1d,
        rare_1d_values,
        rare_2d_values,
        rare_3d_values
        ):
    """Check every 4d column combination (i, j, k, m) with i < j < k < m for rare value tuples.

    Runs the inner loops for a fixed first column ``i``. Value tuples
    already flagged at 1d, 2d or 3d are skipped so each row is only
    explained at its lowest dimensionality.

    Returns:
        tuple: (rare_arr_for_i, outliers_4d_arr_for_i,
                outliers_explanation_arr_for_i, column_combos_checked_for_i)
    """
    num_unique_vals_i = len(unique_vals[i])
    outliers_4d_arr_for_i = [0] * num_rows
    outliers_explanation_arr_for_i = [""] * num_rows
    # BUG FIX: the original built this with list multiplication
    # ([[[[]]*num_cols]*num_cols for ...]), which makes rare_arr_for_i[j][k]
    # the SAME list object for every k, so assigning rare_arr_for_i[j][k][m]
    # corrupted the entries of all other k. Nested comprehensions create
    # independent lists.
    rare_arr_for_i = [[[[] for _ in range(num_cols)]
                       for _ in range(num_cols)]
                      for _ in range(num_cols)]
    column_combos_checked_for_i = 0
    max_cardinality = max(len(x) for x in unique_vals)
    for j in range(i + 1, num_cols - 2):
        num_unique_vals_j = len(unique_vals[j])
        for k in range(j + 1, num_cols - 1):
            num_unique_vals_k = len(unique_vals[k])
            for m in range(k + 1, num_cols):
                num_unique_vals_m = len(unique_vals[m])
                # Skip column combos where even a uniform distribution would
                # put too few rows in each cell to judge rarity.
                expected_under_uniform = 1.0 / (len(unique_vals[i]) * len(unique_vals[j]) * len(unique_vals[k]) * len(unique_vals[m]))
                expected_count_under_uniform = num_rows * expected_under_uniform
                if expected_count_under_uniform < 10:
                    continue
                column_combos_checked_for_i += 1
                # BUG FIX: list multiplication aliased every sub-list; the
                # original then needed a try/except around the flag
                # assignment. Independent lists make that guard unnecessary.
                local_rare_arr = [[[[False] * max_cardinality
                                    for _ in range(max_cardinality)]
                                   for _ in range(max_cardinality)]
                                  for _ in range(max_cardinality)]
                for i_vals_idx in range(num_unique_vals_i):
                    if rare_1d_values[i][i_vals_idx]:
                        continue
                    i_val = unique_vals[i][i_vals_idx]
                    cond1 = (X_vals[:, i] == i_val)
                    for j_vals_idx in range(num_unique_vals_j):
                        if rare_1d_values[j][j_vals_idx]:
                            continue
                        j_val = unique_vals[j][j_vals_idx]
                        cond2 = (X_vals[:, j] == j_val)
                        if rare_2d_values[i][j][i_vals_idx][j_vals_idx]:
                            continue
                        for k_vals_idx in range(num_unique_vals_k):
                            if rare_1d_values[k][k_vals_idx]:
                                continue
                            if rare_2d_values[i][k][i_vals_idx][k_vals_idx]:
                                continue
                            if rare_2d_values[j][k][j_vals_idx][k_vals_idx]:
                                continue
                            if rare_3d_values[i][j][k][i_vals_idx][j_vals_idx][k_vals_idx]:
                                continue
                            k_val = unique_vals[k][k_vals_idx]
                            cond3 = (X_vals[:, k] == k_val)
                            for m_vals_idx in range(num_unique_vals_m):
                                # Skip any tuple already rare at a lower dimensionality.
                                if rare_1d_values[m][m_vals_idx]:
                                    continue
                                if rare_2d_values[i][m][i_vals_idx][m_vals_idx]:
                                    continue
                                if rare_2d_values[j][m][j_vals_idx][m_vals_idx]:
                                    continue
                                if rare_2d_values[k][m][k_vals_idx][m_vals_idx]:
                                    continue
                                if rare_3d_values[i][j][m][i_vals_idx][j_vals_idx][m_vals_idx]:
                                    continue
                                if rare_3d_values[i][k][m][i_vals_idx][k_vals_idx][m_vals_idx]:
                                    continue
                                if rare_3d_values[j][k][m][j_vals_idx][k_vals_idx][m_vals_idx]:
                                    continue
                                m_val = unique_vals[m][m_vals_idx]
                                cond4 = (X_vals[:, m] == m_val)
                                rows_all = np.where(cond1 & cond2 & cond3 & cond4)
                                current_fraction = len(rows_all[0]) / num_rows
                                four_d_row_nums = rows_all[0]  # todo: use less variables
                                expected_given_marginal = fractions_1d[i][i_vals_idx] * fractions_1d[j][j_vals_idx] * \
                                    fractions_1d[k][k_vals_idx] * fractions_1d[m][m_vals_idx]
                                # Rare iff far below both the uniform and the
                                # marginal expectation, and globally rare.
                                rare_value_flag = (current_fraction < (expected_under_uniform * DIVISOR)) and \
                                    (current_fraction < (expected_given_marginal * DIVISOR)) and \
                                    (current_fraction < 0.01)
                                if rare_value_flag:
                                    row_nums = four_d_row_nums  # todo: can remove some variables here
                                    assert len(row_nums) == round(current_fraction * num_rows), \
                                        f"len of matching rows: {len(row_nums)}, " \
                                        f"fraction*num_rows: current_fraction*num_rows: {current_fraction * num_rows}"
                                    for r in row_nums:
                                        outliers_4d_arr_for_i[r] += 1
                                        # todo: use the actual values, not their index
                                        outliers_explanation_arr_for_i[r] += \
                                            f" [[[Columns: {i} {j} {k} {m}" \
                                            f"Values: {i_vals_idx} {j_vals_idx} {k_vals_idx} {m_vals_idx}" \
                                            f"Fraction: {current_fraction}]]]"
                                local_rare_arr[i_vals_idx][j_vals_idx][k_vals_idx][m_vals_idx] = rare_value_flag
                rare_arr_for_i[j][k][m] = local_rare_arr
    return rare_arr_for_i, outliers_4d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i
def process_inner_loop_5d(
        i,
        X_vals,
        num_cols,
        num_rows,
        unique_vals,
        fractions_1d,
        rare_1d_values,
        rare_2d_values,
        rare_3d_values,
        rare_4d_values
        ):
    """Check every 5d column combination (i, j, k, m, n) with i < j < k < m < n for rare value tuples.

    Runs the inner loops for a fixed first column ``i``. Value tuples
    already flagged at 1d, 2d, 3d or 4d are skipped so each row is only
    explained at its lowest dimensionality.

    Returns:
        tuple: (rare_arr_for_i, outliers_5d_arr_for_i,
                outliers_explanation_arr_for_i, column_combos_checked_for_i)
    """
    num_unique_vals_i = len(unique_vals[i])
    outliers_5d_arr_for_i = [0] * num_rows
    outliers_explanation_arr_for_i = [""] * num_rows
    # BUG FIX: the original [[[[[]]*num_cols]*num_cols]*num_cols]*num_cols
    # aliases the same list object at EVERY level, so a single assignment to
    # rare_arr_for_i[j][k][m][n] corrupted many unrelated slots. Nested
    # comprehensions create independent lists.
    rare_arr_for_i = [[[[[] for _ in range(num_cols)]
                        for _ in range(num_cols)]
                       for _ in range(num_cols)]
                      for _ in range(num_cols)]
    column_combos_checked_for_i = 0
    for j in range(i + 1, num_cols - 3):
        num_unique_vals_j = len(unique_vals[j])
        for k in range(j + 1, num_cols - 2):
            num_unique_vals_k = len(unique_vals[k])
            for m in range(k + 1, num_cols - 1):
                num_unique_vals_m = len(unique_vals[m])
                for n in range(m + 1, num_cols):
                    num_unique_vals_n = len(unique_vals[n])
                    # Skip column combos where even a uniform distribution
                    # would put too few rows in each cell to judge rarity.
                    expected_under_uniform = 1.0 / (len(unique_vals[i]) * len(unique_vals[j]) * len(unique_vals[k]) * len(unique_vals[m]) * len(unique_vals[n]))
                    expected_count_under_uniform = num_rows * expected_under_uniform
                    if expected_count_under_uniform < 10:
                        continue
                    column_combos_checked_for_i += 1
                    # local_rare_arr represents the current set of columns:
                    # one flag per 5-tuple of values. BUG FIX: built with
                    # nested comprehensions -- the original list
                    # multiplication made every slice share one innermost
                    # list, so flags bled across value combinations.
                    local_rare_arr = [[[[[False] * num_unique_vals_n
                                         for _ in range(num_unique_vals_m)]
                                        for _ in range(num_unique_vals_k)]
                                       for _ in range(num_unique_vals_j)]
                                      for _ in range(num_unique_vals_i)]
                    for i_vals_idx in range(num_unique_vals_i):
                        if rare_1d_values[i][i_vals_idx]:
                            continue
                        i_val = unique_vals[i][i_vals_idx]
                        cond1 = (X_vals[:, i] == i_val)
                        for j_vals_idx in range(num_unique_vals_j):
                            if rare_1d_values[j][j_vals_idx]:
                                continue
                            j_val = unique_vals[j][j_vals_idx]
                            cond2 = (X_vals[:, j] == j_val)
                            if rare_2d_values[i][j][i_vals_idx][j_vals_idx]:
                                continue
                            for k_vals_idx in range(num_unique_vals_k):
                                if rare_1d_values[k][k_vals_idx]:
                                    continue
                                if rare_2d_values[i][k][i_vals_idx][k_vals_idx]:
                                    continue
                                if rare_2d_values[j][k][j_vals_idx][k_vals_idx]:
                                    continue
                                if rare_3d_values[i][j][k][i_vals_idx][j_vals_idx][k_vals_idx]:
                                    continue
                                k_val = unique_vals[k][k_vals_idx]
                                cond3 = (X_vals[:, k] == k_val)
                                for m_vals_idx in range(num_unique_vals_m):
                                    if rare_1d_values[m][m_vals_idx]:
                                        continue
                                    if rare_2d_values[i][m][i_vals_idx][m_vals_idx]:
                                        continue
                                    if rare_2d_values[j][m][j_vals_idx][m_vals_idx]:
                                        continue
                                    if rare_2d_values[k][m][k_vals_idx][m_vals_idx]:
                                        continue
                                    if rare_3d_values[i][j][m][i_vals_idx][j_vals_idx][m_vals_idx]:
                                        continue
                                    if rare_3d_values[i][k][m][i_vals_idx][k_vals_idx][m_vals_idx]:
                                        continue
                                    if rare_3d_values[j][k][m][j_vals_idx][k_vals_idx][m_vals_idx]:
                                        continue
                                    m_val = unique_vals[m][m_vals_idx]
                                    cond4 = (X_vals[:, m] == m_val)
                                    for n_vals_idx in range(num_unique_vals_n):
                                        # Skip any tuple already rare at a lower dimensionality.
                                        if rare_1d_values[n][n_vals_idx]:
                                            continue
                                        if rare_2d_values[i][n][i_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_2d_values[j][n][j_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_2d_values[k][n][k_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_2d_values[m][n][m_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_3d_values[i][j][n][i_vals_idx][j_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_3d_values[i][k][n][i_vals_idx][k_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_3d_values[i][m][n][i_vals_idx][m_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_3d_values[j][k][n][j_vals_idx][k_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_3d_values[j][m][n][j_vals_idx][m_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_3d_values[k][m][n][k_vals_idx][m_vals_idx][n_vals_idx]:
                                            continue
                                        if rare_4d_values[i][j][k][m][i_vals_idx][j_vals_idx][k_vals_idx][m_vals_idx]:
                                            continue
                                        # The guards below tolerate a malformed rare_4d_values
                                        # structure (produced by the aliasing bug in the 4d
                                        # pass) rather than aborting the whole process.
                                        try:
                                            if rare_4d_values[i][j][k][n][i_vals_idx][j_vals_idx][k_vals_idx][n_vals_idx]:
                                                continue
                                        except Exception as e:
                                            print(f" case 2 error: {e}, indexes: {i},{j},{k},{n},{i_vals_idx},{j_vals_idx},{k_vals_idx},{n_vals_idx}")
                                        try:
                                            if rare_4d_values[i][j][m][n][i_vals_idx][j_vals_idx][m_vals_idx][n_vals_idx]:
                                                continue
                                        except Exception as e:
                                            print(f" case 3 error: {e}, indexes: {i},{j},{m},{n},{i_vals_idx},{j_vals_idx},{m_vals_idx},{n_vals_idx}")
                                        if rare_4d_values[i][k][m][n][i_vals_idx][k_vals_idx][m_vals_idx][n_vals_idx]:
                                            continue
                                        try:
                                            if rare_4d_values[j][k][m][n][j_vals_idx][k_vals_idx][m_vals_idx][n_vals_idx]:
                                                continue
                                        except Exception as e:
                                            print(f"case 5 error: {e}, indexes: {j},{k},{m},{n},{j_vals_idx},{k_vals_idx},{m_vals_idx},{n_vals_idx}")
                                        n_val = unique_vals[n][n_vals_idx]
                                        cond5 = (X_vals[:, n] == n_val)
                                        rows_all = np.where(cond1 & cond2 & cond3 & cond4 & cond5)
                                        current_fraction = len(rows_all[0]) / num_rows
                                        five_d_row_nums = rows_all[0]  # todo: use less variables
                                        expected_given_marginal = fractions_1d[i][i_vals_idx] * fractions_1d[j][j_vals_idx] * \
                                            fractions_1d[k][k_vals_idx] * fractions_1d[m][m_vals_idx] * fractions_1d[n][n_vals_idx]
                                        # Rare iff far below both the uniform and the
                                        # marginal expectation, and globally rare.
                                        rare_value_flag = (current_fraction < (expected_under_uniform * DIVISOR)) and \
                                            (current_fraction < (expected_given_marginal * DIVISOR)) and \
                                            (current_fraction < 0.01)
                                        if rare_value_flag:
                                            row_nums = five_d_row_nums  # todo: can remove some variables here
                                            assert len(row_nums) == round(current_fraction * num_rows), \
                                                f"len of matching rows: {len(row_nums)}, fraction*num_rows: current_fraction*num_rows: {current_fraction * num_rows}"
                                            for r in row_nums:
                                                outliers_5d_arr_for_i[r] += 1
                                                # todo: use the actual values, not their index
                                                outliers_explanation_arr_for_i[r] += f" [[[Columns: {i} {j} {k} {m} {n} Values: {i_vals_idx} {j_vals_idx} {k_vals_idx} {m_vals_idx} {n_vals_idx} Fraction: {current_fraction}]]]"
                                        local_rare_arr[i_vals_idx][j_vals_idx][k_vals_idx][m_vals_idx][n_vals_idx] = rare_value_flag
                    rare_arr_for_i[j][k][m][n] = local_rare_arr
    return rare_arr_for_i, outliers_5d_arr_for_i, outliers_explanation_arr_for_i, column_combos_checked_for_i
| [
"sklearn.preprocessing.KBinsDiscretizer",
"numpy.where",
"pandas.api.types.is_numeric_dtype",
"datetime.datetime.now",
"numpy.random.seed",
"concurrent.futures.ProcessPoolExecutor",
"sklearn.preprocessing.OrdinalEncoder",
"pandas.DataFrame",
"os.mkdir"
] | [((521, 538), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (535, 538), True, 'import numpy as np\n'), ((25759, 25833), 'sklearn.preprocessing.KBinsDiscretizer', 'KBinsDiscretizer', ([], {'n_bins': 'self.n_bins', 'encode': '"""ordinal"""', 'strategy': '"""uniform"""'}), "(n_bins=self.n_bins, encode='ordinal', strategy='uniform')\n", (25775, 25833), False, 'from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer\n'), ((26795, 27251), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Percent Flagged as 1d', 'Percent Flagged as 2d', 'Percent Flagged as 3d',\n 'Percent Flagged as 4d', 'Percent Flagged as 5d',\n 'Percent Flagged up to 1d', 'Percent Flagged up to 2d',\n 'Percent Flagged up to 3d', 'Percent Flagged up to 4d',\n 'Percent Flagged up to 5d', 'Checked_3d', 'Checked_4d', 'Checked_5d',\n '3d column combos checked', '4d column combos checked',\n '5d column combos checked', 'Percent Flagged']"}), "(columns=['Percent Flagged as 1d', 'Percent Flagged as 2d',\n 'Percent Flagged as 3d', 'Percent Flagged as 4d',\n 'Percent Flagged as 5d', 'Percent Flagged up to 1d',\n 'Percent Flagged up to 2d', 'Percent Flagged up to 3d',\n 'Percent Flagged up to 4d', 'Percent Flagged up to 5d', 'Checked_3d',\n 'Checked_4d', 'Checked_5d', '3d column combos checked',\n '4d column combos checked', '5d column combos checked', 'Percent Flagged'])\n", (26807, 27251), True, 'import pandas as pd\n'), ((34441, 34590), 'pandas.DataFrame', 'pd.DataFrame', (['expl_arr'], {'columns': "['Row Index', '1d Explanations', '2d Explanations', '3d Explanations',\n '4d Explanations', '5d Explanations']"}), "(expl_arr, columns=['Row Index', '1d Explanations',\n '2d Explanations', '3d Explanations', '4d Explanations', '5d Explanations']\n )\n", (34453, 34590), True, 'import pandas as pd\n'), ((23608, 23992), 'pandas.DataFrame', 'pd.DataFrame', (["{'1d Counts': outliers_1d_arr, '2d Counts': outliers_2d_arr, '3d Counts':\n outliers_3d_arr, '4d Counts': outliers_4d_arr, 
'5d Counts':\n outliers_5d_arr, '1d Explanations': explanations_1d_arr,\n '2d Explanations': explanations_2d_arr, '3d Explanations':\n explanations_3d_arr, '4d Explanations': explanations_4d_arr,\n '5d Explanations': explanations_5d_arr}"], {}), "({'1d Counts': outliers_1d_arr, '2d Counts': outliers_2d_arr,\n '3d Counts': outliers_3d_arr, '4d Counts': outliers_4d_arr, '5d Counts':\n outliers_5d_arr, '1d Explanations': explanations_1d_arr,\n '2d Explanations': explanations_2d_arr, '3d Explanations':\n explanations_3d_arr, '4d Explanations': explanations_4d_arr,\n '5d Explanations': explanations_5d_arr})\n", (23620, 23992), True, 'import pandas as pd\n'), ((1784, 1817), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['X[X.columns[c]]'], {}), '(X[X.columns[c]])\n', (1800, 1817), False, 'from pandas.api.types import is_numeric_dtype\n'), ((3012, 3028), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (3026, 3028), False, 'from sklearn.preprocessing import OrdinalEncoder, KBinsDiscretizer\n'), ((25073, 25087), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25085, 25087), False, 'from datetime import datetime\n'), ((2090, 2123), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['X[X.columns[c]]'], {}), '(X[X.columns[c]])\n', (2106, 2123), False, 'from pandas.api.types import is_numeric_dtype\n'), ((2512, 2545), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['X[X.columns[c]]'], {}), '(X[X.columns[c]])\n', (2528, 2545), False, 'from pandas.api.types import is_numeric_dtype\n'), ((4618, 4641), 'numpy.where', 'np.where', (['(cond1 & cond2)'], {}), '(cond1 & cond2)\n', (4626, 4641), True, 'import numpy as np\n'), ((12988, 13028), 'concurrent.futures.ProcessPoolExecutor', 'concurrent.futures.ProcessPoolExecutor', ([], {}), '()\n', (13026, 13028), False, 'import concurrent\n'), ((16474, 16514), 'concurrent.futures.ProcessPoolExecutor', 'concurrent.futures.ProcessPoolExecutor', ([], {}), '()\n', (16512, 
16514), False, 'import concurrent\n'), ((20016, 20056), 'concurrent.futures.ProcessPoolExecutor', 'concurrent.futures.ProcessPoolExecutor', ([], {}), '()\n', (20054, 20056), False, 'import concurrent\n'), ((23315, 23341), 'os.mkdir', 'mkdir', (['self.results_folder'], {}), '(self.results_folder)\n', (23320, 23341), False, 'from os import mkdir\n'), ((2611, 2644), 'pandas.api.types.is_numeric_dtype', 'is_numeric_dtype', (['X[X.columns[c]]'], {}), '(X[X.columns[c]])\n', (2627, 2644), False, 'from pandas.api.types import is_numeric_dtype\n'), ((7511, 7538), 'numpy.where', 'np.where', (['(X_vals[:, i] == v)'], {}), '(X_vals[:, i] == v)\n', (7519, 7538), True, 'import numpy as np\n'), ((36927, 36958), 'numpy.where', 'np.where', (['(cond1 & cond2 & cond3)'], {}), '(cond1 & cond2 & cond3)\n', (36935, 36958), True, 'import numpy as np\n'), ((42268, 42307), 'numpy.where', 'np.where', (['(cond1 & cond2 & cond3 & cond4)'], {}), '(cond1 & cond2 & cond3 & cond4)\n', (42276, 42307), True, 'import numpy as np\n'), ((52349, 52396), 'numpy.where', 'np.where', (['(cond1 & cond2 & cond3 & cond4 & cond5)'], {}), '(cond1 & cond2 & cond3 & cond4 & cond5)\n', (52357, 52396), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
This file includes basic helper functions.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.colors as colors
def assignNestedItem(lst, index, value):
    """
    Overwrite the item at a nested position of a list. ``lst`` is modified in place.

    Args:
        lst(list): nested list whose item should be overwritten
        index(list): indices locating the target item, one entry per nesting level
        value: new value to store at that position
    """
    target = lst
    for level in index[:-1]:
        target = target[level]
    target[index[-1]] = value
def recursiveIndex(nestedList, query):
    """
    Locate the first occurrence of an element inside an arbitrarily nested list.

    Args:
        nestedList(list): (possibly nested) list/tuple structure to search
        query: element to look for

    Returns:
        list: index path to the first occurrence; empty list if not found
    """
    for position, item in enumerate(nestedList):
        # Descend into sub-structures first, mirroring a depth-first search.
        if isinstance(item, (list, tuple)):
            sub_path = recursiveIndex(item, query)
            if sub_path:
                return [position] + sub_path
        if item == query:
            return [position]
    return []
def flatten(lst):
    """
    Lazily flatten an arbitrarily nested list/tuple structure.

    Args:
        lst(list): structure to flatten

    Returns:
        Generator yielding the leaf elements in depth-first order (call
        list(flatten(lst)) to materialize the flattened result).
    """
    for item in lst:
        if isinstance(item, (list, tuple)):
            for leaf in flatten(item):
                yield leaf
        else:
            yield item
def createColormap(color, min_factor=1.0, max_factor=0.95):
    """
    Build a colormap ranging from a gray-scale shade up to a scaled target color.

    Args:
        color: Matplotlib-readable color representation. Examples: 'g', '#00FFFF', '0.5', [0.1, 0.5, 0.9]
        min_factor(float): Float in the range 0-1, gray-scale shade used at the minimal plot value.
        max_factor(float): Float in the range 0-1, multiplier applied to 'color' for the maximal plot value.

    Returns:
        Colormap object to be used by matplotlib-functions
    """
    red, green, blue = colors.colorConverter.to_rgb(color)
    segment_data = {}
    for channel, intensity in zip(('red', 'green', 'blue'), (red, green, blue)):
        # Each channel ramps linearly from the gray shade to the scaled color.
        segment_data[channel] = [(0.0, min_factor, min_factor),
                                 (1.0, max_factor * intensity, max_factor * intensity)]
    return colors.LinearSegmentedColormap('custom', segment_data)
def oint(start, stop, num):
    """
    Return ``num`` evenly spaced numbers strictly inside the OPEN interval
    (start, stop) — the boundaries themselves are excluded. Mainly used for
    parameter values of the low-level (observation) model, to avoid
    singularities in the likelihood function.

    Args:
        start(scalar): Starting value of the sequence
        stop(scalar): End value of the sequence
        num(int): Number of evenly spaced points within the interval.

    Returns:
        ndarray: Array of evenly spaced numbers from the specified open interval.
    """
    # Sample num+2 points including both endpoints, then drop the endpoints.
    padded = np.linspace(start, stop, num + 2)
    return padded[1:-1]
def cint(start, stop, num):
    """
    Return ``num`` evenly spaced numbers over the CLOSED interval
    [start, stop] — both boundaries are included. Mainly used for
    hyper-parameter values of the high-level (transition) model.

    Args:
        start(scalar): Starting value of the sequence
        stop(scalar): End value of the sequence
        num(int): Number of evenly spaced points within the interval.

    Returns:
        ndarray: Array of evenly spaced numbers from the specified closed interval.
    """
    grid = np.linspace(start, stop, num)
    return grid
def freeSymbols(rv):
    """
    Extract the free symbols/parameters of a probability distribution from a
    SymPy random variable, independent of the SymPy version.

    Note: In SymPy version <=1.0, the "distribution" attribute lives on
    rv._sorted_args[0], while from version 1.1 on it lives on
    rv._sorted_args[1].

    Args:
        rv: SymPy random variable

    Returns:
        list: Free symbols of the SymPy random variable
    """
    try:
        params = rv._sorted_args[0].distribution.free_symbols
    except AttributeError:
        # Newer SymPy keeps the distribution on the second sorted argument.
        params = rv._sorted_args[1].distribution.free_symbols
    return list(params)
| [
"matplotlib.colors.colorConverter.to_rgb",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.linspace"
] | [((2133, 2168), 'matplotlib.colors.colorConverter.to_rgb', 'colors.colorConverter.to_rgb', (['color'], {}), '(color)\n', (2161, 2168), True, 'import matplotlib.colors as colors\n'), ((2552, 2599), 'matplotlib.colors.LinearSegmentedColormap', 'colors.LinearSegmentedColormap', (['"""custom"""', 'cdict'], {}), "('custom', cdict)\n", (2582, 2599), True, 'import matplotlib.colors as colors\n'), ((3794, 3823), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'num'], {}), '(start, stop, num)\n', (3805, 3823), True, 'import numpy as np\n'), ((3203, 3236), 'numpy.linspace', 'np.linspace', (['start', 'stop', '(num + 2)'], {}), '(start, stop, num + 2)\n', (3214, 3236), True, 'import numpy as np\n')] |
# Code was created by <NAME>, 2020/01/13
# https://github.com/ezygeo-ai/machine-learning-and-geophysical-inversion/blob/master/scripts/fwd_sp.py
import numpy as np
import matplotlib.pyplot as plt
import pickle
# SP forward function
def SPfunc(x_inp, par, noise_variance=10, noise_mean=0, seed=None):
    """Forward-model the self-potential (SP) anomaly along a profile.

    Args:
        x_inp: iterable of profile positions (m) where SP is evaluated.
        par: model parameters [x0, alpha, h, K] -- source position x0 (m),
            angle alpha (rad), depth h (m) and amplitude factor K.
        noise_variance: variance of the additive Gaussian noise; defaults
            to the original hard-coded value of 10 (the original code
            applied sqrt() to it, i.e. treated it as a variance).
        noise_mean: mean of the additive Gaussian noise (default 0).
        seed: optional integer; when given, the noise realization is
            reproducible. None (default) keeps the original behavior of
            drawing from the global NumPy random state.

    Returns:
        tuple: (var_sp, var_sp_noise, noise_data) -- clean SP values
        (list), noisy SP values (ndarray) and the noise realization
        (ndarray).
    """
    var_x0 = par[0]
    var_alpha = par[1]
    var_h = par[2]
    var_k = par[3]
    var_sp = []
    for x in x_inp:
        numerator = (x - var_x0) * np.cos(var_alpha) - var_h * np.sin(var_alpha)
        denominator = ((x - var_x0) * (x - var_x0) + var_h * var_h) ** (3 / 2)
        var_sp.append(var_k * (numerator / denominator))
    # === additive Gaussian noise (std = sqrt(noise_variance))
    rng = np.random.RandomState(seed) if seed is not None else np.random
    noise_data = rng.normal(noise_mean, np.sqrt(noise_variance), len(var_sp))
    var_sp_noise = var_sp + noise_data
    return var_sp, var_sp_noise, noise_data
# === TEST FORWARD MODELING: synthesize one SP profile and save it to disk
x0 = 77.07 # source horizontal position (m)
alpha = 309.37 * (np.pi/180) # inclination angle, converted deg -> rad
h = 41.81 # source depth (m)
K = 94686 # amplitude factor of the SP anomaly
measure_loc = np.linspace(0, 150, 101) # measurement positions along the profile (m)
print('number of data: ', len(measure_loc))
par_mod = [x0, alpha, h, K] # model parameter of subsurface
get_SPData, get_SPData_noise, noise_from_maxData = SPfunc(measure_loc, par_mod) # forward modeling test
# plot the clean (blue dots) vs. noisy (red stars) synthetic SP data
plt.figure()
plt.plot(measure_loc, get_SPData, 'b.')
plt.plot(measure_loc, get_SPData_noise, 'r*')
plt.xlim([0, 150])
plt.ylim([-10, 50])
plt.xlabel('position (m)')
plt.ylabel('SP data (mV)')
plt.legend(['ori', 'noise'])
plt.grid()
# histogram of this particular noise realization
plt.figure()
plt.hist(noise_from_maxData, density=True, bins=20)
plt.ylabel('noise distribution')
plt.show()
# persist positions + noisy data for downstream inversion scripts
with open('../data/SP_syn_data.pickle', 'wb') as f:
    pickle.dump([measure_loc, get_SPData_noise], f)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"pickle.dump",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"ma... | [((1016, 1040), 'numpy.linspace', 'np.linspace', (['(0)', '(150)', '(101)'], {}), '(0, 150, 101)\n', (1027, 1040), True, 'import numpy as np\n'), ((1291, 1303), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1301, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1305, 1344), 'matplotlib.pyplot.plot', 'plt.plot', (['measure_loc', 'get_SPData', '"""b."""'], {}), "(measure_loc, get_SPData, 'b.')\n", (1313, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1391), 'matplotlib.pyplot.plot', 'plt.plot', (['measure_loc', 'get_SPData_noise', '"""r*"""'], {}), "(measure_loc, get_SPData_noise, 'r*')\n", (1354, 1391), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1411), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 150]'], {}), '([0, 150])\n', (1401, 1411), True, 'import matplotlib.pyplot as plt\n'), ((1413, 1432), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-10, 50]'], {}), '([-10, 50])\n', (1421, 1432), True, 'import matplotlib.pyplot as plt\n'), ((1434, 1460), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""position (m)"""'], {}), "('position (m)')\n", (1444, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1488), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SP data (mV)"""'], {}), "('SP data (mV)')\n", (1472, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1490, 1518), 'matplotlib.pyplot.legend', 'plt.legend', (["['ori', 'noise']"], {}), "(['ori', 'noise'])\n", (1500, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1530), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1528, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1546), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1544, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1599), 'matplotlib.pyplot.hist', 'plt.hist', (['noise_from_maxData'], {'density': '(True)', 'bins': '(20)'}), '(noise_from_maxData, density=True, bins=20)\n', (1556, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1633), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""noise distribution"""'], {}), "('noise distribution')\n", (1611, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1635, 1645), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1643, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1755), 'pickle.dump', 'pickle.dump', (['[measure_loc, get_SPData_noise]', 'f'], {}), '([measure_loc, get_SPData_noise], f)\n', (1719, 1755), False, 'import pickle\n'), ((758, 776), 'numpy.sqrt', 'np.sqrt', (['std_noise'], {}), '(std_noise)\n', (765, 776), True, 'import numpy as np\n'), ((424, 441), 'numpy.cos', 'np.cos', (['var_alpha'], {}), '(var_alpha)\n', (430, 441), True, 'import numpy as np\n'), ((452, 469), 'numpy.sin', 'np.sin', (['var_alpha'], {}), '(var_alpha)\n', (458, 469), True, 'import numpy as np\n')] |
#!/usr/bin/python3
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.layers import Conv1D
from tensorflow.keras.layers import SimpleRNNCell, RNN, Input, Reshape, Layer
from tensorflow.keras.layers import GlobalMaxPooling1D
from tensorflow.keras import backend as K
import numpy as np
INPUT_WIDTH = 100    # channels per generated spike pattern (columns of gen_pattern)
INPUT_LENGTH = 100   # time steps per generated spike pattern (rows of gen_pattern)
NO_OF_PATTERNS = 100 # NOTE(review): not referenced anywhere in the visible code below
STATE_SIZE = 2       # units of the first SNNLayer
PSC_LENGTH = 10      # width of the post-synaptic-current Conv1D kernel (psc_length)
def gen_pattern(n, length, spking_prob=0.1, seed=1):
    """
    Generate a reproducible binary spike raster of shape (length, n).

    Each entry is an independent Bernoulli(spking_prob) draw. The single
    vectorised draw replaces the original per-row Python loop; it consumes the
    RandomState stream in the same order (row-major), so the output matches
    the loop version bit-for-bit.

    Args:
        n: number of channels (columns).
        length: number of time steps (rows).
        spking_prob: per-entry spike probability.
        seed: RandomState seed for reproducibility.

    Returns:
        ndarray of 0/1 ints with shape (length, n).
    """
    rng = np.random.RandomState(seed)
    return rng.binomial(1, spking_prob, size=(length, n))
class SNNLayer(Layer):
    """
    Spiking-neural-network layer: each input channel is filtered by one shared
    post-synaptic-current (PSC) Conv1D kernel, then a SimpleRNNCell is stepped
    manually through time with a sigmoid spiking nonlinearity and a
    multiplicative state damping ("refraction") applied after each step.
    """
    @staticmethod
    def InvokeRNN(cell, inputs):
        # Manually unroll the RNN cell over the time axis (axis 1) of
        # `inputs`, yielding (raw_output, spike_signal) at every step.
        state = [cell.get_initial_state(inputs, None, None)]
        for i in range(inputs.shape[1]):
            output, state = cell(inputs[:,i,:], state)
            # steep sigmoid squashes the raw output toward a soft 0/1 spike signal
            n_output = tf.sigmoid(3*output)
            yield (output, n_output)
            #refraction period: larger n_output -> smaller sigmoid factor,
            # damping the state carried into the next step
            state[0] = state[0]*tf.sigmoid(2*(0.5-n_output))
    def __init__(self, units, psc_length, **kwargs):
        # NOTE(review): passing `self` positionally into Layer.__init__ looks
        # unintended (it arrives as an extra argument) — confirm against the
        # Keras Layer API of the TF version in use.
        super(SNNLayer, self).__init__(self, **kwargs)
        self.units = units            # number of RNN units (neurons)
        self.psc_length = psc_length  # PSC kernel width for the Conv1D filter
    def build(self, input_shape):
        def add_layer(layer):
            # register the sublayer's weights as trainable weights of this layer
            [self._trainable_weights.append(x) for x in layer.trainable_weights]
        if not len(input_shape) == 3:
            raise ValueError("expected ndim=3")
        self.INPUT_LENGTH = input_shape[1]
        self.INPUT_WIDTH = input_shape[2]
        with tf.variable_scope(self.name):
            # PSC integration as Conv1D, has only 1 channel (=>PSC is uniform), shared among all synapses
            self.psc = Conv1D(1, self.psc_length, name='psc', data_format='channels_last', trainable=True)
            self.psc.build((None, input_shape[1], 1))
            # add psc weights to self
            add_layer(self.psc)
            self.psc_weights = self.psc.trainable_weights
            # RNN unit, has only 1 unit (one neuron)
            self.rnn = SimpleRNNCell(self.units, activation=None)
            self.rnn.build((None, 1, self.INPUT_WIDTH))
            add_layer(self.rnn)
    def call(self, inputs, **kwargs):
        # The same PSC is applied to all inputs channels
        syn_inputs = tf.concat([self.psc(inputs[:,:,i:i+1]) for i in range(self.INPUT_WIDTH)], axis=-1)
        # then the RNN units are called
        o = tf.stack([o for _, o in SNNLayer.InvokeRNN(self.rnn, syn_inputs)], axis=1)
        return o
# generated patterns: 50 spike rasters, one per randomly drawn seed
g_rng = np.random.RandomState(seed=3000)
x_train = np.stack([gen_pattern(INPUT_WIDTH, INPUT_LENGTH,seed=i) for i in g_rng.randint(1e6, size=50)])
print(x_train.shape)
# assign labels randomly
labels = g_rng.binomial(1, p=0.5, size=50)
# TF1-style session wired into the Keras backend
S = tf.Session()
K.set_session(S)
# two stacked SNN layers followed by global max pooling over time
x = Input((x_train.shape[1], x_train.shape[2]))
z = SNNLayer(STATE_SIZE, PSC_LENGTH)(x)
q = SNNLayer(1, PSC_LENGTH)
z= q(z)
y = GlobalMaxPooling1D()
z = y(z)
int_output = [q.output, y.output]  # intermediate tensors inspected after training
model = Model(x, z)
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
# NOTE(review): validation_data reuses the training set, so the reported val
# metrics measure memorization, not generalization.
model.fit(x_train, labels, batch_size=50, epochs=300, verbose=1, validation_data=(x_train, labels))
# evaluate the intermediate outputs for one sample in inference mode (phase 0.)
test_func = K.function([model.input, K.learning_phase()], int_output)
outputs = test_func([x_train[2:3] , 0.])
[print(x[0]) for x in outputs[0][0]] | [
"tensorflow.keras.layers.Input",
"tensorflow.variable_scope",
"tensorflow.Session",
"tensorflow.keras.backend.learning_phase",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.sigmoid",
"tensorflow.keras.backend.set_session",
"tensorflow.keras.layers.SimpleRNNCell",
"tensorflow.keras.models... | [((2513, 2545), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(3000)'}), '(seed=3000)\n', (2534, 2545), True, 'import numpy as np\n'), ((2745, 2757), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2755, 2757), True, 'import tensorflow as tf\n'), ((2758, 2774), 'tensorflow.keras.backend.set_session', 'K.set_session', (['S'], {}), '(S)\n', (2771, 2774), True, 'from tensorflow.keras import backend as K\n'), ((2780, 2823), 'tensorflow.keras.layers.Input', 'Input', (['(x_train.shape[1], x_train.shape[2])'], {}), '((x_train.shape[1], x_train.shape[2]))\n', (2785, 2823), False, 'from tensorflow.keras.layers import SimpleRNNCell, RNN, Input, Reshape, Layer\n'), ((2904, 2924), 'tensorflow.keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (2922, 2924), False, 'from tensorflow.keras.layers import GlobalMaxPooling1D\n'), ((2978, 2989), 'tensorflow.keras.models.Model', 'Model', (['x', 'z'], {}), '(x, z)\n', (2983, 2989), False, 'from tensorflow.keras.models import Model\n'), ((510, 537), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (531, 537), True, 'import numpy as np\n'), ((2017, 2059), 'tensorflow.keras.layers.SimpleRNNCell', 'SimpleRNNCell', (['self.units'], {'activation': 'None'}), '(self.units, activation=None)\n', (2030, 2059), False, 'from tensorflow.keras.layers import SimpleRNNCell, RNN, Input, Reshape, Layer\n'), ((3232, 3250), 'tensorflow.keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (3248, 3250), True, 'from tensorflow.keras import backend as K\n'), ((871, 893), 'tensorflow.sigmoid', 'tf.sigmoid', (['(3 * output)'], {}), '(3 * output)\n', (881, 893), True, 'import tensorflow as tf\n'), ((1524, 1552), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (1541, 1552), True, 'import tensorflow as tf\n'), ((1683, 1770), 'tensorflow.layers.Conv1D', 'Conv1D', (['(1)', 'self.psc_length'], {'name': 
'"""psc"""', 'data_format': '"""channels_last"""', 'trainable': '(True)'}), "(1, self.psc_length, name='psc', data_format='channels_last',\n trainable=True)\n", (1689, 1770), False, 'from tensorflow.layers import Conv1D\n'), ((992, 1024), 'tensorflow.sigmoid', 'tf.sigmoid', (['(2 * (0.5 - n_output))'], {}), '(2 * (0.5 - n_output))\n', (1002, 1024), True, 'import tensorflow as tf\n')] |
import paddle
import numpy as np
import torch
import torch.optim.lr_scheduler as lr_scheduler
from reprod_log import ReprodLogger
from reprod_log import ReprodDiffHelper
from mobilenetv3_paddle.paddlevision.models import mobilenet_v3_small as mv3_small_paddle
from mobilenetv3_ref.torchvision.models import mobilenet_v3_small as mv3_small_torch
from utilities import train_one_epoch_paddle, train_one_epoch_torch
def test_backward():
    """
    Align a backward/training pass between the Paddle and PyTorch MobileNetV3
    implementations and log the per-iteration losses for reprod_log comparison.

    Runs `max_iter` iterations with the same fake data, CrossEntropy loss,
    SGD+momentum optimizer, and a matching step LR schedule in both frameworks,
    writing results through a shared ReprodLogger.
    """
    max_iter = 3
    lr = 1e-3
    momentum = 0.9
    lr_gamma = 0.1

    # set deterministic flags so both frameworks use reproducible kernels
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # bug fix: the original assigned `FLAGS_cudnn_deterministic = True` to a
    # dead local variable; the flag only takes effect through Paddle's API
    paddle.set_flags({"FLAGS_cudnn_deterministic": True})

    # load paddle model
    paddle.set_device("gpu")
    paddle_model = mv3_small_paddle(dropout=0.0)
    paddle_model.eval()
    paddle_state_dict = paddle.load("./data/mv3_small_paddle.pdparams")
    paddle_model.set_dict(paddle_state_dict)

    # load torch model
    torch_model = mv3_small_torch(dropout=0.0)
    torch_model.eval()
    torch_state_dict = torch.load("./data/mobilenet_v3_small-047dcff4.pth")
    torch_model.load_state_dict(torch_state_dict, strict=False)

    # init loss
    criterion_paddle = paddle.nn.CrossEntropyLoss()
    criterion_torch = torch.nn.CrossEntropyLoss()

    # init optimizer
    lr_scheduler_paddle = paddle.optimizer.lr.StepDecay(
        lr, step_size=max_iter // 3, gamma=lr_gamma)
    # bug fix: pass the StepDecay scheduler object (not the bare float `lr`)
    # as learning_rate, so the LR schedule is actually applied — mirroring the
    # torch side where StepLR wraps opt_torch
    opt_paddle = paddle.optimizer.Momentum(
        learning_rate=lr_scheduler_paddle,
        momentum=momentum,
        parameters=paddle_model.parameters())
    opt_torch = torch.optim.SGD(torch_model.parameters(),
                                lr=lr,
                                momentum=momentum)
    lr_scheduler_torch = lr_scheduler.StepLR(
        opt_torch, step_size=max_iter // 3, gamma=lr_gamma)

    # prepare logger & load data
    reprod_logger = ReprodLogger()
    inputs = np.load("./data/fake_data.npy")
    labels = np.load("./data/fake_label.npy")

    # run the aligned training loops; each logs its losses via reprod_logger
    train_one_epoch_paddle(inputs, labels, paddle_model, criterion_paddle,
                           opt_paddle, lr_scheduler_paddle, max_iter,
                           reprod_logger)
    train_one_epoch_torch(inputs, labels, torch_model, criterion_torch,
                          opt_torch, lr_scheduler_torch, max_iter,
                          reprod_logger)
if __name__ == "__main__":
test_backward()
# load data
diff_helper = ReprodDiffHelper()
torch_info = diff_helper.load_info("./result/losses_ref.npy")
paddle_info = diff_helper.load_info("./result/losses_paddle.npy")
# compare result and produce log
diff_helper.compare_info(torch_info, paddle_info)
diff_helper.report(path="./result/log/backward_diff.log")
| [
"mobilenetv3_paddle.paddlevision.models.mobilenet_v3_small",
"reprod_log.ReprodDiffHelper",
"reprod_log.ReprodLogger",
"torch.nn.CrossEntropyLoss",
"utilities.train_one_epoch_torch",
"torch.load",
"paddle.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.StepLR",
"mobilenetv3_ref.torchvision.models.mo... | [((692, 716), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (709, 716), False, 'import paddle\n'), ((736, 765), 'mobilenetv3_paddle.paddlevision.models.mobilenet_v3_small', 'mv3_small_paddle', ([], {'dropout': '(0.0)'}), '(dropout=0.0)\n', (752, 765), True, 'from mobilenetv3_paddle.paddlevision.models import mobilenet_v3_small as mv3_small_paddle\n'), ((814, 861), 'paddle.load', 'paddle.load', (['"""./data/mv3_small_paddle.pdparams"""'], {}), "('./data/mv3_small_paddle.pdparams')\n", (825, 861), False, 'import paddle\n'), ((949, 977), 'mobilenetv3_ref.torchvision.models.mobilenet_v3_small', 'mv3_small_torch', ([], {'dropout': '(0.0)'}), '(dropout=0.0)\n', (964, 977), True, 'from mobilenetv3_ref.torchvision.models import mobilenet_v3_small as mv3_small_torch\n'), ((1024, 1076), 'torch.load', 'torch.load', (['"""./data/mobilenet_v3_small-047dcff4.pth"""'], {}), "('./data/mobilenet_v3_small-047dcff4.pth')\n", (1034, 1076), False, 'import torch\n'), ((1181, 1209), 'paddle.nn.CrossEntropyLoss', 'paddle.nn.CrossEntropyLoss', ([], {}), '()\n', (1207, 1209), False, 'import paddle\n'), ((1232, 1259), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (1257, 1259), False, 'import torch\n'), ((1308, 1382), 'paddle.optimizer.lr.StepDecay', 'paddle.optimizer.lr.StepDecay', (['lr'], {'step_size': '(max_iter // 3)', 'gamma': 'lr_gamma'}), '(lr, step_size=max_iter // 3, gamma=lr_gamma)\n', (1337, 1382), False, 'import paddle\n'), ((1709, 1780), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['opt_torch'], {'step_size': '(max_iter // 3)', 'gamma': 'lr_gamma'}), '(opt_torch, step_size=max_iter // 3, gamma=lr_gamma)\n', (1728, 1780), True, 'import torch.optim.lr_scheduler as lr_scheduler\n'), ((1844, 1858), 'reprod_log.ReprodLogger', 'ReprodLogger', ([], {}), '()\n', (1856, 1858), False, 'from reprod_log import ReprodLogger\n'), ((1872, 1903), 'numpy.load', 'np.load', 
(['"""./data/fake_data.npy"""'], {}), "('./data/fake_data.npy')\n", (1879, 1903), True, 'import numpy as np\n'), ((1917, 1949), 'numpy.load', 'np.load', (['"""./data/fake_label.npy"""'], {}), "('./data/fake_label.npy')\n", (1924, 1949), True, 'import numpy as np\n'), ((1955, 2087), 'utilities.train_one_epoch_paddle', 'train_one_epoch_paddle', (['inputs', 'labels', 'paddle_model', 'criterion_paddle', 'opt_paddle', 'lr_scheduler_paddle', 'max_iter', 'reprod_logger'], {}), '(inputs, labels, paddle_model, criterion_paddle,\n opt_paddle, lr_scheduler_paddle, max_iter, reprod_logger)\n', (1977, 2087), False, 'from utilities import train_one_epoch_paddle, train_one_epoch_torch\n'), ((2143, 2270), 'utilities.train_one_epoch_torch', 'train_one_epoch_torch', (['inputs', 'labels', 'torch_model', 'criterion_torch', 'opt_torch', 'lr_scheduler_torch', 'max_iter', 'reprod_logger'], {}), '(inputs, labels, torch_model, criterion_torch,\n opt_torch, lr_scheduler_torch, max_iter, reprod_logger)\n', (2164, 2270), False, 'from utilities import train_one_epoch_paddle, train_one_epoch_torch\n'), ((2403, 2421), 'reprod_log.ReprodDiffHelper', 'ReprodDiffHelper', ([], {}), '()\n', (2419, 2421), False, 'from reprod_log import ReprodDiffHelper\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 12:47:18 2022
A small investigation into the correlation of errors in the dD and d18O is performed
from the snow cores at EastGRIP.
@author: michaeltown
"""
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import numpy as np
import datetime as dt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
def plotResid(yTrue, yResid, xlabelR, ylabelR, titleR):
    """
    Scatter-plot residuals against the true values, with a zero reference line.

    Args:
        yTrue: true/observed values (x-axis).
        yResid: residuals corresponding to yTrue (y-axis).
        xlabelR: x-axis label.
        ylabelR: y-axis label.
        titleR: plot title.
    """
    plt.figure()
    zeroLine = yTrue * 0  # zero line with the same length/type as yTrue
    plt.plot(yTrue, yResid, color='blue', marker='o', alpha=0.5, ls='None')
    plt.plot(yTrue, zeroLine, 'k--')
    plt.xlabel(xlabelR)
    plt.ylabel(ylabelR)
    # bug fix: the original wrote `plt.grid;` and `plt.show;`, which only
    # reference the functions without calling them — the grid was never drawn
    # and the figure never shown
    plt.grid()
    plt.title(titleR)
    plt.show()
def regressPipeline_dDd18O(x, y, ts, titleStr, xlab, ylab, xlim, ylim):
    """
    Fit an OLS regression of y on x, plot the data with the fitted line and the
    slope-8 equilibrium water line, annotate the fit, and return the parameters.

    Args:
        x: exogenous data with a constant column (from sm.add_constant) first
           and the predictor second (x.iloc[:, 1] is plotted).
        y: endogenous data.
        ts: fraction forwarded to train_test_split as *train_size*. NOTE: the
            caller names its value "testSize" — the misleading name is kept for
            backward compatibility.
        titleStr, xlab, ylab: plot title and axis labels.
        xlim, ylim: ndarray axis limits; xlim also spans the drawn fit line.

    Returns:
        tuple: (slope, intercept, r2) of the OLS fit on the training split.
    """
    xTrain, xTest, yTrain, yTest = train_test_split(x, y, random_state=42, train_size=ts)
    # fit on the training split only (the original also computed test-set
    # residuals but never used them — that dead computation is removed)
    results = sm.OLS(yTrain, xTrain).fit()
    slopes = results.params[1]
    intercept = results.params.const
    r2score = results.rsquared

    # scatter of the full data set plus fitted and equilibrium lines
    plt.figure()
    plt.plot(x.iloc[:, 1], y, '.', color='blue', alpha=0.2)
    plt.plot(xlim, slopes * xlim + intercept, '--', color='red', alpha=0.5)
    plt.plot(xlim, 8 * xlim, '--', color='black', alpha=0.3)  # equilibrium water line (slope 8)
    plt.title(titleStr)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.ylabel(ylab)
    plt.xlabel(xlab)
    plt.grid()

    # annotation anchor: plots with a negative x-range need a different offset
    xval = xlim[1] * 1.3 if xlim[0] < 0 else xlim[1] * 0.8
    plt.text(xval, ylim[1] * 0.8, 'm = ' + str(np.round(slopes, 2)))
    plt.text(xval, ylim[1] * 0.7, 'b = ' + str(np.round(intercept, 2)))
    plt.text(xval, ylim[1] * 0.6, 'r\u00b2 = ' + str(np.round(r2score, 2)))
    # return the params
    return slopes, intercept, r2score
# useful stuff
#symbols: LaTeX-ish labels for plot axes
# NOTE(review): '\d' and '\t' in these plain-string literals are escape
# sequences ('\t' is a real tab); raw strings (r'...') would be safer here.
d18Osym = '$\delta^{18}$O'
dDsym = '$\delta$D'
pptsym = 'ppt' # '\textperthousand'
#******************************************
# main
#******************************************
fileLoc = '/home/michaeltown/work/projects/snowiso/data/EastGRIP/';
figureLoc ='/home/michaeltown/work/projects/snowiso/figures/EastGRIP/'
fileNameIso = 'eastGRIP_SCisoData_2016-2019.pkl'
# load the snow-core isotope data and drop incomplete rows
df_iso = pd.read_pickle(fileLoc+fileNameIso);
df_iso.dropna(inplace = True)
# NOTE(review): this value is forwarded to train_test_split as train_size
# inside regressPipeline_dDd18O — despite being named "testSize" here.
testSize = 0.3;
# regression 1: correlation of the dD and d18O measurement uncertainties
m, b, r2 = regressPipeline_dDd18O(sm.add_constant(df_iso.d18O_std),df_iso.dD_std, testSize, 'dDstd vs d18Ostd for all EastGRIP Snow Core data',
                       d18Osym + ' std (ppt)', dDsym + ' std (ppt)', np.asarray([0, 0.15]), np.asarray([0, 1]))
plt.legend(['scatter','regression','eq water line'],loc = 'lower right')
plt.savefig(figureLoc+'errorCorrelation_dDstdVsd18Ostd_EastGRIP_2016-2019.jpg')
# regression 2: d18O uncertainty vs. the d18O value itself
m, b, r2 = regressPipeline_dDd18O(sm.add_constant(df_iso.d18O),df_iso.d18O_std, testSize, 'd18Ostd vs d18O for all EastGRIP Snow Core data',
                       d18Osym + ' (ppt)', d18Osym + ' std (ppt)', np.asarray([-50,-20]), np.asarray([0, 0.15]))
plt.legend(['scatter','regression'],loc = 'lower right')
plt.savefig(figureLoc+'errorCorrelation_d18OstdVsd18O_EastGRIP_2016-2019.jpg')
# regression 3: dD uncertainty vs. the dD value itself
m, b, r2 = regressPipeline_dDd18O(sm.add_constant(df_iso.dD),df_iso.dD_std, testSize, 'dDstd vs dD for all EastGRIP Snow Core data',
                       dDsym + ' (ppt)', dDsym + ' std (ppt)', np.asarray([-380,-150]), np.asarray([0, 1]))
plt.legend(['scatter','regression'],loc = 'lower right')
plt.savefig(figureLoc+'errorCorrelation_dDstdVsdD_EastGRIP_2016-2019.jpg')
| [
"pandas.read_pickle",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"numpy.round",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.figure",
"statsmodels.api.OLS",
"statsmo... | [((2477, 2514), 'pandas.read_pickle', 'pd.read_pickle', (['(fileLoc + fileNameIso)'], {}), '(fileLoc + fileNameIso)\n', (2491, 2514), True, 'import pandas as pd\n'), ((2840, 2913), 'matplotlib.pyplot.legend', 'plt.legend', (["['scatter', 'regression', 'eq water line']"], {'loc': '"""lower right"""'}), "(['scatter', 'regression', 'eq water line'], loc='lower right')\n", (2850, 2913), True, 'import matplotlib.pyplot as plt\n'), ((2913, 2998), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figureLoc + 'errorCorrelation_dDstdVsd18Ostd_EastGRIP_2016-2019.jpg')"], {}), "(figureLoc +\n 'errorCorrelation_dDstdVsd18Ostd_EastGRIP_2016-2019.jpg')\n", (2924, 2998), True, 'import matplotlib.pyplot as plt\n'), ((3260, 3316), 'matplotlib.pyplot.legend', 'plt.legend', (["['scatter', 'regression']"], {'loc': '"""lower right"""'}), "(['scatter', 'regression'], loc='lower right')\n", (3270, 3316), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3402), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figureLoc + 'errorCorrelation_d18OstdVsd18O_EastGRIP_2016-2019.jpg')"], {}), "(figureLoc + 'errorCorrelation_d18OstdVsd18O_EastGRIP_2016-2019.jpg'\n )\n", (3328, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3651, 3707), 'matplotlib.pyplot.legend', 'plt.legend', (["['scatter', 'regression']"], {'loc': '"""lower right"""'}), "(['scatter', 'regression'], loc='lower right')\n", (3661, 3707), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3784), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figureLoc + 'errorCorrelation_dDstdVsdD_EastGRIP_2016-2019.jpg')"], {}), "(figureLoc + 'errorCorrelation_dDstdVsdD_EastGRIP_2016-2019.jpg')\n", (3719, 3784), True, 'import matplotlib.pyplot as plt\n'), ((625, 637), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (635, 637), True, 'import matplotlib.pyplot as plt\n'), ((667, 738), 'matplotlib.pyplot.plot', 'plt.plot', (['yTrue', 'yResid'], {'color': '"""blue"""', 'marker': '"""o"""', 'alpha': '(0.5)', 'ls': 
'"""None"""'}), "(yTrue, yResid, color='blue', marker='o', alpha=0.5, ls='None')\n", (675, 738), True, 'import matplotlib.pyplot as plt\n'), ((747, 779), 'matplotlib.pyplot.plot', 'plt.plot', (['yTrue', 'zeroLine', '"""k--"""'], {}), "(yTrue, zeroLine, 'k--')\n", (755, 779), True, 'import matplotlib.pyplot as plt\n'), ((782, 801), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabelR'], {}), '(xlabelR)\n', (792, 801), True, 'import matplotlib.pyplot as plt\n'), ((807, 826), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabelR'], {}), '(ylabelR)\n', (817, 826), True, 'import matplotlib.pyplot as plt\n'), ((846, 863), 'matplotlib.pyplot.title', 'plt.title', (['titleR'], {}), '(titleR)\n', (855, 863), True, 'import matplotlib.pyplot as plt\n'), ((987, 1041), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'random_state': '(42)', 'train_size': 'ts'}), '(x, y, random_state=42, train_size=ts)\n', (1003, 1041), False, 'from sklearn.model_selection import train_test_split\n'), ((1058, 1080), 'statsmodels.api.OLS', 'sm.OLS', (['yTrain', 'xTrain'], {}), '(yTrain, xTrain)\n', (1064, 1080), True, 'import statsmodels.api as sm\n'), ((1326, 1338), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1336, 1338), True, 'import matplotlib.pyplot as plt\n'), ((1344, 1399), 'matplotlib.pyplot.plot', 'plt.plot', (['x.iloc[:, 1]', 'y', '"""."""'], {'color': '"""blue"""', 'alpha': '(0.2)'}), "(x.iloc[:, 1], y, '.', color='blue', alpha=0.2)\n", (1352, 1399), True, 'import matplotlib.pyplot as plt\n'), ((1404, 1475), 'matplotlib.pyplot.plot', 'plt.plot', (['xlim', '(slopes * xlim + intercept)', '"""--"""'], {'color': '"""red"""', 'alpha': '(0.5)'}), "(xlim, slopes * xlim + intercept, '--', color='red', alpha=0.5)\n", (1412, 1475), True, 'import matplotlib.pyplot as plt\n'), ((1476, 1532), 'matplotlib.pyplot.plot', 'plt.plot', (['xlim', '(8 * xlim)', '"""--"""'], {'color': '"""black"""', 'alpha': '(0.3)'}), "(xlim, 8 * xlim, '--', color='black', 
alpha=0.3)\n", (1484, 1532), True, 'import matplotlib.pyplot as plt\n'), ((1566, 1585), 'matplotlib.pyplot.title', 'plt.title', (['titleStr'], {}), '(titleStr)\n', (1575, 1585), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1605), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (1599, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1624), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (1618, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1629, 1645), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {}), '(ylab)\n', (1639, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1667), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlab'], {}), '(xlab)\n', (1661, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1683), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1681, 1683), True, 'import matplotlib.pyplot as plt\n'), ((2606, 2638), 'statsmodels.api.add_constant', 'sm.add_constant', (['df_iso.d18O_std'], {}), '(df_iso.d18O_std)\n', (2621, 2638), True, 'import statsmodels.api as sm\n'), ((2797, 2818), 'numpy.asarray', 'np.asarray', (['[0, 0.15]'], {}), '([0, 0.15])\n', (2807, 2818), True, 'import numpy as np\n'), ((2820, 2838), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (2830, 2838), True, 'import numpy as np\n'), ((3028, 3056), 'statsmodels.api.add_constant', 'sm.add_constant', (['df_iso.d18O'], {}), '(df_iso.d18O)\n', (3043, 3056), True, 'import statsmodels.api as sm\n'), ((3214, 3236), 'numpy.asarray', 'np.asarray', (['[-50, -20]'], {}), '([-50, -20])\n', (3224, 3236), True, 'import numpy as np\n'), ((3237, 3258), 'numpy.asarray', 'np.asarray', (['[0, 0.15]'], {}), '([0, 0.15])\n', (3247, 3258), True, 'import numpy as np\n'), ((3432, 3458), 'statsmodels.api.add_constant', 'sm.add_constant', (['df_iso.dD'], {}), '(df_iso.dD)\n', (3447, 3458), True, 'import statsmodels.api as sm\n'), ((3606, 3630), 'numpy.asarray', 'np.asarray', (['[-380, -150]'], {}), '([-380, 
-150])\n', (3616, 3630), True, 'import numpy as np\n'), ((3631, 3649), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {}), '([0, 1])\n', (3641, 3649), True, 'import numpy as np\n'), ((1846, 1865), 'numpy.round', 'np.round', (['slopes', '(2)'], {}), '(slopes, 2)\n', (1854, 1865), True, 'import numpy as np\n'), ((1912, 1934), 'numpy.round', 'np.round', (['intercept', '(2)'], {}), '(intercept, 2)\n', (1920, 1934), True, 'import numpy as np\n'), ((1987, 2007), 'numpy.round', 'np.round', (['r2score', '(2)'], {}), '(r2score, 2)\n', (1995, 2007), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import os
from pre_processing_utils import get_TRs_with_visual_features, read_json_list, get_voxels_masked_subset_zscored
# Get TRs to analyze
input_kwargs ={
    "start_stop_pads": (10, 5),
    "subj_list": "../../data/train_subjects_list.npy", # Contact us for access to train_subjects_list
}
# TR indices with visual features for each of the four 7T movie runs
M1_tr_idx = get_TRs_with_visual_features(clip="MOVIE1",**input_kwargs)
M2_tr_idx = get_TRs_with_visual_features(clip="MOVIE2",**input_kwargs)
M3_tr_idx = get_TRs_with_visual_features(clip="MOVIE3",**input_kwargs)
M4_tr_idx = get_TRs_with_visual_features(clip="MOVIE4",**input_kwargs)
# Get voxels of interst (thin mask) and subjects of interest
# NOTE(review): thin_mask is loaded but never referenced below in this file —
# presumably applied inside get_voxels_masked_subset_zscored; confirm.
thin_mask = np.load("../../data/thin_mask_1.6mm_MNI.npy")
# NOTE(review): `train_subjects_list` (the bare name) is not defined anywhere
# in this file; as written this line raises NameError — presumably a
# placeholder for the restricted-access subject list mentioned in the comment.
train_subjects = read_json_list(train_subjects_list) #contact us for access to train_subjects_list (the list of participants we have selected for the development set)
# Apply thin mask and zscore data
movie_name_1 = "tfMRI_MOVIE1_7T_AP"
movie_name_2 = "tfMRI_MOVIE2_7T_PA"
movie_name_3 = "tfMRI_MOVIE3_7T_PA"
movie_name_4 = "tfMRI_MOVIE4_7T_AP"
for sub_idx, sub in enumerate(train_subjects):
    # NOTE(review): each inner loop rebinds zscored_N on every file, so only
    # the *last* file listed in each run directory survives to the vstack below.
    for file1 in os.listdir("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}".format(SUB = sub, MOVIE = movie_name_1)):
        zscored_1 = get_voxels_masked_subset_zscored("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}/{FILE}".format(SUB = sub, MOVIE = movie_name_1, FILE = file1), M1_tr_idx)
    for file2 in os.listdir("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}".format(SUB = sub, MOVIE = movie_name_2)):
        zscored_2 = get_voxels_masked_subset_zscored("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}/{FILE}".format(SUB = sub, MOVIE = movie_name_2, FILE = file2), M2_tr_idx)
    for file3 in os.listdir("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}".format(SUB = sub, MOVIE = movie_name_3)):
        zscored_3 = get_voxels_masked_subset_zscored("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}/{FILE}".format(SUB = sub, MOVIE = movie_name_3, FILE = file3), M3_tr_idx)
    for file4 in os.listdir("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}".format(SUB = sub, MOVIE = movie_name_4)):
        zscored_4 = get_voxels_masked_subset_zscored("../../data/HCP_7T_Movie_Voxel_Space/{SUB}/MNINonLinear/Results/{MOVIE}/{FILE}".format(SUB = sub, MOVIE = movie_name_4, FILE = file4), M4_tr_idx)
    # Concatenate movie clips together
    movie_1_2 = np.vstack((zscored_1, zscored_2))
    movie_1_2_3 = np.vstack((movie_1_2, zscored_3))
    movie_1_2_3_4 = np.vstack((movie_1_2_3, zscored_4))
    # z-score across all four concatenated runs (per column)
    # NOTE(review): relies on `sp.stats` being importable via `import scipy as
    # sp` alone — older SciPy versions require an explicit `import scipy.stats`.
    movies_zscored = sp.stats.zscore(movie_1_2_3_4, axis = 0)
    np.save("../../data/HCP_7T_Movie_Voxel_Space/pre_processed/sub_{SUB}_zscored_per_run_and_across_4_runs_thin_mask".format(SUB = sub), movies_zscored)
| [
"pre_processing_utils.read_json_list",
"scipy.stats.zscore",
"pre_processing_utils.get_TRs_with_visual_features",
"numpy.vstack",
"numpy.load"
] | [((348, 407), 'pre_processing_utils.get_TRs_with_visual_features', 'get_TRs_with_visual_features', ([], {'clip': '"""MOVIE1"""'}), "(clip='MOVIE1', **input_kwargs)\n", (376, 407), False, 'from pre_processing_utils import get_TRs_with_visual_features, read_json_list, get_voxels_masked_subset_zscored\n'), ((419, 478), 'pre_processing_utils.get_TRs_with_visual_features', 'get_TRs_with_visual_features', ([], {'clip': '"""MOVIE2"""'}), "(clip='MOVIE2', **input_kwargs)\n", (447, 478), False, 'from pre_processing_utils import get_TRs_with_visual_features, read_json_list, get_voxels_masked_subset_zscored\n'), ((491, 550), 'pre_processing_utils.get_TRs_with_visual_features', 'get_TRs_with_visual_features', ([], {'clip': '"""MOVIE3"""'}), "(clip='MOVIE3', **input_kwargs)\n", (519, 550), False, 'from pre_processing_utils import get_TRs_with_visual_features, read_json_list, get_voxels_masked_subset_zscored\n'), ((562, 621), 'pre_processing_utils.get_TRs_with_visual_features', 'get_TRs_with_visual_features', ([], {'clip': '"""MOVIE4"""'}), "(clip='MOVIE4', **input_kwargs)\n", (590, 621), False, 'from pre_processing_utils import get_TRs_with_visual_features, read_json_list, get_voxels_masked_subset_zscored\n'), ((695, 740), 'numpy.load', 'np.load', (['"""../../data/thin_mask_1.6mm_MNI.npy"""'], {}), "('../../data/thin_mask_1.6mm_MNI.npy')\n", (702, 740), True, 'import numpy as np\n'), ((758, 793), 'pre_processing_utils.read_json_list', 'read_json_list', (['train_subjects_list'], {}), '(train_subjects_list)\n', (772, 793), False, 'from pre_processing_utils import get_TRs_with_visual_features, read_json_list, get_voxels_masked_subset_zscored\n'), ((2595, 2628), 'numpy.vstack', 'np.vstack', (['(zscored_1, zscored_2)'], {}), '((zscored_1, zscored_2))\n', (2604, 2628), True, 'import numpy as np\n'), ((2647, 2680), 'numpy.vstack', 'np.vstack', (['(movie_1_2, zscored_3)'], {}), '((movie_1_2, zscored_3))\n', (2656, 2680), True, 'import numpy as np\n'), ((2701, 2736), 'numpy.vstack', 
'np.vstack', (['(movie_1_2_3, zscored_4)'], {}), '((movie_1_2_3, zscored_4))\n', (2710, 2736), True, 'import numpy as np\n'), ((2758, 2796), 'scipy.stats.zscore', 'sp.stats.zscore', (['movie_1_2_3_4'], {'axis': '(0)'}), '(movie_1_2_3_4, axis=0)\n', (2773, 2796), True, 'import scipy as sp\n')] |
import numpy as np
def _is_box_valid(box, roi_dims, area_prcntg=0.1): # default area of the face should be at least 10% the area of ROI
x, y, width, height = box
x_ctr, y_ctr = round(x+(width/2)), round(y+(height/2))
roi_x, roi_y, roi_width, roi_height = roi_dims
if (x_ctr<roi_x) or (y_ctr<roi_y) or (x_ctr>(roi_x+roi_width)) or (y_ctr>(roi_y+roi_height)):
return False, 'bounding box center outside the ROI'
if (width*height) < area_prcntg*(roi_width*roi_height):
return False, 'bounding box area smaller than '+str(area_prcntg)+'* ROI area'
else:
return True, None
def _calc_eucl_dist(point1, point2):
return np.sqrt(np.sum(np.square(np.array(point1) - np.array(point2))))
def get_roi(raw_img, roi_cut):
    """Compute a centered region of interest for *raw_img*.

    ``roi_cut`` is the total fraction of each dimension trimmed away
    (``roi_cut/2`` from each side).  Returns ``[x, y, width, height]``
    in pixel coordinates.

    Improvement over the original: ``np.shape(raw_img)`` was evaluated
    six times; it is now read once.
    """
    height, width = np.shape(raw_img)[0], np.shape(raw_img)[1]
    return [round(roi_cut * width / 2),
            round(roi_cut * height / 2),
            round(width - roi_cut * width),
            round(height - roi_cut * height)]
def get_valid_bboxes(bboxes, roi_dims):
    """Filter *bboxes*, keeping only those accepted by ``_is_box_valid``.

    Returns the surviving boxes as a NumPy array (empty array when none pass).
    """
    kept = [candidate for candidate in bboxes
            if _is_box_valid(candidate, roi_dims)[0]]
    return np.array(kept)
def get_focused_box(bboxes, img_dims):
    """Pick the box whose center is closest to the image center.

    ``img_dims`` is ``np.shape(image)``, i.e. ``(height, width, ...)``.
    Returns a ``(1, 4)`` array with the focused box.

    Fix: the original crashed with ``np.reshape([], (1, 4))`` when
    ``bboxes`` was empty; an empty ``(0, 4)`` array is now returned so
    callers can test for the no-detection case.
    """
    # np.shape(image) is (height, width), not (width, height)
    x_im_ctr, y_img_ctr = round(img_dims[1]/2), round(img_dims[0]/2)
    # initialise with the hypothetical farthest possible box: the image corner
    min_eucl_dist = _calc_eucl_dist([x_im_ctr, y_img_ctr], [img_dims[1], img_dims[0]])
    focus_box = []
    for box in bboxes:
        x, y, width, height = box
        x_ctr, y_ctr = round(x+(width/2)), round(y+(height/2))
        eucl_dist = _calc_eucl_dist([x_im_ctr, y_img_ctr], [x_ctr, y_ctr])
        if eucl_dist < min_eucl_dist:
            focus_box = box
            min_eucl_dist = eucl_dist
    if len(focus_box) == 0:
        # no candidate beat the corner distance (or bboxes was empty)
        return np.zeros((0, 4))
    return np.reshape(np.array(focus_box), (1, 4))
def disp_msg():
    """Print the start-up banner describing the detection pipeline steps."""
    print("""
        >>>Detecting Faces in the ROI,
        >>>Focusing on the face close to the center of the video frame,
        >>>Detecting 68 Facial landmarks,
        >>>Detecting and Tracking facial pose
        .
        .
        .
        >>>Press Cntl+C to stop the program
        """)
| [
"numpy.array",
"numpy.shape",
"numpy.reshape"
] | [((1256, 1278), 'numpy.array', 'np.array', (['valid_bboxes'], {}), '(valid_bboxes)\n', (1264, 1278), True, 'import numpy as np\n'), ((2013, 2032), 'numpy.array', 'np.array', (['focus_box'], {}), '(focus_box)\n', (2021, 2032), True, 'import numpy as np\n'), ((2050, 2079), 'numpy.reshape', 'np.reshape', (['focus_box', '(1, 4)'], {}), '(focus_box, (1, 4))\n', (2060, 2079), True, 'import numpy as np\n'), ((710, 726), 'numpy.array', 'np.array', (['point1'], {}), '(point1)\n', (718, 726), True, 'import numpy as np\n'), ((729, 745), 'numpy.array', 'np.array', (['point2'], {}), '(point2)\n', (737, 745), True, 'import numpy as np\n'), ((897, 914), 'numpy.shape', 'np.shape', (['raw_img'], {}), '(raw_img)\n', (905, 914), True, 'import numpy as np\n'), ((971, 988), 'numpy.shape', 'np.shape', (['raw_img'], {}), '(raw_img)\n', (979, 988), True, 'import numpy as np\n'), ((813, 830), 'numpy.shape', 'np.shape', (['raw_img'], {}), '(raw_img)\n', (821, 830), True, 'import numpy as np\n'), ((854, 871), 'numpy.shape', 'np.shape', (['raw_img'], {}), '(raw_img)\n', (862, 871), True, 'import numpy as np\n'), ((930, 947), 'numpy.shape', 'np.shape', (['raw_img'], {}), '(raw_img)\n', (938, 947), True, 'import numpy as np\n'), ((1000, 1017), 'numpy.shape', 'np.shape', (['raw_img'], {}), '(raw_img)\n', (1008, 1017), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
#import pandas as pd
# FIX: matplotlib import was commented out, but plt.subplots / frame plotting
# are used at the bottom of this script, which raised a NameError.
import matplotlib.pyplot as plt
####
from FunctionsLayer1.Lattices.latticeConstructor import constructLattice
from FunctionsLayer2.getSampleEnergyArray_MC import getSampleEnergyArray_MC
from FunctionsLayer2.getEnergyHist import getEnergyHist
from FunctionsLayer2.getEnergyExpectation import getEnergyExpectation
from FunctionsLayer2.getPartitionFunctionZ import getPartitionFunctionZ
####
np.random.seed(1001)  # fixed seed for reproducible Monte-Carlo runs
### inputs
# lattice size: N1 x N2 sites
N1=8
N2=8
# first lattice vector (a1_x, a1_y)
a1_x= 1.0
a1_y= 0
##
# second lattice vector at angle theta from a1 (pi/2 -> square lattice)
theta=np.pi/2
a2_x=np.cos(theta)
a2_y=np.sin(theta)
#####
# model / simulation parameters passed around as **args
args={}
args['J_const']=-1.0   # coupling constant
args['E_field']=0.0    # external field
args['power']=3.0      # interaction power-law exponent
args['a1_x']=a1_x
args['a1_y']=a1_y
#
args['a2_x']=a2_x
args['a2_y']=a2_y
args['N1'] = N1
args['N2'] = N2
args['N_spins']=N1*N2
args['first_neighb']=True  # restrict to first-neighbour interactions
# neighbour lookup tables for the lattice geometry defined above
neighbors_tables_list = constructLattice(**args)
args['neighbors_table']=neighbors_tables_list

### must be a divisor of N1*N2
args['max_n_spins_in_basket']=1 #N1*N2/10
args['N_samples'] = 50   # Monte-Carlo samples per temperature
args['N_warm_up']=300    # thermalisation sweeps before sampling
args['num_bins']=args['N_samples']
############################
# (alternative: uniform grid in temperature, kept for reference)
#Temp_init = 10
#Temp_fin = 0.001
#num_temp=100
#dTemp = (Temp_fin-Temp_init)* 1./num_temp
#Temps = [Temp_init + t*dTemp for t in range(num_temp) ]
#Betas = [1./Temp for Temp in Temps]
###########################
# uniform grid in inverse temperature beta, from 1 down to 0.01,
# then reversed so that both lists ascend in temperature
beta_init = 1
beta_fin = 0.01
num_temp=30
dBeta = (beta_fin-beta_init)* 1./num_temp
Betas = [beta_init + t*dBeta for t in range(num_temp) ]
Temps = [1./beta for beta in Betas]
Temps.reverse()
Betas.reverse()
##########################
#E_off_set = -100
###########################
# result tables: column 0 = temperature, column 1 = value
log_Z_vs_temp=np.zeros([num_temp, 2])
energy_expectation_dt = np.zeros([num_temp, 2])
scratch=True
for b in range(num_temp):
    Temp= Temps[b]
#    print Temp
    beta=1./Temp
    log_Z_beta=0
    energy_expct_beta=0
    ###############################
    # only the first temperature starts the Markov chain from scratch;
    # subsequent temperatures reuse the previous configuration (annealing)
    if b!=0: scratch=False
    sampleEnergy_array, args = getSampleEnergyArray_MC(beta, scratch, **args)
#    print len(sampleEnergy_array)
    energy_hist = getEnergyHist(sampleEnergy_array, **args)
    # NOTE(review): energy offset is hard-coded; presumably the ground-state
    # energy for this 8x8 first-neighbour lattice -- for other sizes it likely
    # should track np.min(sampleEnergy_array).  TODO confirm.
    E_off_set = -128 #np.min(sampleEnergy_array)
#    total_num_samples=len(sampleEnergy_array)
    ###############################
    # energy expectation per spin
    energy_expct_beta=getEnergyExpectation(energy_hist, beta, E_off_set, **args)*\
                        1./(N1*N2)
    ####
    Z = getPartitionFunctionZ(energy_hist, beta, E_off_set, **args)
    #
    log_Z_beta=np.log(Z)
    #
    # log Z per spin, with the energy offset subtracted back out
    log_Z_beta=log_Z_beta*1./(N1*N2)- \
                beta*E_off_set/(args['N1']*args['N2'])
    ####
    log_Z_vs_temp[b, 0] = Temp
    log_Z_vs_temp[b, 1] = log_Z_beta
    ###
    energy_expectation_dt[b, 0]=Temp
    energy_expectation_dt[b, 1]=energy_expct_beta
#######
dLogZ_vs_temp=np.zeros([num_temp-1, 3])
dLogZ_vs_temp[:, 1] = np.array([log_Z_vs_temp[b+1,1 ]-log_Z_vs_temp[b, 1]\
for b in range(num_temp-1)])
dLogZ_vs_temp[:, 0] = Temps[1:]
#######
dEBeta_vs_temp=np.zeros([num_temp-1, 2])
dEBeta_vs_temp[:, 1]= np.array([ energy_expectation_dt[b+1,1 ]*(1./Temps[b+1])\
- energy_expectation_dt[b,1 ]*(1./Temps[b])
for b in range(num_temp-1)])
dEBeta_vs_temp[:, 0]=Temps[1:]
#######
#S = [energy_expectation_dt[b,1] *(1./Temps[b]) + log_Z_vs_temp[b,1] for b in range(num_temp) ]
#S = np.array(S)
dS = dEBeta_vs_temp[:,1] + dLogZ_vs_temp[:,1]
S=[np.sum(dS[:t]) for t in range(len(dS)) ]
S= np.array(S) +np.log(2)
###############################################################################
fig, frame = plt.subplots(3,1, figsize=[10, 10])
frame[0].bar(energy_hist[:, 0], energy_hist[:,1])
frame[2].plot(Temps[1:], S, '-o',
label='entropy per spin - log(2)\nn_sample=%d'%args['N_samples'])
frame[1].plot(Temps, energy_expectation_dt[:, 1],
'-o', label='energy per spin')
frame[2].legend()
frame[1].legend() | [
"FunctionsLayer2.getPartitionFunctionZ.getPartitionFunctionZ",
"FunctionsLayer1.Lattices.latticeConstructor.constructLattice",
"FunctionsLayer2.getSampleEnergyArray_MC.getSampleEnergyArray_MC",
"FunctionsLayer2.getEnergyHist.getEnergyHist",
"numpy.log",
"FunctionsLayer2.getEnergyExpectation.getEnergyExpec... | [((459, 479), 'numpy.random.seed', 'np.random.seed', (['(1001)'], {}), '(1001)\n', (473, 479), True, 'import numpy as np\n'), ((541, 554), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (547, 554), True, 'import numpy as np\n'), ((560, 573), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (566, 573), True, 'import numpy as np\n'), ((826, 850), 'FunctionsLayer1.Lattices.latticeConstructor.constructLattice', 'constructLattice', ([], {}), '(**args)\n', (842, 850), False, 'from FunctionsLayer1.Lattices.latticeConstructor import constructLattice\n'), ((1591, 1614), 'numpy.zeros', 'np.zeros', (['[num_temp, 2]'], {}), '([num_temp, 2])\n', (1599, 1614), True, 'import numpy as np\n'), ((1639, 1662), 'numpy.zeros', 'np.zeros', (['[num_temp, 2]'], {}), '([num_temp, 2])\n', (1647, 1662), True, 'import numpy as np\n'), ((2659, 2686), 'numpy.zeros', 'np.zeros', (['[num_temp - 1, 3]'], {}), '([num_temp - 1, 3])\n', (2667, 2686), True, 'import numpy as np\n'), ((2851, 2878), 'numpy.zeros', 'np.zeros', (['[num_temp - 1, 2]'], {}), '([num_temp - 1, 2])\n', (2859, 2878), True, 'import numpy as np\n'), ((1889, 1935), 'FunctionsLayer2.getSampleEnergyArray_MC.getSampleEnergyArray_MC', 'getSampleEnergyArray_MC', (['beta', 'scratch'], {}), '(beta, scratch, **args)\n', (1912, 1935), False, 'from FunctionsLayer2.getSampleEnergyArray_MC import getSampleEnergyArray_MC\n'), ((1990, 2031), 'FunctionsLayer2.getEnergyHist.getEnergyHist', 'getEnergyHist', (['sampleEnergy_array'], {}), '(sampleEnergy_array, **args)\n', (2003, 2031), False, 'from FunctionsLayer2.getEnergyHist import getEnergyHist\n'), ((2279, 2338), 'FunctionsLayer2.getPartitionFunctionZ.getPartitionFunctionZ', 'getPartitionFunctionZ', (['energy_hist', 'beta', 'E_off_set'], {}), '(energy_hist, beta, E_off_set, **args)\n', (2300, 2338), False, 'from FunctionsLayer2.getPartitionFunctionZ import getPartitionFunctionZ\n'), ((2360, 2369), 'numpy.log', 'np.log', (['Z'], 
{}), '(Z)\n', (2366, 2369), True, 'import numpy as np\n'), ((3274, 3288), 'numpy.sum', 'np.sum', (['dS[:t]'], {}), '(dS[:t])\n', (3280, 3288), True, 'import numpy as np\n'), ((3318, 3329), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (3326, 3329), True, 'import numpy as np\n'), ((3331, 3340), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3337, 3340), True, 'import numpy as np\n'), ((2186, 2244), 'FunctionsLayer2.getEnergyExpectation.getEnergyExpectation', 'getEnergyExpectation', (['energy_hist', 'beta', 'E_off_set'], {}), '(energy_hist, beta, E_off_set, **args)\n', (2206, 2244), False, 'from FunctionsLayer2.getEnergyExpectation import getEnergyExpectation\n')] |
from utils import detector_utils as detector_utils
from libs.pconv_layer import PConv2D
import cv2
import tensorflow as tf
import datetime
import argparse
import numpy as np
import keras
# ---- tunables ----
thresh = 0.9        # min averaged softmax confidence to accept a gesture class
moving_num = 3      # number of consecutive predictions averaged before updating status
m_input_size = 256  # input resolution of the partial-convolution inpainting model
# SSD hand detector graph + TF session
detection_graph, sess = detector_utils.load_inference_graph()
print("model loading...")
# gesture classifier; dummy predict warms up the model
model_hand = keras.models.load_model('model/model_hand.h5', compile=False)
_ = model_hand.predict(np.zeros((1,96,96,3)))
# partial-convolution inpainting model (image + mask inputs)
model_partial = keras.models.load_model('model/model_partial.h5', compile=False, custom_objects={'PConv2D': PConv2D})
_ = model_partial.predict([np.zeros((1,m_input_size,m_input_size,3)), np.zeros((1,m_input_size,m_input_size,3))])
# ---- mutable state shared with hand_classfier() ----
flag = False          # True once a pointer stroke has started
start_flag = False    # set by the 's' key; gestures ignored until then
status = "none"       # current gesture: "pointer" / "goo" / "anomaly" / "none"
matrix = []           # recorded pointer-trail corner points
predict_num = 0       # predictions accumulated so far in the moving window
result = np.zeros((1,3))  # running sum of classifier softmax outputs
def hand_classfier(num_hands_detect, score_thresh, scores, boxes, im_width, im_height, image_np):
    """Classify the top hand detection, update shared gesture state, and
    return the annotated (possibly inpainted) frame.

    ``scores``/``boxes`` come from the SSD hand detector; boxes are normalized
    ``[top, left, bottom, right]``.  Module-level globals ``status``,
    ``predict_num``, ``result``, ``matrix`` and ``flag`` carry state across
    frames.

    Fix: the post-inpainting draw section tested ``status == "magic"``, which
    can never be true inside the ``status == "goo"`` branch, so the "goo"
    rectangle was always drawn in the fall-through colour; it now mirrors the
    first draw section.
    """
    global status, predict_num, result, matrix, flag
    if (scores[0] > score_thresh):
        # denormalize the best detection box
        (left, right, top, bottom) = (boxes[0][1] * im_width, boxes[0][3] * im_width,
                                      boxes[0][0] * im_height, boxes[0][2] * im_height)
        p1 = (int(left), int(top))
        p2 = (int(right), int(bottom))
        # gesture classifier: crop the hand, resize to the 96x96 model input
        img = cv2.cvtColor(image_np[int(top):int(bottom), int(left):int(right)], cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (96, 96)) / 255
        score = model_hand.predict(np.expand_dims(img, axis=0))
        result += score
        predict_num += 1
        # update status from the moving average every moving_num frames
        if predict_num == moving_num:
            if np.argmax(result) == 1:
                status = "pointer"
                if np.max(result)/moving_num < thresh:
                    status = "anomaly"
                if start_flag == False:
                    status = "anomaly"
            elif np.argmax(result) == 2:
                status = "goo"
                if np.max(result)/moving_num < thresh:
                    status = "anomaly"
                if start_flag == False:
                    status = "anomaly"
            else:
                status = "anomaly"
            result *= 0
            predict_num = 0
        # draw the hand box (red = pointer, light blue = goo, green = other)
        if status == "pointer":
            cv2.rectangle(image_np, p1, p2, (77, 77, 255), 3, 1)
        elif status == "goo":
            cv2.rectangle(image_np, p1, p2, (255, 241, 144), 3, 1)
        else:
            cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
        # while pointing, record the box corner to build the trail rectangle
        if status == "pointer":
            flag = True
            matrix.append([int(left), int(top)])
        # "goo" after a pointer stroke triggers the inpainting ("magic") step
        if flag == True and status == "goo":
            # binary mask covering the bounding rectangle of the pointer trail
            img = np.zeros(image_np.shape, np.uint8)
            xy = np.array(matrix)
            p1_ = (int(np.min(xy[:,0])), int(np.min(xy[:,1])))
            p2_ = (int(np.max(xy[:,0])), int(np.max(xy[:,1])))
            cv2.rectangle(img, p1_, p2_, (1, 1, 1), thickness=-1)
            img = cv2.resize(img, (m_input_size, m_input_size))
            mask = 1-img
            # image with the masked region whited out, as the model expects
            img = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (m_input_size, m_input_size)) / 255
            img[mask==0] = 1
            predict_img = model_partial.predict([np.expand_dims(img, axis=0), np.expand_dims(mask, axis=0)])
            output = cv2.resize(predict_img[0], (image_np.shape[0], image_np.shape[1]))
            image_np = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
            # redraw the hand box on the inpainted frame
            # (FIX: was `elif status == "magic"` -- unreachable, see docstring)
            if status == "pointer":
                cv2.rectangle(image_np, p1, p2, (77, 77, 255), 3, 1)
            elif status == "goo":
                cv2.rectangle(image_np, p1, p2, (255, 241, 144), 3, 1)
            else:
                cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
    # pointer trail rectangle, shown while not in the "goo" state
    if flag == True and not status == "goo":
        if len(matrix) > 2:
            xy = np.array(matrix)
            p1 = (int(np.min(xy[:,0])), int(np.min(xy[:,1])))
            p2 = (int(np.max(xy[:,0])), int(np.max(xy[:,1])))
            cv2.rectangle(image_np, p1, p2, (255, 77, 77), 3, 1)
    return image_np
if __name__ == '__main__':
    # command-line options for the webcam demo
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-sth',
        '--scorethreshold',
        dest='score_thresh',
        type=float,
        default=0.6,#0.2
        help='Score threshold for displaying bounding boxes')
    parser.add_argument(
        '-fps',
        '--fps',
        dest='fps',
        type=int,
        default=1,
        help='Show FPS on detection/display visualization')
    parser.add_argument(
        '-src',
        '--source',
        dest='video_source',
        default=0,
        help='Device index of the camera.')
    parser.add_argument(
        '-wd',
        '--width',
        dest='width',
        type=int,
        default=352,
        help='Width of the frames in the video stream.')
    parser.add_argument(
        '-ht',
        '--height',
        dest='height',
        type=int,
        default=288,
        help='Height of the frames in the video stream.')
    parser.add_argument(
        '-ds',
        '--display',
        dest='display',
        type=int,
        default=1,
        help='Display the detected images using OpenCV. This reduces FPS')
    parser.add_argument(
        '-num-w',
        '--num-workers',
        dest='num_workers',
        type=int,
        default=4,
        help='Number of workers.')
    parser.add_argument(
        '-q-size',
        '--queue-size',
        dest='queue_size',
        type=int,
        default=5,
        help='Size of the queue.')
    args = parser.parse_args()
    # open the camera and request the configured capture resolution
    cap = cv2.VideoCapture(args.video_source)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
    start_time = datetime.datetime.now()
    num_frames = 0
    # detection coordinates are computed in the cropped 256x256 frame
    im_width, im_height = (m_input_size, m_input_size)#(cap.get(3), cap.get(4))
    # max number of hands we want to detect/track
    num_hands_detect = 2
    cv2.namedWindow('Single-Threaded Detection', cv2.WINDOW_NORMAL)
    while True:
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        ret, image_np = cap.read()
        # crop the frame to the centered 256x256 region used by the models
        image_np = image_np[16:272, 48:304]
        # image_np = cv2.flip(image_np, 1)
        # NOTE(review): bare `except:` swallows every error here, including
        # the None frame returned when the camera read fails -- should catch
        # cv2.error (or check `ret`) instead.
        try:
            image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        except:
            print("Error converting to RGB")
        # Actual detection. Variable boxes contains the bounding box cordinates for hands detected,
        # while scores contains the confidence for each of these boxes.
        # Hint: If len(boxes) > 1 , you may assume you have found atleast one hand (within your score threshold)
        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)
        # classify the gesture, update state, and annotate/inpaint the frame
        image_np = hand_classfier(num_hands_detect, args.score_thresh,
                                 scores, boxes, im_width, im_height,
                                 image_np)
        # Calculate Frames per second (FPS)
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time
        if (args.display > 0):
            # Display FPS on frame
            if (args.fps > 0):
                detector_utils.draw_fps_on_image("FPS : " + str(int(fps)),
                                                 image_np)
            cv2.imshow('Single-Threaded Detection',
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
            # keyboard controls: q = quit, r = reset gesture state, s = start
            key = cv2.waitKey(25)&0xFF
            if key == ord("q"):
                cv2.destroyAllWindows()
                break
            if key == ord("r"):
                flag = False
                start_flag = False
                status = "none"
                matrix = []
            if key == ord("s"):
                start_flag = True
        else:
            print("frames processed: ", num_frames, "elapsed time: ",
                  elapsed_time, "fps: ", str(int(fps)))
| [
"cv2.rectangle",
"keras.models.load_model",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.min",
"numpy.max",
"utils.detector_utils.load_inference_graph",
"datetime.datetime.now",
"numpy.zeros",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"numpy.expand_dims",
"cv2.cvtCol... | [((273, 310), 'utils.detector_utils.load_inference_graph', 'detector_utils.load_inference_graph', ([], {}), '()\n', (308, 310), True, 'from utils import detector_utils as detector_utils\n'), ((352, 413), 'keras.models.load_model', 'keras.models.load_model', (['"""model/model_hand.h5"""'], {'compile': '(False)'}), "('model/model_hand.h5', compile=False)\n", (375, 413), False, 'import keras\n'), ((478, 583), 'keras.models.load_model', 'keras.models.load_model', (['"""model/model_partial.h5"""'], {'compile': '(False)', 'custom_objects': "{'PConv2D': PConv2D}"}), "('model/model_partial.h5', compile=False,\n custom_objects={'PConv2D': PConv2D})\n", (501, 583), False, 'import keras\n'), ((786, 802), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (794, 802), True, 'import numpy as np\n'), ((438, 462), 'numpy.zeros', 'np.zeros', (['(1, 96, 96, 3)'], {}), '((1, 96, 96, 3))\n', (446, 462), True, 'import numpy as np\n'), ((4384, 4409), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4407, 4409), False, 'import argparse\n'), ((5932, 5967), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video_source'], {}), '(args.video_source)\n', (5948, 5967), False, 'import cv2\n'), ((6092, 6115), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6113, 6115), False, 'import datetime\n'), ((6301, 6364), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Single-Threaded Detection"""', 'cv2.WINDOW_NORMAL'], {}), "('Single-Threaded Detection', cv2.WINDOW_NORMAL)\n", (6316, 6364), False, 'import cv2\n'), ((608, 652), 'numpy.zeros', 'np.zeros', (['(1, m_input_size, m_input_size, 3)'], {}), '((1, m_input_size, m_input_size, 3))\n', (616, 652), True, 'import numpy as np\n'), ((651, 695), 'numpy.zeros', 'np.zeros', (['(1, m_input_size, m_input_size, 3)'], {}), '((1, m_input_size, m_input_size, 3))\n', (659, 695), True, 'import numpy as np\n'), ((7067, 7129), 'utils.detector_utils.detect_objects', 'detector_utils.detect_objects', 
(['image_np', 'detection_graph', 'sess'], {}), '(image_np, detection_graph, sess)\n', (7096, 7129), True, 'from utils import detector_utils as detector_utils\n'), ((1397, 1422), 'cv2.resize', 'cv2.resize', (['img', '(96, 96)'], {}), '(img, (96, 96))\n', (1407, 1422), False, 'import cv2\n'), ((1465, 1492), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1479, 1492), True, 'import numpy as np\n'), ((2320, 2372), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(77, 77, 255)', '(3)', '(1)'], {}), '(image_np, p1, p2, (77, 77, 255), 3, 1)\n', (2333, 2372), False, 'import cv2\n'), ((2838, 2872), 'numpy.zeros', 'np.zeros', (['image_np.shape', 'np.uint8'], {}), '(image_np.shape, np.uint8)\n', (2846, 2872), True, 'import numpy as np\n'), ((2891, 2907), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (2899, 2907), True, 'import numpy as np\n'), ((3049, 3102), 'cv2.rectangle', 'cv2.rectangle', (['img', 'p1_', 'p2_', '(1, 1, 1)'], {'thickness': '(-1)'}), '(img, p1_, p2_, (1, 1, 1), thickness=-1)\n', (3062, 3102), False, 'import cv2\n'), ((3122, 3167), 'cv2.resize', 'cv2.resize', (['img', '(m_input_size, m_input_size)'], {}), '(img, (m_input_size, m_input_size))\n', (3132, 3167), False, 'import cv2\n'), ((3243, 3284), 'cv2.cvtColor', 'cv2.cvtColor', (['image_np', 'cv2.COLOR_BGR2RGB'], {}), '(image_np, cv2.COLOR_BGR2RGB)\n', (3255, 3284), False, 'import cv2\n'), ((3520, 3586), 'cv2.resize', 'cv2.resize', (['predict_img[0]', '(image_np.shape[0], image_np.shape[1])'], {}), '(predict_img[0], (image_np.shape[0], image_np.shape[1]))\n', (3530, 3586), False, 'import cv2\n'), ((3611, 3650), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_RGB2BGR'], {}), '(output, cv2.COLOR_RGB2BGR)\n', (3623, 3650), False, 'import cv2\n'), ((3736, 3788), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(77, 77, 255)', '(3)', '(1)'], {}), '(image_np, p1, p2, (77, 77, 255), 3, 1)\n', (3749, 3788), False, 'import cv2\n'), 
((4106, 4122), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (4114, 4122), True, 'import numpy as np\n'), ((4262, 4314), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(255, 77, 77)', '(3)', '(1)'], {}), '(image_np, p1, p2, (255, 77, 77), 3, 1)\n', (4275, 4314), False, 'import cv2\n'), ((6645, 6686), 'cv2.cvtColor', 'cv2.cvtColor', (['image_np', 'cv2.COLOR_BGR2RGB'], {}), '(image_np, cv2.COLOR_BGR2RGB)\n', (6657, 6686), False, 'import cv2\n'), ((1627, 1644), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (1636, 1644), True, 'import numpy as np\n'), ((2425, 2479), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(255, 241, 144)', '(3)', '(1)'], {}), '(image_np, p1, p2, (255, 241, 144), 3, 1)\n', (2438, 2479), False, 'import cv2\n'), ((2516, 2567), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(77, 255, 9)', '(3)', '(1)'], {}), '(image_np, p1, p2, (77, 255, 9), 3, 1)\n', (2529, 2567), False, 'import cv2\n'), ((3304, 3349), 'cv2.resize', 'cv2.resize', (['img', '(m_input_size, m_input_size)'], {}), '(img, (m_input_size, m_input_size))\n', (3314, 3349), False, 'import cv2\n'), ((3835, 3889), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(255, 241, 144)', '(3)', '(1)'], {}), '(image_np, p1, p2, (255, 241, 144), 3, 1)\n', (3848, 3889), False, 'import cv2\n'), ((3926, 3977), 'cv2.rectangle', 'cv2.rectangle', (['image_np', 'p1', 'p2', '(77, 255, 9)', '(3)', '(1)'], {}), '(image_np, p1, p2, (77, 255, 9), 3, 1)\n', (3939, 3977), False, 'import cv2\n'), ((7938, 7979), 'cv2.cvtColor', 'cv2.cvtColor', (['image_np', 'cv2.COLOR_RGB2BGR'], {}), '(image_np, cv2.COLOR_RGB2BGR)\n', (7950, 7979), False, 'import cv2\n'), ((8002, 8017), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (8013, 8017), False, 'import cv2\n'), ((8073, 8096), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8094, 8096), False, 'import cv2\n'), ((1879, 1896), 'numpy.argmax', 'np.argmax', (['result'], 
{}), '(result)\n', (1888, 1896), True, 'import numpy as np\n'), ((2932, 2948), 'numpy.min', 'np.min', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (2938, 2948), True, 'import numpy as np\n'), ((2954, 2970), 'numpy.min', 'np.min', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (2960, 2970), True, 'import numpy as np\n'), ((2996, 3012), 'numpy.max', 'np.max', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (3002, 3012), True, 'import numpy as np\n'), ((3018, 3034), 'numpy.max', 'np.max', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (3024, 3034), True, 'import numpy as np\n'), ((3436, 3463), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3450, 3463), True, 'import numpy as np\n'), ((3465, 3493), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3479, 3493), True, 'import numpy as np\n'), ((4146, 4162), 'numpy.min', 'np.min', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (4152, 4162), True, 'import numpy as np\n'), ((4168, 4184), 'numpy.min', 'np.min', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (4174, 4184), True, 'import numpy as np\n'), ((4209, 4225), 'numpy.max', 'np.max', (['xy[:, 0]'], {}), '(xy[:, 0])\n', (4215, 4225), True, 'import numpy as np\n'), ((4231, 4247), 'numpy.max', 'np.max', (['xy[:, 1]'], {}), '(xy[:, 1])\n', (4237, 4247), True, 'import numpy as np\n'), ((7526, 7549), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7547, 7549), False, 'import datetime\n'), ((1705, 1719), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (1711, 1719), True, 'import numpy as np\n'), ((1953, 1967), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (1959, 1967), True, 'import numpy as np\n')] |
# make the parent directory importable when run as a script
if '__file__' in globals():
    import os
    import sys
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import numpy as np
import dezero as dz
# second-order differentiation demo:
# y = x**2, gx = dy/dx retained as a graph, z = gx**3 + y,
# then a second backward pass computes dz/dx.
x = dz.Variable(np.array(2.0))
y = x ** 2
y.backward(create_graph=True)  # keep the graph so x.grad is itself differentiable
gx = x.grad
x.cleargrad()  # reset before the second backward so gradients do not accumulate
z = gx ** 3 + y
z.backward()
# z = (2x)^3 + x^2 = 8x^3 + x^2, so dz/dx = 24x^2 + 2x = 100 at x = 2
print(x.grad)
| [
"os.path.dirname",
"numpy.array"
] | [((182, 195), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (190, 195), True, 'import numpy as np\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n')] |
# -*- coding: UTF-8 -*-
# Compare recordings against a reference via DTW over their MFCCs.
import numpy as np
from dtw import dtw
from numpy.linalg import norm
# from librosa.display import specshow
# import matplotlib as plt
from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
# reference recording: short-term features with 256-sample windows, 80-sample step
[Fs, k] = audioBasicIO.readAudioFile("D:/ML/dataset_paper/person1/throat1.wav")
F = audioFeatureExtraction.stFeatureExtraction(k, Fs, 256, 80)
# np.savetxt("dtw1.csv", F, delimiter=',')
# rows 9:21 of the feature matrix -- presumably the MFCC block of
# pyAudioAnalysis's feature ordering; TODO confirm against the library version
mfcc_1 = F[9:21]
print(mfcc_1.shape)
# my_file = ['39678_train.wav', '39678_test.wav', '39678_test2.wav', '39678_machine.wav', '40096.wav']
# NOTE(review): the first entry is the reference itself, so its DTW distance
# is expected to be ~0 (a sanity check, or possibly unintended).
my_file = ['throat1.wav', 'throat2.wav', 'throat3.wav', 'common1.wav', 'common2.wav']
for file in my_file:
    [Fs, k] = audioBasicIO.readAudioFile("D:/ML/dataset_paper/person1/" + file)
    F = audioFeatureExtraction.stFeatureExtraction(k, Fs, 256, 80)
    mfcc_2 = F[9:21]
    # DTW alignment with L1 distance between MFCC frame vectors
    dist, cost, acc_cost, path = dtw(mfcc_1.T, mfcc_2.T, dist=lambda x, y: norm(x - y, ord=1))
    print(file, ':Normalized distance between the two sounds:', dist)
    # visualise the cost matrix with the warping path overlaid
    imshow(cost.T, origin='lower', cmap='gray', interpolation='nearest')
    plot(path[0], path[1], 'w')
    xlim((-0.5, cost.shape[0] - 0.5))
    ylim((-0.5, cost.shape[1] - 0.5))
    title(file)
    show()
| [
"pyAudioAnalysis.audioBasicIO.readAudioFile",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction",
"numpy.linalg.norm",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show"
] | [((331, 400), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (['"""D:/ML/dataset_paper/person1/throat1.wav"""'], {}), "('D:/ML/dataset_paper/person1/throat1.wav')\n", (357, 400), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((405, 463), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction', 'audioFeatureExtraction.stFeatureExtraction', (['k', 'Fs', '(256)', '(80)'], {}), '(k, Fs, 256, 80)\n', (447, 463), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((772, 837), 'pyAudioAnalysis.audioBasicIO.readAudioFile', 'audioBasicIO.readAudioFile', (["('D:/ML/dataset_paper/person1/' + file)"], {}), "('D:/ML/dataset_paper/person1/' + file)\n", (798, 837), False, 'from pyAudioAnalysis import audioBasicIO\n'), ((846, 904), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction', 'audioFeatureExtraction.stFeatureExtraction', (['k', 'Fs', '(256)', '(80)'], {}), '(k, Fs, 256, 80)\n', (888, 904), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((1096, 1164), 'matplotlib.pyplot.imshow', 'imshow', (['cost.T'], {'origin': '"""lower"""', 'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(cost.T, origin='lower', cmap='gray', interpolation='nearest')\n", (1102, 1164), False, 'from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title\n'), ((1169, 1196), 'matplotlib.pyplot.plot', 'plot', (['path[0]', 'path[1]', '"""w"""'], {}), "(path[0], path[1], 'w')\n", (1173, 1196), False, 'from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title\n'), ((1201, 1234), 'matplotlib.pyplot.xlim', 'xlim', (['(-0.5, cost.shape[0] - 0.5)'], {}), '((-0.5, cost.shape[0] - 0.5))\n', (1205, 1234), False, 'from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title\n'), ((1239, 1272), 'matplotlib.pyplot.ylim', 'ylim', (['(-0.5, cost.shape[1] - 0.5)'], {}), '((-0.5, cost.shape[1] - 0.5))\n', (1243, 1272), False, 'from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title\n'), ((1277, 
1288), 'matplotlib.pyplot.title', 'title', (['file'], {}), '(file)\n', (1282, 1288), False, 'from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title\n'), ((1294, 1300), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (1298, 1300), False, 'from matplotlib.pyplot import imshow, plot, xlim, ylim, show, title\n'), ((1002, 1020), 'numpy.linalg.norm', 'norm', (['(x - y)'], {'ord': '(1)'}), '(x - y, ord=1)\n', (1006, 1020), False, 'from numpy.linalg import norm\n')] |
# coding: utf-8
# # Experimenting with how to do fdr correction on masked array
# In[1]:
import os
from os.path import join as opj
# from nipype.interfaces import afni
import nibabel as nib
import json
import numpy as np
import os
from os.path import join as opj
import itertools
import nibabel as nib
from multiprocessing import Pool
# In[2]:
# # Paths
#
# path_cwd = os.getcwd()
# path_split_list = path_cwd.split('/')
# s = path_split_list[0:-1] # for getting to the parent dir of pwd
# s = opj('/',*s) # *s converts list to path, # very important to add '/' in the begining so it is read as directory later
#
#
#
# # In[3]:
#
#
# # os.chdir('/home1/varunk/Autism-Connectome-Analysis-bids-related/')
# # json_path = opj(data_directory,'task-rest_bold.json')
#
# json_path = 'scripts/json/paths.json'
# with open(json_path, 'rt') as fp:
# task_info = json.load(fp)
#
#
#
# # In[4]:
#
#
#
#
# base_directory = opj(s,task_info["base_directory_for_results"])
# motion_correction_bet_directory = task_info["motion_correction_bet_directory"]
# parent_wf_directory = task_info["parent_wf_directory"]
# # functional_connectivity_directory = task_info["functional_connectivity_directory"]
# functional_connectivity_directory = 'temp_fc'
# coreg_reg_directory = task_info["coreg_reg_directory"]
# atlas_resize_reg_directory = task_info["atlas_resize_reg_directory"]
# data_directory = opj(s,task_info["data_directory"])
# datasink_name = task_info["datasink_name"]
# # fc_datasink_name = task_info["fc_datasink_name"]
# fc_datasink_name = 'temp_dataSink'
# atlasPath = opj(s,task_info["atlas_path"])
#
# hypothesis_test_dir = opj(base_directory, task_info["hypothesis_test_dir"])
# -----------------------------------------------------------------------------------------------------------------------
# In[5]:
# base_directory
# In[6]:
# brain_voxel_list_rand = np.random.rand(10)
# In[7]:
def count_voxel_stats(pvals_list, qvals_list, map_logp_list, map_logq_list):
# P_brain_voxel_list, Q_brain_voxel_list = Pval_Qval_tuple
map_logp_list = np.absolute(map_logp_list)
map_logq_list = np.absolute(map_logq_list)
# min p value
min_pval = np.min(pvals_list)
# min q value
min_qval = np.min(qvals_list)
# p value less than 0.1
p_lt_point_1 = np.shape(np.where(pvals_list < 0.1))[1]
# p value less than 0.01
p_lt_point_01 = np.shape(np.where(pvals_list < 0.01))[1]
# p value less than 0.05
p_lt_point_05 = np.shape(np.where(pvals_list < 0.05))[1]
# p value less than 0.1
q_lt_point_1 = np.shape(np.where(qvals_list < 0.1))[1]
# p value less than 0.01
q_lt_point_01 = np.shape(np.where(qvals_list < 0.01))[1]
# p value less than 0.05
q_lt_point_05 = np.shape(np.where(qvals_list < 0.05))[1]
# Voxels with abs(sign(C1MinusC2)(-1*log10(Q)))) >1.3 (t 0.5)
logq_gt_1point3 = np.shape(np.where(map_logq_list > 1.3))[1]
# Voxels with abs(sign(C1MinusC2)(-1*log10(Q)))) >1 (t 0.1)
logq_gt_1 = np.shape(np.where(map_logq_list > 1))[1]
# Voxels with abs(sign(C1MinusC2)(-1*log10(Q)))) >2 (t 0.01)
logq_gt_2 = np.shape(np.where(map_logq_list > 2))[1]
# Voxels with abs(sign(C1MinusC2)(-1*log10(P)))) >1.3 (t 0.5)
logp_gt_1point3 = np.shape(np.where(map_logp_list > 1.3))[1]
# Voxels with abs(sign(C1MinusC2)(-1*log10(P)))) >1 (t 0.1)
logp_gt_1 = np.shape(np.where(map_logp_list > 1))[1]
# Voxels with abs(sign(C1MinusC2)(-1*log10(P)))) >2 (t 0.01)
logp_gt_2 = np.shape(np.where(map_logp_list > 2))[1]
return min_pval,min_qval,p_lt_point_1,p_lt_point_01,p_lt_point_05,q_lt_point_1, q_lt_point_01,q_lt_point_05, logq_gt_1point3, logq_gt_1 ,logq_gt_2 ,logp_gt_1point3, logp_gt_1, logp_gt_2
# In[8]:
def fdr_correction_and_viz(Pvals_path, Tvals_path, C1_path, C2_path, mask_path, save_destination, affine, header, combination):
    """FDR-correct voxelwise p-values per ROI and save statistic maps.

    Loads the hypothesis-test outputs (Pvals/Tvals/meanC1/meanC2 .npy
    arrays; indexing below implies a 4-D layout (x, y, z, n_rois) -- the
    last axis is iterated as ROIs), runs FDR correction on the in-mask
    voxels of every ROI, writes a per-ROI stats CSV and saves
    P/T/Q/C1-C2/log maps as NIfTI volumes under
    ``save_destination/<combination>``.

    Parameters
    ----------
    Pvals_path, Tvals_path, C1_path, C2_path : str
        Paths to .npy arrays produced by the hypothesis-testing step.
    mask_path : str
        NIfTI brain mask; only non-zero voxels are corrected.
    save_destination : str
        Root output directory; a ``<combination>`` subfolder is created.
    affine, header
        nibabel affine/header attached to every saved volume.
    combination : str
        Preprocessing-combination name, used in output file names.
    """
    alpha = 0.05  # FDR level passed to fdrcorrection0
    Pvals = np.load(Pvals_path)
    Tvals= np.load(Tvals_path)
    C1 = np.load(C1_path)
    C2 = np.load(C2_path)
    # NOTE(review): get_data() is deprecated in newer nibabel; get_fdata()
    # is the replacement -- confirm the installed nibabel version.
    mask = nib.load(mask_path).get_data()
    brain_indices = np.where(mask != 0 )  # indices of in-mask voxels
    from statsmodels.sandbox.stats.multicomp import fdrcorrection0
    Pvals_shape = Pvals.shape
    Qvals = np.zeros(Pvals_shape)
    map_C1MinusC2 = C1 - C2
    # sign(c1-c2) * -1 * log10(p): signed significance map.
    map_logp = np.multiply(np.sign(map_C1MinusC2),(-1*np.log10(Pvals)))
    roi_voxel_stats_matrix = np.zeros((Pvals_shape[3], 14)) # 14 statistical attributes per ROI
    for roi in range(Pvals_shape[3]):
        print('Computing Stats for ROI: ',roi)
        pvals = Pvals[:,:,:,roi]
        pvals_shape = pvals.shape
        # Correct only the in-mask voxels (extracted as a flat 1-D array).
        pvals_list = pvals[brain_indices]
        _, qvals_list = fdrcorrection0(pvals_list,alpha)
        map_logp_list = map_logp[:,:,:,roi][brain_indices]
        map_C1MinusC2_list = map_C1MinusC2[:,:,:,roi][brain_indices]
        # Scatter the corrected q-values back into the 4-D volume.
        Qvals[:,:,:,roi][brain_indices] = qvals_list
        map_logq_list = np.multiply(np.sign(map_C1MinusC2_list),(-1*np.log10(qvals_list)))
        roi_voxel_stats_matrix[roi,:] = count_voxel_stats(pvals_list, qvals_list, map_logp_list, map_logq_list)
    # sign(c1-c2) * -1 * log10(q), computed once all ROIs are corrected.
    map_logq = np.multiply(np.sign(map_C1MinusC2),(-1*np.log10(Qvals)))
    save_destination_new = opj(save_destination,combination)
    if not os.path.exists(save_destination_new):
        os.mkdir(save_destination_new)
    print('Saving Files in directory: ', save_destination_new)
    print('Saving Stats CSV : ',)
    # NOTE(review): csv_name is a relative path, so the CSV lands in the
    # current working directory (os.chdir'd in _main), NOT in
    # save_destination_new -- confirm this is intended.
    csv_name = 'roi_voxel_stats_' + combination + '.csv'
    np.savetxt(csv_name,roi_voxel_stats_matrix,delimiter=',',header='min_pval,min_qval,p_lt_point_1,p_lt_point_01, p_lt_point_05, q_lt_point_1, q_lt_point_01,q_lt_point_05, logq_gt_1point3, logq_gt_1 ,logq_gt_2 ,logp_gt_1point3, logp_gt_1, logp_gt_2'
    )
    # Each statistic map is wrapped with the caller-provided affine/header
    # and written as compressed NIfTI.
    print('Saving Pvals.nii.gz')
    Pvals_name = opj(save_destination_new,'Pvals.nii.gz')
    Pvals_brain_with_header = nib.Nifti1Image(Pvals, affine= affine,header = header)
    nib.save(Pvals_brain_with_header,Pvals_name)
    print('Saving Tvals.nii.gz')
    Tvals_name = opj(save_destination_new,'Tvals.nii.gz')
    Tvals_brain_with_header = nib.Nifti1Image(Tvals, affine= affine,header = header)
    nib.save(Tvals_brain_with_header,Tvals_name)
    print('Saving Qvals.nii.gz')
    Qvals_name = opj(save_destination_new,'Qvals.nii.gz')
    Qvals_brain_with_header = nib.Nifti1Image(Qvals, affine= affine,header = header)
    nib.save(Qvals_brain_with_header,Qvals_name)
    print('Saving C1MinusC2.nii.gz')
    C1MinusC2_name = opj(save_destination_new,'C1MinusC2.nii.gz')
    C1MinusC2_brain_with_header = nib.Nifti1Image(map_C1MinusC2, affine= affine,header = header)
    nib.save(C1MinusC2_brain_with_header,C1MinusC2_name)
    print('Saving map_logp.nii.gz')
    map_logp_name = opj(save_destination_new,'map_logp.nii.gz')
    map_logp_brain_with_header = nib.Nifti1Image(map_logp, affine= affine,header = header)
    nib.save(map_logp_brain_with_header,map_logp_name)
    print('Saving map_logq.nii.gz')
    map_logq_name = opj(save_destination_new,'map_logq.nii.gz')
    map_logq_brain_with_header = nib.Nifti1Image(map_logq, affine= affine,header = header)
    nib.save(map_logq_brain_with_header,map_logq_name)
# In[ ]:
# base_directory
# In[ ]:
def _main(params):
    """Run FDR correction + visualisation for one preprocessing combination.

    Parameters
    ----------
    params : tuple
        (combination, base_directory, hypothesis_test_dir, header,
        mask_path, affine, fdr_results_dir), unpacked below.

    Side effects
    ------------
    Creates (if needed) and chdirs into the per-combination results
    directory, then delegates to ``fdr_correction_and_viz``.
    """
    combination, base_directory, hypothesis_test_dir, header, mask_path, affine, fdr_results_dir = params
    # Fall back to the default results directory name.
    # ('is None' rather than '== None': identity is the correct idiom.)
    if fdr_results_dir is None:
        fdr_results_dir = 'fdr_and_results_modular'
    save_destination = opj(base_directory,fdr_results_dir,combination)
    if not os.path.exists(save_destination):
        os.makedirs(save_destination)
    # NOTE: chdir affects the whole process; relative paths written later
    # (e.g. the stats CSV in fdr_correction_and_viz) land here.
    os.chdir(save_destination)
    print('Saving the results in ',save_destination)
    print("Combination: ",combination)
    # Inputs produced by the earlier hypothesis-testing step.
    Pvals_path = opj(hypothesis_test_dir,combination,'Pvals.npy')
    Tvals_path = opj(hypothesis_test_dir,combination,'Tvals.npy')
    C1_path = opj(hypothesis_test_dir,combination,'meanC1.npy')
    C2_path = opj(hypothesis_test_dir,combination,'meanC2.npy')
    fdr_correction_and_viz(Pvals_path, Tvals_path, C1_path, C2_path, mask_path,
                           save_destination, affine, header, combination)
def main(paths, calc_residual, smoothing, band_pass_filtering, volCorrect,
         num_proc = 7, calc_residual_options = None):
    """Drive FDR correction for one preprocessing combination via a pool.

    Parameters
    ----------
    paths : dict
        Must provide 'base_directory', 'fc_datasink_name', 'brain_path',
        'hypothesis_test_dir', 'fdr_results_dir' and
        'binarized_atlas_mask_path'.
    calc_residual, smoothing, band_pass_filtering : bool-like
        Preprocessing flags encoded into the combination name.
    volCorrect : unused here; kept for interface compatibility.
    num_proc : int
        Worker count for the multiprocessing pool.
    calc_residual_options : iterable of str, optional
        Option tags appended to the combination name; None means none.
    """
    base_directory=paths['base_directory']
    fc_datasink_name=paths['fc_datasink_name']
    brain_path=paths['brain_path']
    hypothesis_test_dir = paths['hypothesis_test_dir']
    fdr_results_dir = paths['fdr_results_dir']
    binarized_atlas_mask_path = paths['binarized_atlas_mask_path']
    # Brain voxels are restricted to the binarized atlas mask.
    mask_path = binarized_atlas_mask_path
    # Bug fix: the declared default calc_residual_options=None used to
    # crash the concatenation loop; treat None as "no options".
    if calc_residual_options is None:
        calc_residual_options = ()
    comb = ''.join(calc_residual_options)
    combination = 'calc_residual' + str(int(calc_residual)) + \
                  'smoothing' + str(int(smoothing)) +\
                  'filt' + str(int(band_pass_filtering)) +\
                  'calc_residual_options' + comb
    print("Combination: ",combination)
    fc_file_list = opj(base_directory,fc_datasink_name,combination,'fc_map_brain_file_list.npy')
    # Use the first functional-connectivity brain only to obtain an
    # affine/header for saving result volumes later.
    brain_path = fc_file_list
    brain_path = np.load(brain_path)[0]
    brain_data = nib.load(brain_path)
    affine=brain_data.affine
    header = brain_data.header
    pool = Pool(num_proc)
    # A single work item today, but pool.map keeps the door open for
    # fanning out over several combinations.
    itr = [(combination, base_directory, hypothesis_test_dir, header, mask_path, affine, fdr_results_dir)]
    data_outputs = pool.map(_main, itr)
# ------------------------------------------------------------------------------------------------------------------------
| [
"os.path.exists",
"numpy.log10",
"nibabel.save",
"os.makedirs",
"nibabel.load",
"numpy.where",
"numpy.absolute",
"os.path.join",
"os.chdir",
"statsmodels.sandbox.stats.multicomp.fdrcorrection0",
"numpy.zeros",
"nibabel.Nifti1Image",
"multiprocessing.Pool",
"numpy.savetxt",
"numpy.min",
... | [((2080, 2106), 'numpy.absolute', 'np.absolute', (['map_logp_list'], {}), '(map_logp_list)\n', (2091, 2106), True, 'import numpy as np\n'), ((2127, 2153), 'numpy.absolute', 'np.absolute', (['map_logq_list'], {}), '(map_logq_list)\n', (2138, 2153), True, 'import numpy as np\n'), ((2188, 2206), 'numpy.min', 'np.min', (['pvals_list'], {}), '(pvals_list)\n', (2194, 2206), True, 'import numpy as np\n'), ((2241, 2259), 'numpy.min', 'np.min', (['qvals_list'], {}), '(qvals_list)\n', (2247, 2259), True, 'import numpy as np\n'), ((3898, 3917), 'numpy.load', 'np.load', (['Pvals_path'], {}), '(Pvals_path)\n', (3905, 3917), True, 'import numpy as np\n'), ((3929, 3948), 'numpy.load', 'np.load', (['Tvals_path'], {}), '(Tvals_path)\n', (3936, 3948), True, 'import numpy as np\n'), ((3958, 3974), 'numpy.load', 'np.load', (['C1_path'], {}), '(C1_path)\n', (3965, 3974), True, 'import numpy as np\n'), ((3984, 4000), 'numpy.load', 'np.load', (['C2_path'], {}), '(C2_path)\n', (3991, 4000), True, 'import numpy as np\n'), ((4067, 4086), 'numpy.where', 'np.where', (['(mask != 0)'], {}), '(mask != 0)\n', (4075, 4086), True, 'import numpy as np\n'), ((4200, 4221), 'numpy.zeros', 'np.zeros', (['Pvals_shape'], {}), '(Pvals_shape)\n', (4208, 4221), True, 'import numpy as np\n'), ((4390, 4420), 'numpy.zeros', 'np.zeros', (['(Pvals_shape[3], 14)'], {}), '((Pvals_shape[3], 14))\n', (4398, 4420), True, 'import numpy as np\n'), ((6020, 6054), 'os.path.join', 'opj', (['save_destination', 'combination'], {}), '(save_destination, combination)\n', (6023, 6054), True, 'from os.path import join as opj\n'), ((6302, 6562), 'numpy.savetxt', 'np.savetxt', (['csv_name', 'roi_voxel_stats_matrix'], {'delimiter': '""","""', 'header': '"""min_pval,min_qval,p_lt_point_1,p_lt_point_01, p_lt_point_05, q_lt_point_1, q_lt_point_01,q_lt_point_05, logq_gt_1point3, logq_gt_1 ,logq_gt_2 ,logp_gt_1point3, logp_gt_1, logp_gt_2"""'}), "(csv_name, roi_voxel_stats_matrix, delimiter=',', header=\n 
'min_pval,min_qval,p_lt_point_1,p_lt_point_01, p_lt_point_05, q_lt_point_1, q_lt_point_01,q_lt_point_05, logq_gt_1point3, logq_gt_1 ,logq_gt_2 ,logp_gt_1point3, logp_gt_1, logp_gt_2'\n )\n", (6312, 6562), True, 'import numpy as np\n'), ((6618, 6659), 'os.path.join', 'opj', (['save_destination_new', '"""Pvals.nii.gz"""'], {}), "(save_destination_new, 'Pvals.nii.gz')\n", (6621, 6659), True, 'from os.path import join as opj\n'), ((6689, 6741), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['Pvals'], {'affine': 'affine', 'header': 'header'}), '(Pvals, affine=affine, header=header)\n', (6704, 6741), True, 'import nibabel as nib\n'), ((6748, 6793), 'nibabel.save', 'nib.save', (['Pvals_brain_with_header', 'Pvals_name'], {}), '(Pvals_brain_with_header, Pvals_name)\n', (6756, 6793), True, 'import nibabel as nib\n'), ((6844, 6885), 'os.path.join', 'opj', (['save_destination_new', '"""Tvals.nii.gz"""'], {}), "(save_destination_new, 'Tvals.nii.gz')\n", (6847, 6885), True, 'from os.path import join as opj\n'), ((6915, 6967), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['Tvals'], {'affine': 'affine', 'header': 'header'}), '(Tvals, affine=affine, header=header)\n', (6930, 6967), True, 'import nibabel as nib\n'), ((6974, 7019), 'nibabel.save', 'nib.save', (['Tvals_brain_with_header', 'Tvals_name'], {}), '(Tvals_brain_with_header, Tvals_name)\n', (6982, 7019), True, 'import nibabel as nib\n'), ((7070, 7111), 'os.path.join', 'opj', (['save_destination_new', '"""Qvals.nii.gz"""'], {}), "(save_destination_new, 'Qvals.nii.gz')\n", (7073, 7111), True, 'from os.path import join as opj\n'), ((7141, 7193), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['Qvals'], {'affine': 'affine', 'header': 'header'}), '(Qvals, affine=affine, header=header)\n', (7156, 7193), True, 'import nibabel as nib\n'), ((7200, 7245), 'nibabel.save', 'nib.save', (['Qvals_brain_with_header', 'Qvals_name'], {}), '(Qvals_brain_with_header, Qvals_name)\n', (7208, 7245), True, 'import nibabel as nib\n'), ((7304, 7349), 
'os.path.join', 'opj', (['save_destination_new', '"""C1MinusC2.nii.gz"""'], {}), "(save_destination_new, 'C1MinusC2.nii.gz')\n", (7307, 7349), True, 'from os.path import join as opj\n'), ((7383, 7443), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['map_C1MinusC2'], {'affine': 'affine', 'header': 'header'}), '(map_C1MinusC2, affine=affine, header=header)\n', (7398, 7443), True, 'import nibabel as nib\n'), ((7450, 7503), 'nibabel.save', 'nib.save', (['C1MinusC2_brain_with_header', 'C1MinusC2_name'], {}), '(C1MinusC2_brain_with_header, C1MinusC2_name)\n', (7458, 7503), True, 'import nibabel as nib\n'), ((7560, 7604), 'os.path.join', 'opj', (['save_destination_new', '"""map_logp.nii.gz"""'], {}), "(save_destination_new, 'map_logp.nii.gz')\n", (7563, 7604), True, 'from os.path import join as opj\n'), ((7637, 7692), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['map_logp'], {'affine': 'affine', 'header': 'header'}), '(map_logp, affine=affine, header=header)\n', (7652, 7692), True, 'import nibabel as nib\n'), ((7699, 7750), 'nibabel.save', 'nib.save', (['map_logp_brain_with_header', 'map_logp_name'], {}), '(map_logp_brain_with_header, map_logp_name)\n', (7707, 7750), True, 'import nibabel as nib\n'), ((7807, 7851), 'os.path.join', 'opj', (['save_destination_new', '"""map_logq.nii.gz"""'], {}), "(save_destination_new, 'map_logq.nii.gz')\n", (7810, 7851), True, 'from os.path import join as opj\n'), ((7884, 7939), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['map_logq'], {'affine': 'affine', 'header': 'header'}), '(map_logq, affine=affine, header=header)\n', (7899, 7939), True, 'import nibabel as nib\n'), ((7946, 7997), 'nibabel.save', 'nib.save', (['map_logq_brain_with_header', 'map_logq_name'], {}), '(map_logq_brain_with_header, map_logq_name)\n', (7954, 7997), True, 'import nibabel as nib\n'), ((8730, 8779), 'os.path.join', 'opj', (['base_directory', 'fdr_results_dir', 'combination'], {}), '(base_directory, fdr_results_dir, combination)\n', (8733, 8779), True, 'from os.path 
import join as opj\n'), ((8865, 8891), 'os.chdir', 'os.chdir', (['save_destination'], {}), '(save_destination)\n', (8873, 8891), False, 'import os\n'), ((9431, 9481), 'os.path.join', 'opj', (['hypothesis_test_dir', 'combination', '"""Pvals.npy"""'], {}), "(hypothesis_test_dir, combination, 'Pvals.npy')\n", (9434, 9481), True, 'from os.path import join as opj\n'), ((9497, 9547), 'os.path.join', 'opj', (['hypothesis_test_dir', 'combination', '"""Tvals.npy"""'], {}), "(hypothesis_test_dir, combination, 'Tvals.npy')\n", (9500, 9547), True, 'from os.path import join as opj\n'), ((9560, 9611), 'os.path.join', 'opj', (['hypothesis_test_dir', 'combination', '"""meanC1.npy"""'], {}), "(hypothesis_test_dir, combination, 'meanC1.npy')\n", (9563, 9611), True, 'from os.path import join as opj\n'), ((9624, 9675), 'os.path.join', 'opj', (['hypothesis_test_dir', 'combination', '"""meanC2.npy"""'], {}), "(hypothesis_test_dir, combination, 'meanC2.npy')\n", (9627, 9675), True, 'from os.path import join as opj\n'), ((12073, 12158), 'os.path.join', 'opj', (['base_directory', 'fc_datasink_name', 'combination', '"""fc_map_brain_file_list.npy"""'], {}), "(base_directory, fc_datasink_name, combination, 'fc_map_brain_file_list.npy'\n )\n", (12076, 12158), True, 'from os.path import join as opj\n'), ((12444, 12464), 'nibabel.load', 'nib.load', (['brain_path'], {}), '(brain_path)\n', (12452, 12464), True, 'import nibabel as nib\n'), ((12538, 12552), 'multiprocessing.Pool', 'Pool', (['num_proc'], {}), '(num_proc)\n', (12542, 12552), False, 'from multiprocessing import Pool\n'), ((4314, 4336), 'numpy.sign', 'np.sign', (['map_C1MinusC2'], {}), '(map_C1MinusC2)\n', (4321, 4336), True, 'import numpy as np\n'), ((4925, 4958), 'statsmodels.sandbox.stats.multicomp.fdrcorrection0', 'fdrcorrection0', (['pvals_list', 'alpha'], {}), '(pvals_list, alpha)\n', (4939, 4958), False, 'from statsmodels.sandbox.stats.multicomp import fdrcorrection0\n'), ((5945, 5967), 'numpy.sign', 'np.sign', 
(['map_C1MinusC2'], {}), '(map_C1MinusC2)\n', (5952, 5967), True, 'import numpy as np\n'), ((6065, 6101), 'os.path.exists', 'os.path.exists', (['save_destination_new'], {}), '(save_destination_new)\n', (6079, 6101), False, 'import os\n'), ((6111, 6141), 'os.mkdir', 'os.mkdir', (['save_destination_new'], {}), '(save_destination_new)\n', (6119, 6141), False, 'import os\n'), ((8789, 8821), 'os.path.exists', 'os.path.exists', (['save_destination'], {}), '(save_destination)\n', (8803, 8821), False, 'import os\n'), ((8831, 8860), 'os.makedirs', 'os.makedirs', (['save_destination'], {}), '(save_destination)\n', (8842, 8860), False, 'import os\n'), ((12404, 12423), 'numpy.load', 'np.load', (['brain_path'], {}), '(brain_path)\n', (12411, 12423), True, 'import numpy as np\n'), ((2317, 2343), 'numpy.where', 'np.where', (['(pvals_list < 0.1)'], {}), '(pvals_list < 0.1)\n', (2325, 2343), True, 'import numpy as np\n'), ((2407, 2434), 'numpy.where', 'np.where', (['(pvals_list < 0.01)'], {}), '(pvals_list < 0.01)\n', (2415, 2434), True, 'import numpy as np\n'), ((2498, 2525), 'numpy.where', 'np.where', (['(pvals_list < 0.05)'], {}), '(pvals_list < 0.05)\n', (2506, 2525), True, 'import numpy as np\n'), ((2587, 2613), 'numpy.where', 'np.where', (['(qvals_list < 0.1)'], {}), '(qvals_list < 0.1)\n', (2595, 2613), True, 'import numpy as np\n'), ((2677, 2704), 'numpy.where', 'np.where', (['(qvals_list < 0.01)'], {}), '(qvals_list < 0.01)\n', (2685, 2704), True, 'import numpy as np\n'), ((2768, 2795), 'numpy.where', 'np.where', (['(qvals_list < 0.05)'], {}), '(qvals_list < 0.05)\n', (2776, 2795), True, 'import numpy as np\n'), ((2894, 2923), 'numpy.where', 'np.where', (['(map_logq_list > 1.3)'], {}), '(map_logq_list > 1.3)\n', (2902, 2923), True, 'import numpy as np\n'), ((3014, 3041), 'numpy.where', 'np.where', (['(map_logq_list > 1)'], {}), '(map_logq_list > 1)\n', (3022, 3041), True, 'import numpy as np\n'), ((3133, 3160), 'numpy.where', 'np.where', (['(map_logq_list > 2)'], {}), 
'(map_logq_list > 2)\n', (3141, 3160), True, 'import numpy as np\n'), ((3259, 3288), 'numpy.where', 'np.where', (['(map_logp_list > 1.3)'], {}), '(map_logp_list > 1.3)\n', (3267, 3288), True, 'import numpy as np\n'), ((3379, 3406), 'numpy.where', 'np.where', (['(map_logp_list > 1)'], {}), '(map_logp_list > 1)\n', (3387, 3406), True, 'import numpy as np\n'), ((3498, 3525), 'numpy.where', 'np.where', (['(map_logp_list > 2)'], {}), '(map_logp_list > 2)\n', (3506, 3525), True, 'import numpy as np\n'), ((4015, 4034), 'nibabel.load', 'nib.load', (['mask_path'], {}), '(mask_path)\n', (4023, 4034), True, 'import nibabel as nib\n'), ((4341, 4356), 'numpy.log10', 'np.log10', (['Pvals'], {}), '(Pvals)\n', (4349, 4356), True, 'import numpy as np\n'), ((5469, 5496), 'numpy.sign', 'np.sign', (['map_C1MinusC2_list'], {}), '(map_C1MinusC2_list)\n', (5476, 5496), True, 'import numpy as np\n'), ((5972, 5987), 'numpy.log10', 'np.log10', (['Qvals'], {}), '(Qvals)\n', (5980, 5987), True, 'import numpy as np\n'), ((5501, 5521), 'numpy.log10', 'np.log10', (['qvals_list'], {}), '(qvals_list)\n', (5509, 5521), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
from utils import apply_regr
from non_max_suppression import nm_suppress
class ROIPoolingLayer(Layer):
    """Region-of-interest pooling layer.

    Each ROI is pooled by cropping its window from the feature map and
    *resizing* it to a fixed (pooled_height, pooled_width) grid via
    tf.image.resize (no explicit max-pooling is performed).

    The implementation indexes feature maps as channels-LAST
    (height, width, channels). Each ROI is a 4-vector (x, y, w, h) cast
    to int32, i.e. absolute feature-map coordinates -- TODO confirm
    against callers.

    # Constructor parameters
        pooled_height, pooled_width (int) --
            specify height and width of layer outputs
    Shape of inputs
        [(batch_size, height, width, n_channels),
         (batch_size, num_rois, 4)]
    Shape of output
        (batch_size, num_rois, pooled_height, pooled_width, n_channels)
    """
    def __init__(self, pooled_height=9, pooled_width=9, **kwargs):
        # Output grid size every ROI window is resized to.
        self.pooled_height = pooled_height
        self.pooled_width = pooled_width
        super(ROIPoolingLayer, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        """ Returns the shape of the ROI Layer output
        """
        feature_map_shape, rois_shape = input_shape
        # Feature maps and ROI tensors must share the batch dimension.
        assert feature_map_shape[0] == rois_shape[0]
        batch_size = feature_map_shape[0]
        n_rois = rois_shape[1]
        n_channels = feature_map_shape[3]
        return (batch_size, n_rois, self.pooled_height,
                self.pooled_width, n_channels)

    def call(self, x):
        """ Maps the input tensor of the ROI layer to its output
        # Parameters
            x[0] -- Convolutional feature map tensor,
                    shape (batch_size, height, width, n_channels)
            x[1] -- Tensor of region of interests from candidate bounding boxes,
                    shape (batch_size, num_rois, 4)
        # Output
            pooled_areas -- Tensor with the pooled region of interest, shape
                (batch_size, num_rois, pooled_height, pooled_width, n_channels)
        """
        def curried_pool_rois(x):
            # Pool all ROIs belonging to one image of the batch.
            return ROIPoolingLayer._pool_rois(x[0], x[1],
                                              self.pooled_height,
                                              self.pooled_width)
        # map_fn iterates over the batch dimension.
        return tf.map_fn(curried_pool_rois, x, dtype=tf.float32)

    @staticmethod
    def _pool_rois(feature_map, rois, pooled_height, pooled_width):
        """ Applies ROI pooling for a single image and varios ROIs
        """
        def curried_pool_roi(roi):
            return ROIPoolingLayer._pool_roi(feature_map, roi,
                                             pooled_height, pooled_width)
        # map_fn iterates over the ROIs of this image.
        return tf.map_fn(curried_pool_roi, rois, dtype=tf.float32)

    @staticmethod
    def _pool_roi(feature_map, roi, pooled_height, pooled_width):
        """ Applies ROI pooling to a single image and a single region of interest
        """
        # roi = (x, y, w, h), cast to integer feature-map coordinates.
        x = K.cast(roi[0], 'int32')
        y = K.cast(roi[1], 'int32')
        w = K.cast(roi[2], 'int32')
        h = K.cast(roi[3], 'int32')
        # Crop the ROI window and resize it to the pooled output size.
        with tf.device('/cpu:0'):
            resized = tf.image.resize(feature_map[y:y+h, x:x+w, :],
                                      (pooled_height, pooled_width))
        return resized
def rpn_to_roi(y_class, y_regr, w, h, config):
    """Turn RPN outputs into candidate ROI boxes.

    Args:
        y_class: RPN classification output,
            shape (1, feature_height, feature_width, num_anchors).
        y_regr: RPN regression output,
            shape (1, feature_height, feature_width, num_anchors * 4);
            note: divided in place by config.std_scaling.
        w, h: clipping bounds for the box coordinates.
        config: provides anchor scales/ratios, downscale factor and the
            non-max-suppression parameters.

    Returns:
        Integer array of boxes kept by non-max suppression (x1, y1, x2, y2).
    """
    assert y_class.shape[0] == y_regr.shape[0] == 1
    # Undo the regression-target scaling (mutates y_regr in place, as before).
    y_regr /= config.std_scaling

    rows, cols = y_class.shape[1:3]
    # coords holds one (x, y, w, h) -> later (x1, y1, x2, y2) box per
    # feature-map cell per anchor.
    coords = np.zeros((4, rows, cols, config.num_anchors))

    # Every (scale, ratio) pair defines one anchor layer, in the same
    # order as the original nested loops.
    anchor_pairs = [(size, ratio)
                    for size in config.anchor_box_scales
                    for ratio in config.anchor_box_ratios]
    # The cell-centre grid is loop-invariant, so build it once.
    grid_x, grid_y = np.meshgrid(np.arange(cols), np.arange(rows))

    for layer, (size, ratio) in enumerate(anchor_pairs):
        # Anchor centres, mapped back onto the input image.
        coords[0, :, :, layer] = grid_x + .5
        coords[1, :, :, layer] = grid_y + .5
        coords[0, :, :, layer] *= config.base_network_downscale
        coords[1, :, :, layer] *= config.base_network_downscale
        coords[2, :, :, layer] = size * ratio[0]
        coords[3, :, :, layer] = size * ratio[1]

        # This anchor's 4 regression channels, rearranged to
        # (4, rows, cols) so they line up with coords.
        deltas = np.transpose(y_regr[0, :, :, 4 * layer:4 * layer + 4], (2, 0, 1))
        coords[:, :, :, layer] = apply_regr(coords[:, :, :, layer], deltas)

        # (x, y, w, h) -> (x1, y1, x2, y2), clipped to the image bounds.
        coords[2, :, :, layer] += coords[0, :, :, layer]
        coords[3, :, :, layer] += coords[1, :, :, layer]
        coords[0, :, :, layer] = np.maximum(0, coords[0, :, :, layer])
        coords[1, :, :, layer] = np.maximum(0, coords[1, :, :, layer])
        coords[2, :, :, layer] = np.minimum(w, coords[2, :, :, layer])
        coords[3, :, :, layer] = np.minimum(h, coords[3, :, :, layer])

    # Flatten boxes to (n, 4); the scores flatten in the matching order.
    boxes = np.reshape(coords, (4, -1)).transpose()
    probs = y_class.flatten()

    # Drop degenerate boxes: inverted or thinner than one feature cell.
    stride = config.base_network_downscale
    bad = np.where(np.logical_or(boxes[:, 0] - boxes[:, 2] > -1 * stride,
                                boxes[:, 1] - boxes[:, 3] > -1 * stride))
    boxes = np.delete(boxes, bad, 0)
    probs = np.delete(probs, bad, 0)

    keep = nm_suppress(boxes, probs,
                       overlap_thresh=config.overlap_threshold,
                       max_boxes=config.num_boxes)
    return boxes[keep].astype('int')
| [
"tensorflow.device",
"keras.backend.cast",
"numpy.reshape",
"numpy.minimum",
"tensorflow.image.resize",
"numpy.delete",
"utils.apply_regr",
"numpy.logical_or",
"non_max_suppression.nm_suppress",
"numpy.zeros",
"tensorflow.map_fn",
"numpy.maximum",
"numpy.transpose",
"numpy.arange"
] | [((3935, 3980), 'numpy.zeros', 'np.zeros', (['(4, rows, cols, config.num_anchors)'], {}), '((4, rows, cols, config.num_anchors))\n', (3943, 3980), True, 'import numpy as np\n'), ((6100, 6125), 'numpy.delete', 'np.delete', (['boxes', 'idxs', '(0)'], {}), '(boxes, idxs, 0)\n', (6109, 6125), True, 'import numpy as np\n'), ((6138, 6163), 'numpy.delete', 'np.delete', (['probs', 'idxs', '(0)'], {}), '(probs, idxs, 0)\n', (6147, 6163), True, 'import numpy as np\n'), ((6175, 6273), 'non_max_suppression.nm_suppress', 'nm_suppress', (['boxes', 'probs'], {'overlap_thresh': 'config.overlap_threshold', 'max_boxes': 'config.num_boxes'}), '(boxes, probs, overlap_thresh=config.overlap_threshold,\n max_boxes=config.num_boxes)\n', (6186, 6273), False, 'from non_max_suppression import nm_suppress\n'), ((2198, 2247), 'tensorflow.map_fn', 'tf.map_fn', (['curried_pool_rois', 'x'], {'dtype': 'tf.float32'}), '(curried_pool_rois, x, dtype=tf.float32)\n', (2207, 2247), True, 'import tensorflow as tf\n'), ((2602, 2653), 'tensorflow.map_fn', 'tf.map_fn', (['curried_pool_roi', 'rois'], {'dtype': 'tf.float32'}), '(curried_pool_roi, rois, dtype=tf.float32)\n', (2611, 2653), True, 'import tensorflow as tf\n'), ((2846, 2869), 'keras.backend.cast', 'K.cast', (['roi[0]', '"""int32"""'], {}), "(roi[0], 'int32')\n", (2852, 2869), True, 'from keras import backend as K\n'), ((2882, 2905), 'keras.backend.cast', 'K.cast', (['roi[1]', '"""int32"""'], {}), "(roi[1], 'int32')\n", (2888, 2905), True, 'from keras import backend as K\n'), ((2918, 2941), 'keras.backend.cast', 'K.cast', (['roi[2]', '"""int32"""'], {}), "(roi[2], 'int32')\n", (2924, 2941), True, 'from keras import backend as K\n'), ((2954, 2977), 'keras.backend.cast', 'K.cast', (['roi[3]', '"""int32"""'], {}), "(roi[3], 'int32')\n", (2960, 2977), True, 'from keras import backend as K\n'), ((5911, 6062), 'numpy.logical_or', 'np.logical_or', (['(boxes[:, 0] - boxes[:, 2] > -1 * config.base_network_downscale)', '(boxes[:, 1] - boxes[:, 3] > -1 * 
config.base_network_downscale)'], {}), '(boxes[:, 0] - boxes[:, 2] > -1 * config.\n base_network_downscale, boxes[:, 1] - boxes[:, 3] > -1 * config.\n base_network_downscale)\n', (5924, 6062), True, 'import numpy as np\n'), ((3049, 3068), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (3058, 3068), True, 'import tensorflow as tf\n'), ((3092, 3177), 'tensorflow.image.resize', 'tf.image.resize', (['feature_map[y:y + h, x:x + w, :]', '(pooled_height, pooled_width)'], {}), '(feature_map[y:y + h, x:x + w, :], (pooled_height, pooled_width)\n )\n', (3107, 3177), True, 'import tensorflow as tf\n'), ((4876, 4905), 'numpy.transpose', 'np.transpose', (['regr', '(2, 0, 1)'], {}), '(regr, (2, 0, 1))\n', (4888, 4905), True, 'import numpy as np\n'), ((5073, 5116), 'utils.apply_regr', 'apply_regr', (['cord[:, :, :, curr_layer]', 'regr'], {}), '(cord[:, :, :, curr_layer], regr)\n', (5083, 5116), False, 'from utils import apply_regr\n'), ((5403, 5443), 'numpy.maximum', 'np.maximum', (['(0)', 'cord[0, :, :, curr_layer]'], {}), '(0, cord[0, :, :, curr_layer])\n', (5413, 5443), True, 'import numpy as np\n'), ((5484, 5524), 'numpy.maximum', 'np.maximum', (['(0)', 'cord[1, :, :, curr_layer]'], {}), '(0, cord[1, :, :, curr_layer])\n', (5494, 5524), True, 'import numpy as np\n'), ((5565, 5605), 'numpy.minimum', 'np.minimum', (['w', 'cord[2, :, :, curr_layer]'], {}), '(w, cord[2, :, :, curr_layer])\n', (5575, 5605), True, 'import numpy as np\n'), ((5646, 5686), 'numpy.minimum', 'np.minimum', (['h', 'cord[3, :, :, curr_layer]'], {}), '(h, cord[3, :, :, curr_layer])\n', (5656, 5686), True, 'import numpy as np\n'), ((5731, 5756), 'numpy.reshape', 'np.reshape', (['cord', '(4, -1)'], {}), '(cord, (4, -1))\n', (5741, 5756), True, 'import numpy as np\n'), ((4330, 4345), 'numpy.arange', 'np.arange', (['cols'], {}), '(cols)\n', (4339, 4345), True, 'import numpy as np\n'), ((4347, 4362), 'numpy.arange', 'np.arange', (['rows'], {}), '(rows)\n', (4356, 4362), True, 'import 
numpy as np\n')] |
import os
import sys
import utils
import random
import datetime
import argparse
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve
class Evaluation(object):
    """Evaluation helpers: validation metrics and file-level prediction."""

    def __init__(self, logger, dataset):
        self.logger = logger
        self.dataset = dataset
        self.batch_size = dataset.batch_size

    def EvaluateModel(self, model, epoch):
        """Evaluate `model` on the validation split and log to tensorboard.

        Returns the (loss, accuracy) pair reported by model.evaluate.
        """
        names = self.dataset.val_names
        size = self.dataset.input_size
        images, labels, _ = self.dataset.get_imgs(names, size)
        loss, acc = model.evaluate(images, labels, batch_size=self.batch_size, verbose=0)
        self.logger.write_tensorboard(['valid_acc', 'valid_loss'], [acc, loss], epoch)
        return loss, acc

    def PredictFiles(self, model, filenames, batch_size, epoch):
        """Predict label strings for `filenames`, then report accuracy."""
        self.test_names = filenames
        images, _, shown_labels = self.dataset.get_imgs(filenames,
                                                        self.dataset.input_size)
        # Run the model, then decode each raw prediction to a digit string.
        raw = model.predict(images, batch_size=self.batch_size)
        self.test_result = [decode(raw[i, ...]) for i in range(images.shape[0])]
        self.test_labels = shown_labels
        self.Measure_Acc(epoch)

    def Measure_Acc(self, epoch):
        """Print every GT/prediction pair and the overall test accuracy."""
        correct = 0
        for i, predicted in enumerate(self.test_result):
            truth = self.test_labels[i]
            print('GT: {}\tPre: {}'.format(truth, predicted))
            if predicted == truth:
                correct += 1
        print('*' * 30)
        print('Test Accuracy : {}\n'.format(correct / len(self.test_result)))
"""
evaluation utils
"""
def decode(result):
    """Decode a flat 40-element prediction into a 4-character digit string.

    The vector is viewed as a (10, 4) matrix in column-major order --
    ten class scores per character position -- and each column's argmax
    index becomes one digit of the output string.
    """
    per_char = np.reshape(result, (10, 4), order='F')
    digits = np.argmax(per_char, axis=0)
    return ''.join(map(str, digits))
return string | [
"numpy.reshape",
"numpy.argmax"
] | [((2005, 2043), 'numpy.reshape', 'np.reshape', (['result', '(10, 4)'], {'order': '"""F"""'}), "(result, (10, 4), order='F')\n", (2015, 2043), True, 'import numpy as np\n'), ((2057, 2082), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (2066, 2082), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.