code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/python3
import matplotlib.pyplot as plt
import csv
from math import atan2,degrees
import numpy as np
#Label line with line2D label data
#Label line with line2D label data
def labelLine(line, x, label=None, align=True, **kwargs):
    """Draw *label* (default: the line's own label) directly on *line* at abscissa *x*.

    Parameters
    ----------
    line : matplotlib Line2D-like object (needs get_xdata/get_ydata/get_label/
        get_color and an ``axes`` attribute)
    x : abscissa at which to place the label; must lie inside the line's x-range
    label : text to draw; defaults to ``line.get_label()``
    align : when True, rotate the text to follow the local slope of the line
    **kwargs : forwarded to ``Axes.text``
    """
    ax = line.axes
    xdata = line.get_xdata()
    ydata = line.get_ydata()
    if (x < xdata[0]) or (x > xdata[-1]):
        print('x label location is outside data range!')
        return
    # Find the segment [ip-1, ip] containing x.  Default to the LAST segment
    # so that x == xdata[-1] interpolates correctly (the original defaulted to
    # ip = 1, which interpolated on the first segment in that edge case).
    ip = len(xdata) - 1
    for i, xv in enumerate(xdata):
        if x < xv:
            ip = i
            break
    # Linear interpolation of y within the chosen segment
    y = ydata[ip-1] + (ydata[ip]-ydata[ip-1])*(x-xdata[ip-1])/(xdata[ip]-xdata[ip-1])
    if not label:
        label = line.get_label()
    if align:
        # Compute the slope of the containing segment
        dx = xdata[ip] - xdata[ip-1]
        dy = ydata[ip] - ydata[ip-1]
        ang = degrees(atan2(dy, dx))
        # Transform to screen co-ordinates so the rotation matches the view
        pt = np.array([x, y]).reshape((1, 2))
        trans_angle = ax.transData.transform_angles(np.array((ang,)), pt)[0]
    else:
        trans_angle = 0
    # Fill in sensible defaults for any text property the caller omitted
    if 'color' not in kwargs:
        kwargs['color'] = line.get_color()
    if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs):
        kwargs['ha'] = 'center'
    if ('verticalalignment' not in kwargs) and ('va' not in kwargs):
        kwargs['va'] = 'center'
    if 'backgroundcolor' not in kwargs:
        kwargs['backgroundcolor'] = ax.get_facecolor()
    if 'clip_on' not in kwargs:
        kwargs['clip_on'] = True
    if 'zorder' not in kwargs:
        kwargs['zorder'] = 2.5
    ax.text(x, y, label, rotation=trans_angle, **kwargs)

def labelLines(lines, align=True, xvals=None, **kwargs):
    """Label every (non default-labelled) line of *lines* directly on the plot.

    When *xvals* is None the label positions are spread evenly across the
    interior of the axes' x-range, one position per labelled line.
    """
    ax = lines[0].axes
    labLines = []
    labels = []
    #Take only the lines which have labels other than the default ones
    for line in lines:
        label = line.get_label()
        if "_line" not in label:
            labLines.append(line)
            labels.append(label)
    if xvals is None:
        xmin, xmax = ax.get_xlim()
        # n+2 evenly spaced points minus the two endpoints gives one interior
        # position per line.  (The original sliced [5:-5], which yields an
        # empty array for fewer than 9 lines, so no labels were drawn at all.)
        xvals = np.linspace(xmin, xmax, len(labLines) + 2)[1:-1]
    for line, x, label in zip(labLines, xvals, labels):
        labelLine(line, x, label, align, **kwargs)
def create_graphic(name,file_path,column_key,exclude,operation_fun):
x_out={}
y_out={}
with open(file_path, 'r') as csvfile:
plots= csv.DictReader(csvfile, delimiter=',')
for row in plots:
if row[column_key] not in exclude:
value = operation_fun(row)
if row[column_key] in y_out:
y_out[row[column_key]].append(value)
x_out[row[column_key]].append(len(x_out[row[column_key]]))
else:
y_out[row[column_key]] = [value]
x_out[row[column_key]] = [0]
plt.figure(name)
for key,value in y_out.items():
plt.plot(x_out[key],value,marker='o',label=key)
labelLines(plt.gca().get_lines(),zorder=2.5)
#plt.legend()
plt.title('COVID 19')
plt.xlabel('Days')
plt.ylabel('People')
plt.show(False)
### Main
# Province entries still being assigned to a real province are skipped.
exclude_region = []
exclude_province = ["In fase di definizione/aggiornamento"]

def fun_totale_casi(row):
    # Total confirmed cases of a CSV row, as an integer.
    return int(row['totale_casi'])

def fun_percentuale_guariti(row):
    # Fraction of recovered over total cases; 0 when there are no cases yet.
    totale = int(row['totale_casi'])
    return int(row['dimessi_guariti']) / totale if totale != 0 else 0

create_graphic("Regioni", 'dati-regioni/dpc-covid19-ita-regioni.csv', 'denominazione_regione', exclude_region, fun_totale_casi)
create_graphic("Province", 'dati-province/dpc-covid19-ita-province.csv', 'denominazione_provincia', exclude_province, fun_totale_casi)
create_graphic("Nazionale", 'dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv', 'stato', [], fun_totale_casi)
create_graphic("Regioni Guariti / Totale Casi", 'dati-regioni/dpc-covid19-ita-regioni.csv', 'denominazione_regione', exclude_region, fun_percentuale_guariti)
input("Press Enter to close...")
### End
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.atan2",
"csv.DictReader",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((3003, 3019), 'matplotlib.pyplot.figure', 'plt.figure', (['name'], {}), '(name)\n', (3013, 3019), True, 'import matplotlib.pyplot as plt\n'), ((3219, 3240), 'matplotlib.pyplot.title', 'plt.title', (['"""COVID 19"""'], {}), "('COVID 19')\n", (3228, 3240), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3268), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (3260, 3268), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3297), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""People"""'], {}), "('People')\n", (3287, 3297), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3322), 'matplotlib.pyplot.show', 'plt.show', (['(False)'], {}), '(False)\n', (3315, 3322), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2424), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2400, 2424), False, 'import csv\n'), ((3077, 3127), 'matplotlib.pyplot.plot', 'plt.plot', (['x_out[key]', 'value'], {'marker': '"""o"""', 'label': 'key'}), "(x_out[key], value, marker='o', label=key)\n", (3085, 3127), True, 'import matplotlib.pyplot as plt\n'), ((840, 853), 'math.atan2', 'atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (845, 853), False, 'from math import atan2, degrees\n'), ((910, 926), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (918, 926), True, 'import numpy as np\n'), ((993, 1009), 'numpy.array', 'np.array', (['(ang,)'], {}), '((ang,))\n', (1001, 1009), True, 'import numpy as np\n'), ((3145, 3154), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3152, 3154), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import testing
from chainer.testing import attr
class TestDummyDeviceType(unittest.TestCase):

    """Sanity checks for the sentinel device type used on CPU-only setups."""

    def test_int(self):
        # The dummy device always casts to -1.
        device_type = cuda.DummyDeviceType()
        self.assertEqual(int(device_type), -1)

    def test_eq(self):
        # Any two dummy device instances compare equal.
        self.assertEqual(cuda.DummyDeviceType(), cuda.DummyDeviceType())

    def test_ne(self):
        # A dummy device never equals a plain integer.
        self.assertNotEqual(cuda.DummyDeviceType(), 1)
_builtins_available = False
try:
import builtins
_builtins_available = True
except ImportError:
pass
class TestCuda(unittest.TestCase):

    """Behaviour of device lookup and array-module helpers in chainer.cuda."""

    def test_get_dummy_device(self):
        # With no argument the lookup falls back to the dummy device.
        self.assertIs(cuda.get_device(), cuda.DummyDevice)

    def test_get_device_for_numpy_int(self):
        # A NumPy scalar int does not select a GPU.
        device = cuda.get_device(numpy.int64(0))
        self.assertIs(device, cuda.DummyDevice)

    @attr.gpu
    def test_get_dummy_device_for_empty_array(self):
        # An empty cupy array maps back to the dummy device.
        empty = cuda.cupy.array([]).reshape((0, 10))
        self.assertIs(cuda.get_device(empty), cuda.DummyDevice)

    @attr.gpu
    def test_get_device_for_int(self):
        self.assertEqual(cuda.get_device(0), cuda.Device(0))

    @attr.gpu
    @unittest.skipUnless(_builtins_available,
                         'builtins module is not available')
    def test_get_device_for_builtin_int(self):
        # builtins.int is from future package and it is different
        # from builtin int/long on Python 2.
        self.assertEqual(cuda.get_device(builtins.int(0)), cuda.Device(0))

    @attr.gpu
    def test_get_device_for_device(self):
        # Passing a device through get_device is the identity.
        device = cuda.get_device(0)
        self.assertIs(cuda.get_device(device), device)

    def test_to_gpu_unavailable(self):
        data = numpy.array([1])
        if not cuda.available:
            with self.assertRaises(RuntimeError):
                cuda.to_gpu(data)

    def test_get_array_module_for_numpy(self):
        arr = numpy.array([])
        self.assertIs(cuda.get_array_module(arr), numpy)
        self.assertIs(cuda.get_array_module(chainer.Variable(arr)), numpy)

    @attr.gpu
    def test_get_array_module_for_cupy(self):
        arr = cuda.cupy.array([])
        self.assertIs(cuda.get_array_module(arr), cuda.cupy)
        self.assertIs(cuda.get_array_module(chainer.Variable(arr)), cuda.cupy)
# Run each test case twice: once with a C-contiguous array and once with a
# Fortran-ordered (non C-contiguous) variant of it.
@testing.parameterize(
    {'c_contiguous': True},
    {'c_contiguous': False},
)
class TestToCPU(unittest.TestCase):
    """Tests for cuda.to_cpu: copy behaviour, device contexts and streams."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (2, 3))

    def test_numpy_array(self):
        # NumPy input is returned unchanged (same object, not a copy).
        y = cuda.to_cpu(self.x)
        self.assertIs(self.x, y) # Do not copy

    @attr.gpu
    def test_cupy_array(self):
        x = cuda.to_gpu(self.x)
        if not self.c_contiguous:
            x = cuda.cupy.asfortranarray(x)
        y = cuda.to_cpu(x)
        self.assertIsInstance(y, numpy.ndarray)
        numpy.testing.assert_array_equal(self.x, y)

    @attr.multi_gpu(2)
    def test_cupy_array2(self):
        # to_cpu must work even when the current device differs from the
        # device holding the array.
        with cuda.Device(0):
            x = cuda.to_gpu(self.x)
            if not self.c_contiguous:
                x = cuda.cupy.asfortranarray(x)
        with cuda.Device(1):
            y = cuda.to_cpu(x)
        self.assertIsInstance(y, numpy.ndarray)
        numpy.testing.assert_array_equal(self.x, y)

    @attr.gpu
    def test_numpy_array_async(self):
        # Passing a stream must not change the identity behaviour for
        # NumPy input.
        y = cuda.to_cpu(self.x, stream=cuda.Stream())
        self.assertIsInstance(y, numpy.ndarray)
        self.assertIs(self.x, y)  # Do not copy

    @attr.gpu
    def test_cupy_array_async1(self):
        x = cuda.to_gpu(self.x)
        if not self.c_contiguous:
            x = cuda.cupy.asfortranarray(x)
        y = cuda.to_cpu(x, stream=cuda.Stream.null)
        self.assertIsInstance(y, numpy.ndarray)
        cuda.cupy.testing.assert_array_equal(self.x, y)

    @attr.multi_gpu(2)
    def test_cupy_array_async2(self):
        # Asynchronous copy from a non-default device.
        x = cuda.to_gpu(self.x, device=1)
        with x.device:
            if not self.c_contiguous:
                x = cuda.cupy.asfortranarray(x)
        y = cuda.to_cpu(x, stream=cuda.Stream.null)
        self.assertIsInstance(y, numpy.ndarray)
        cuda.cupy.testing.assert_array_equal(self.x, y)

    def test_variable(self):
        # Variables are rejected; to_cpu accepts only raw arrays.
        x = chainer.Variable(self.x)
        with self.assertRaises(TypeError):
            cuda.to_cpu(x)
class TestWorkspace(unittest.TestCase):

    """get/set round-trip of the global cuDNN workspace size limit."""

    def setUp(self):
        # Remember the global limit so every test can restore it afterwards.
        self.space = cuda.get_max_workspace_size()

    def tearDown(self):
        cuda.set_max_workspace_size(self.space)

    def test_size(self):
        expected = 1024
        cuda.set_max_workspace_size(expected)
        self.assertEqual(expected, cuda.get_max_workspace_size())
# Run each test case twice: once with a C-contiguous array and once with a
# Fortran-ordered (non C-contiguous) variant of it.
@testing.parameterize(
    {'c_contiguous': True},
    {'c_contiguous': False},
)
class TestToGPU(unittest.TestCase):
    """Tests for cuda.to_gpu: copy/identity behaviour, target devices, streams."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (2, 3))
        if not self.c_contiguous:
            # A transposed view is Fortran-ordered, hence not C-contiguous.
            self.x = self.x.T

    @attr.gpu
    def test_numpy_array(self):
        y = cuda.to_gpu(self.x)
        self.assertIsInstance(y, cuda.ndarray)
        cuda.cupy.testing.assert_array_equal(self.x, y)

    @attr.gpu
    def test_cupy_array1(self):
        # Moving an array already on the GPU is the identity.
        x = cuda.to_gpu(self.x)
        y = cuda.to_gpu(x)
        self.assertIsInstance(y, cuda.ndarray)
        self.assertIs(x, y)  # Do not copy

    @attr.multi_gpu(2)
    def test_cupy_array2(self):
        # Moving to a different device must actually place the copy there.
        x = cuda.to_gpu(self.x, device=0)
        with x.device:
            if not self.c_contiguous:
                x = cuda.cupy.asfortranarray(x)
        y = cuda.to_gpu(x, device=1)
        self.assertIsInstance(y, cuda.ndarray)
        self.assertEqual(int(y.device), 1)

    @attr.gpu
    def test_numpy_array_async(self):
        y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
        self.assertIsInstance(y, cuda.ndarray)
        cuda.cupy.testing.assert_array_equal(self.x, y)

    @attr.multi_gpu(2)
    def test_numpy_array_async2(self):
        y = cuda.to_gpu(self.x, device=1, stream=cuda.Stream.null)
        self.assertIsInstance(y, cuda.ndarray)
        cuda.cupy.testing.assert_array_equal(self.x, y)
        self.assertEqual(int(y.device), 1)

    @attr.multi_gpu(2)
    def test_numpy_array_async3(self):
        # The current device context determines the target device.
        with cuda.Device(1):
            y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
        self.assertIsInstance(y, cuda.ndarray)
        cuda.cupy.testing.assert_array_equal(self.x, y)
        self.assertEqual(int(y.device), 1)

    @attr.gpu
    def test_cupy_array_async1(self):
        x = cuda.to_gpu(self.x)
        if not self.c_contiguous:
            x = cuda.cupy.asfortranarray(x)
        y = cuda.to_gpu(x, stream=cuda.Stream())
        self.assertIsInstance(y, cuda.ndarray)
        self.assertIs(x, y)  # Do not copy
        cuda.cupy.testing.assert_array_equal(x, y)

    @attr.multi_gpu(2)
    def test_cupy_array_async2(self):
        # Cross-device async transfer must copy, not alias.
        x = cuda.to_gpu(self.x, device=0)
        with x.device:
            if not self.c_contiguous:
                x = cuda.cupy.asfortranarray(x)
        y = cuda.to_gpu(x, device=1, stream=cuda.Stream.null)
        self.assertIsInstance(y, cuda.ndarray)
        self.assertIsNot(x, y)  # Do copy
        cuda.cupy.testing.assert_array_equal(x, y)

    @attr.multi_gpu(2)
    def test_cupy_array_async3(self):
        with cuda.Device(0):
            x = cuda.to_gpu(self.x)
            if not self.c_contiguous:
                x = cuda.cupy.asfortranarray(x)
        with cuda.Device(1):
            y = cuda.to_gpu(x, stream=cuda.Stream.null)
        self.assertIsInstance(y, cuda.ndarray)
        self.assertIsNot(x, y)  # Do copy
        cuda.cupy.testing.assert_array_equal(x, y)

    def test_variable_cpu(self):
        # NOTE(review): this passes a Variable to cuda.to_cpu even though the
        # class exercises to_gpu; possibly cuda.to_gpu was intended -- confirm.
        x = chainer.Variable(self.x)
        with self.assertRaises(TypeError):
            cuda.to_cpu(x)
# Hand off to Chainer's test runner when this file is executed as a script.
testing.run_module(__name__, __file__)
| [
"chainer.testing.parameterize",
"chainer.cuda.cupy.testing.assert_array_equal",
"chainer.cuda.Stream",
"chainer.testing.run_module",
"chainer.testing.attr.multi_gpu",
"chainer.cuda.set_max_workspace_size",
"chainer.cuda.cupy.asfortranarray",
"chainer.cuda.to_cpu",
"numpy.int64",
"chainer.cuda.get_... | [((2273, 2342), 'chainer.testing.parameterize', 'testing.parameterize', (["{'c_contiguous': True}", "{'c_contiguous': False}"], {}), "({'c_contiguous': True}, {'c_contiguous': False})\n", (2293, 2342), False, 'from chainer import testing\n'), ((4597, 4666), 'chainer.testing.parameterize', 'testing.parameterize', (["{'c_contiguous': True}", "{'c_contiguous': False}"], {}), "({'c_contiguous': True}, {'c_contiguous': False})\n", (4617, 4666), False, 'from chainer import testing\n'), ((7702, 7740), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (7720, 7740), False, 'from chainer import testing\n'), ((1119, 1195), 'unittest.skipUnless', 'unittest.skipUnless', (['_builtins_available', '"""builtins module is not available"""'], {}), "(_builtins_available, 'builtins module is not available')\n", (1138, 1195), False, 'import unittest\n'), ((2867, 2884), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (2881, 2884), False, 'from chainer.testing import attr\n'), ((3756, 3773), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (3770, 3773), False, 'from chainer.testing import attr\n'), ((5237, 5254), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (5251, 5254), False, 'from chainer.testing import attr\n'), ((5784, 5801), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (5798, 5801), False, 'from chainer.testing import attr\n'), ((6060, 6077), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (6074, 6077), False, 'from chainer.testing import attr\n'), ((6712, 6729), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (6726, 6729), False, 'from chainer.testing import attr\n'), ((7127, 7144), 'chainer.testing.attr.multi_gpu', 'attr.multi_gpu', (['(2)'], {}), '(2)\n', (7141, 7144), False, 'from chainer.testing import attr\n'), ((1528, 
1546), 'chainer.cuda.get_device', 'cuda.get_device', (['(0)'], {}), '(0)\n', (1543, 1546), False, 'from chainer import cuda\n'), ((1654, 1670), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (1665, 1670), False, 'import numpy\n'), ((2429, 2464), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(2, 3)'], {}), '(-1, 1, (2, 3))\n', (2449, 2464), False, 'import numpy\n'), ((2510, 2529), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['self.x'], {}), '(self.x)\n', (2521, 2529), False, 'from chainer import cuda\n'), ((2636, 2655), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2647, 2655), False, 'from chainer import cuda\n'), ((2746, 2760), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {}), '(x)\n', (2757, 2760), False, 'from chainer import cuda\n'), ((2817, 2860), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (2849, 2860), False, 'import numpy\n'), ((3184, 3227), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (3216, 3227), False, 'import numpy\n'), ((3496, 3515), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (3507, 3515), False, 'from chainer import cuda\n'), ((3606, 3645), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {'stream': 'cuda.Stream.null'}), '(x, stream=cuda.Stream.null)\n', (3617, 3645), False, 'from chainer import cuda\n'), ((3702, 3749), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (3738, 3749), False, 'from chainer import cuda\n'), ((3824, 3853), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {'device': '(1)'}), '(self.x, device=1)\n', (3835, 3853), False, 'from chainer import cuda\n'), ((3975, 4014), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {'stream': 'cuda.Stream.null'}), '(x, stream=cuda.Stream.null)\n', (3986, 4014), False, 'from chainer import cuda\n'), ((4071, 
4118), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (4107, 4118), False, 'from chainer import cuda\n'), ((4161, 4185), 'chainer.Variable', 'chainer.Variable', (['self.x'], {}), '(self.x)\n', (4177, 4185), False, 'import chainer\n'), ((4341, 4370), 'chainer.cuda.get_max_workspace_size', 'cuda.get_max_workspace_size', ([], {}), '()\n', (4368, 4370), False, 'from chainer import cuda\n'), ((4404, 4443), 'chainer.cuda.set_max_workspace_size', 'cuda.set_max_workspace_size', (['self.space'], {}), '(self.space)\n', (4431, 4443), False, 'from chainer import cuda\n'), ((4498, 4531), 'chainer.cuda.set_max_workspace_size', 'cuda.set_max_workspace_size', (['size'], {}), '(size)\n', (4525, 4531), False, 'from chainer import cuda\n'), ((4753, 4788), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(2, 3)'], {}), '(-1, 1, (2, 3))\n', (4773, 4788), False, 'import numpy\n'), ((4912, 4931), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (4923, 4931), False, 'from chainer import cuda\n'), ((4987, 5034), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (5023, 5034), False, 'from chainer import cuda\n'), ((5094, 5113), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (5105, 5113), False, 'from chainer import cuda\n'), ((5126, 5140), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x'], {}), '(x)\n', (5137, 5140), False, 'from chainer import cuda\n'), ((5299, 5328), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {'device': '(0)'}), '(self.x, device=0)\n', (5310, 5328), False, 'from chainer import cuda\n'), ((5450, 5474), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x'], {'device': '(1)'}), '(x, device=1)\n', (5461, 5474), False, 'from chainer import cuda\n'), ((5630, 5674), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {'stream': 'cuda.Stream.null'}), '(self.x, 
stream=cuda.Stream.null)\n', (5641, 5674), False, 'from chainer import cuda\n'), ((5730, 5777), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (5766, 5777), False, 'from chainer import cuda\n'), ((5853, 5907), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {'device': '(1)', 'stream': 'cuda.Stream.null'}), '(self.x, device=1, stream=cuda.Stream.null)\n', (5864, 5907), False, 'from chainer import cuda\n'), ((5963, 6010), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (5999, 6010), False, 'from chainer import cuda\n'), ((6262, 6309), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['self.x', 'y'], {}), '(self.x, y)\n', (6298, 6309), False, 'from chainer import cuda\n'), ((6418, 6437), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (6429, 6437), False, 'from chainer import cuda\n'), ((6663, 6705), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (6699, 6705), False, 'from chainer import cuda\n'), ((6780, 6809), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {'device': '(0)'}), '(self.x, device=0)\n', (6791, 6809), False, 'from chainer import cuda\n'), ((6931, 6980), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x'], {'device': '(1)', 'stream': 'cuda.Stream.null'}), '(x, device=1, stream=cuda.Stream.null)\n', (6942, 6980), False, 'from chainer import cuda\n'), ((7078, 7120), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (7114, 7120), False, 'from chainer import cuda\n'), ((7516, 7558), 'chainer.cuda.cupy.testing.assert_array_equal', 'cuda.cupy.testing.assert_array_equal', (['x', 'y'], {}), '(x, y)\n', (7552, 7558), False, 'from chainer import cuda\n'), ((7605, 7629), 'chainer.Variable', 'chainer.Variable', 
(['self.x'], {}), '(self.x)\n', (7621, 7629), False, 'import chainer\n'), ((312, 334), 'chainer.cuda.DummyDeviceType', 'cuda.DummyDeviceType', ([], {}), '()\n', (332, 334), False, 'from chainer import cuda\n'), ((336, 358), 'chainer.cuda.DummyDeviceType', 'cuda.DummyDeviceType', ([], {}), '()\n', (356, 358), False, 'from chainer import cuda\n'), ((412, 434), 'chainer.cuda.DummyDeviceType', 'cuda.DummyDeviceType', ([], {}), '()\n', (432, 434), False, 'from chainer import cuda\n'), ((651, 668), 'chainer.cuda.get_device', 'cuda.get_device', ([], {}), '()\n', (666, 668), False, 'from chainer import cuda\n'), ((946, 964), 'chainer.cuda.get_device', 'cuda.get_device', (['x'], {}), '(x)\n', (961, 964), False, 'from chainer import cuda\n'), ((1063, 1081), 'chainer.cuda.get_device', 'cuda.get_device', (['(0)'], {}), '(0)\n', (1078, 1081), False, 'from chainer import cuda\n'), ((1083, 1097), 'chainer.cuda.Device', 'cuda.Device', (['(0)'], {}), '(0)\n', (1094, 1097), False, 'from chainer import cuda\n'), ((1438, 1452), 'chainer.cuda.Device', 'cuda.Device', (['(0)'], {}), '(0)\n', (1449, 1452), False, 'from chainer import cuda\n'), ((1569, 1592), 'chainer.cuda.get_device', 'cuda.get_device', (['device'], {}), '(device)\n', (1584, 1592), False, 'from chainer import cuda\n'), ((2706, 2733), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (2730, 2733), False, 'from chainer import cuda\n'), ((2930, 2944), 'chainer.cuda.Device', 'cuda.Device', (['(0)'], {}), '(0)\n', (2941, 2944), False, 'from chainer import cuda\n'), ((2962, 2981), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (2973, 2981), False, 'from chainer import cuda\n'), ((3081, 3095), 'chainer.cuda.Device', 'cuda.Device', (['(1)'], {}), '(1)\n', (3092, 3095), False, 'from chainer import cuda\n'), ((3113, 3127), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {}), '(x)\n', (3124, 3127), False, 'from chainer import cuda\n'), ((3566, 3593), 
'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (3590, 3593), False, 'from chainer import cuda\n'), ((4241, 4255), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {}), '(x)\n', (4252, 4255), False, 'from chainer import cuda\n'), ((4563, 4592), 'chainer.cuda.get_max_workspace_size', 'cuda.get_max_workspace_size', ([], {}), '()\n', (4590, 4592), False, 'from chainer import cuda\n'), ((6130, 6144), 'chainer.cuda.Device', 'cuda.Device', (['(1)'], {}), '(1)\n', (6141, 6144), False, 'from chainer import cuda\n'), ((6162, 6206), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {'stream': 'cuda.Stream.null'}), '(self.x, stream=cuda.Stream.null)\n', (6173, 6206), False, 'from chainer import cuda\n'), ((6488, 6515), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (6512, 6515), False, 'from chainer import cuda\n'), ((7196, 7210), 'chainer.cuda.Device', 'cuda.Device', (['(0)'], {}), '(0)\n', (7207, 7210), False, 'from chainer import cuda\n'), ((7228, 7247), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (7239, 7247), False, 'from chainer import cuda\n'), ((7347, 7361), 'chainer.cuda.Device', 'cuda.Device', (['(1)'], {}), '(1)\n', (7358, 7361), False, 'from chainer import cuda\n'), ((7379, 7418), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x'], {'stream': 'cuda.Stream.null'}), '(x, stream=cuda.Stream.null)\n', (7390, 7418), False, 'from chainer import cuda\n'), ((7685, 7699), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x'], {}), '(x)\n', (7696, 7699), False, 'from chainer import cuda\n'), ((234, 256), 'chainer.cuda.DummyDeviceType', 'cuda.DummyDeviceType', ([], {}), '()\n', (254, 256), False, 'from chainer import cuda\n'), ((772, 786), 'numpy.int64', 'numpy.int64', (['(0)'], {}), '(0)\n', (783, 786), False, 'import numpy\n'), ((887, 906), 'chainer.cuda.cupy.array', 'cuda.cupy.array', (['[]'], {}), '([])\n', (902, 906), False, 'from chainer import cuda\n'), ((1420, 1435), 'builtins.int', 
'builtins.int', (['(0)'], {}), '(0)\n', (1432, 1435), False, 'import builtins\n'), ((1768, 1782), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['x'], {}), '(x)\n', (1779, 1782), False, 'from chainer import cuda\n'), ((1875, 1890), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (1886, 1890), False, 'import numpy\n'), ((2117, 2136), 'chainer.cuda.cupy.array', 'cuda.cupy.array', (['[]'], {}), '([])\n', (2132, 2136), False, 'from chainer import cuda\n'), ((3040, 3067), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (3064, 3067), False, 'from chainer import cuda\n'), ((3320, 3333), 'chainer.cuda.Stream', 'cuda.Stream', ([], {}), '()\n', (3331, 3333), False, 'from chainer import cuda\n'), ((3935, 3962), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (3959, 3962), False, 'from chainer import cuda\n'), ((5410, 5437), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (5434, 5437), False, 'from chainer import cuda\n'), ((6550, 6563), 'chainer.cuda.Stream', 'cuda.Stream', ([], {}), '()\n', (6561, 6563), False, 'from chainer import cuda\n'), ((6891, 6918), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (6915, 6918), False, 'from chainer import cuda\n'), ((7306, 7333), 'chainer.cuda.cupy.asfortranarray', 'cuda.cupy.asfortranarray', (['x'], {}), '(x)\n', (7330, 7333), False, 'from chainer import cuda\n'), ((1974, 1989), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (1985, 1989), False, 'import numpy\n'), ((2224, 2243), 'chainer.cuda.cupy.array', 'cuda.cupy.array', (['[]'], {}), '([])\n', (2239, 2243), False, 'from chainer import cuda\n')] |
import os
import keras
import random
import numpy as np
from ibex.utilities import dataIO
from ibex.utilities.constants import *
from ibex.cnns.biological.util import AugmentFeature
from ibex.cnns.biological.nodes.train import NodeNetwork, PlotLosses, WriteLogFiles
# node generation function is similar except that it only reads files that have dataset in the name
def NodeGenerator(parameters, width, radius, subset, dataset):
    """Infinite generator of (examples, labels) training batches of node candidates.

    Only feature files whose names contain every restriction string in
    *dataset* are loaded.  Each batch interleaves positive (label 1) and
    negative (label 0) examples.
    """
    # SNEMI3D hack: the validation split is carved out of the training data
    if subset == 'validation': validation = True
    else: validation = False
    subset = 'training'
    # get the directories corresponding to this radius and subset
    positive_directory = 'features/biological/nodes-{}nm-{}x{}x{}/{}/positives'.format(radius, width[IB_Z + 1], width[IB_Y + 1], width[IB_X + 1], subset)
    negative_directory = 'features/biological/nodes-{}nm-{}x{}x{}/{}/negatives'.format(radius, width[IB_Z + 1], width[IB_Y + 1], width[IB_X + 1], subset)
    # get all the positive candidate filenames
    positive_filenames = os.listdir(positive_directory)
    positive_candidates = []
    for positive_filename in positive_filenames:
        if not all(restriction in positive_filename for restriction in dataset): continue
        if not positive_filename[-3:] == '.h5': continue
        positive_candidates.append(dataIO.ReadH5File('{}/{}'.format(positive_directory, positive_filename), 'main'))
    positive_candidates = np.concatenate(positive_candidates, axis=0)
    # get all the negative candidate filenames
    negative_filenames = os.listdir(negative_directory)
    negative_candidates = []
    for negative_filename in negative_filenames:
        if not all(restriction in negative_filename for restriction in dataset): continue
        if not negative_filename[-3:] == '.h5': continue
        negative_candidates.append(dataIO.ReadH5File('{}/{}'.format(negative_directory, negative_filename), 'main'))
    negative_candidates = np.concatenate(negative_candidates, axis=0)
    # 70/30 split of the loaded candidates between training and validation
    if validation:
        positive_candidates = positive_candidates[int(0.7 * positive_candidates.shape[0]):]
        negative_candidates = negative_candidates[int(0.7 * negative_candidates.shape[0]):]
    else:
        positive_candidates = positive_candidates[:int(0.7 * positive_candidates.shape[0])]
        negative_candidates = negative_candidates[:int(0.7 * negative_candidates.shape[0])]
    # create easy access to the numbers of candidates
    npositive_candidates = positive_candidates.shape[0]
    nnegative_candidates = negative_candidates.shape[0]
    batch_size = parameters['batch_size']
    # batch buffers are reused across yields
    examples = np.zeros((batch_size, width[0], width[IB_Z+1], width[IB_Y+1], width[IB_X+1]), dtype=np.float32)
    labels = np.zeros(batch_size, dtype=np.float32)
    # materialize the index lists: random.shuffle needs a mutable sequence, so
    # shuffling a bare range() raised TypeError on Python 3 (no-op on Python 2)
    positive_order = list(range(npositive_candidates))
    negative_order = list(range(nnegative_candidates))
    random.shuffle(positive_order)
    random.shuffle(negative_order)
    positive_index = 0
    negative_index = 0
    while True:
        # fill batch slot pairs (2*iv, 2*iv+1) with one positive and one
        # negative example; // keeps the range argument an int on Python 3
        for iv in range(batch_size // 2):
            positive_candidate = positive_candidates[positive_order[positive_index]]
            negative_candidate = negative_candidates[negative_order[negative_index]]
            examples[2*iv,:,:,:,:] = AugmentFeature(positive_candidate, width)
            labels[2*iv] = True
            examples[2*iv+1,:,:,:,:] = AugmentFeature(negative_candidate, width)
            labels[2*iv+1] = False
            # reshuffle and restart once either pool is exhausted
            positive_index += 1
            if positive_index == npositive_candidates:
                random.shuffle(positive_order)
                positive_index = 0
            negative_index += 1
            if negative_index == nnegative_candidates:
                random.shuffle(negative_order)
                negative_index = 0
        yield (examples, labels)
def Finetune(parameters, trained_network_prefix, width, radius, dataset):
    """Fine-tune a pretrained node network on the SNEMI3D dataset restriction.

    Loads the best-loss weights from *trained_network_prefix*, trains on the
    dataset-restricted NodeGenerator and writes checkpoints, loss plots,
    training history and the final weights into a dataset-specific folder
    next to the original model.
    """
    # fix: pickle was never imported at module level, so the history dump at
    # the end of training raised NameError after the whole run completed
    import pickle
    # make sure the model prefix does not contain edges (to prevent overwriting files)
    assert (not 'edges' in trained_network_prefix)
    assert (dataset[0] == 'SNEMI3D')
    # identify convenient variables
    starting_epoch = parameters['starting_epoch']
    batch_size = parameters['batch_size']
    examples_per_epoch = parameters['examples_per_epoch']
    weights = parameters['weights']
    model = NodeNetwork(parameters, width)
    model.load_weights('{}-best-loss.h5'.format(trained_network_prefix))
    # derive the output folder from the original model location and dataset
    root_location = trained_network_prefix.rfind('/')
    output_folder = '{}-{}'.format(trained_network_prefix[:root_location], '-'.join(dataset))
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    model_prefix = '{}/nodes'.format(output_folder)
    # write out the network parameters to a file
    WriteLogFiles(model, model_prefix, parameters)
    # create a set of keras callbacks
    callbacks = []
    # save the best model seen so far (by validation loss and by accuracy)
    best_loss = keras.callbacks.ModelCheckpoint('{}-best-loss.h5'.format(model_prefix), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)
    callbacks.append(best_loss)
    best_acc = keras.callbacks.ModelCheckpoint('{}-best-acc.h5'.format(model_prefix), monitor='val_acc', verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)
    callbacks.append(best_acc)
    # also checkpoint every fifth epoch unconditionally
    all_models = keras.callbacks.ModelCheckpoint(model_prefix + '-{epoch:03d}.h5', verbose=0, save_best_only=False, save_weights_only=True, period=5)
    callbacks.append(all_models)
    # plot the loss functions
    plot_losses = PlotLosses(model_prefix)
    callbacks.append(plot_losses)
    # save the json file (close the handle instead of leaking it)
    with open('{}.json'.format(model_prefix), 'w') as json_fd:
        json_fd.write(model.to_json())
    if starting_epoch:
        model.load_weights('{}-{:03d}.h5'.format(model_prefix, starting_epoch))
    # there are two thousand validation examples per epoch (standardized)
    nvalidation_examples = 2000
    # train the model
    history = model.fit_generator(NodeGenerator(parameters, width, radius, 'training', dataset), steps_per_epoch=(examples_per_epoch / batch_size),
        epochs=250, verbose=1, class_weight=weights, callbacks=callbacks, validation_data=NodeGenerator(parameters, width, radius, 'validation', dataset),
        validation_steps=(nvalidation_examples / batch_size), initial_epoch=starting_epoch)
    # binary mode so pickle works on both Python 2 and Python 3
    with open('{}-history.pickle'.format(model_prefix), 'wb') as fd:
        pickle.dump(history.history, fd)
    # save the fully trained model
    model.save_weights('{}.h5'.format(model_prefix))
| [
"ibex.cnns.biological.nodes.train.NodeNetwork",
"os.makedirs",
"ibex.cnns.biological.nodes.train.WriteLogFiles",
"keras.callbacks.ModelCheckpoint",
"random.shuffle",
"numpy.zeros",
"ibex.cnns.biological.nodes.train.PlotLosses",
"os.path.exists",
"ibex.cnns.biological.util.AugmentFeature",
"os.list... | [((1003, 1033), 'os.listdir', 'os.listdir', (['positive_directory'], {}), '(positive_directory)\n', (1013, 1033), False, 'import os\n'), ((1402, 1445), 'numpy.concatenate', 'np.concatenate', (['positive_candidates'], {'axis': '(0)'}), '(positive_candidates, axis=0)\n', (1416, 1445), True, 'import numpy as np\n'), ((1519, 1549), 'os.listdir', 'os.listdir', (['negative_directory'], {}), '(negative_directory)\n', (1529, 1549), False, 'import os\n'), ((1919, 1962), 'numpy.concatenate', 'np.concatenate', (['negative_candidates'], {'axis': '(0)'}), '(negative_candidates, axis=0)\n', (1933, 1962), True, 'import numpy as np\n'), ((2588, 2694), 'numpy.zeros', 'np.zeros', (['(batch_size, width[0], width[IB_Z + 1], width[IB_Y + 1], width[IB_X + 1])'], {'dtype': 'np.float32'}), '((batch_size, width[0], width[IB_Z + 1], width[IB_Y + 1], width[\n IB_X + 1]), dtype=np.float32)\n', (2596, 2694), True, 'import numpy as np\n'), ((2697, 2735), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.float32'}), '(batch_size, dtype=np.float32)\n', (2705, 2735), True, 'import numpy as np\n'), ((2840, 2870), 'random.shuffle', 'random.shuffle', (['positive_order'], {}), '(positive_order)\n', (2854, 2870), False, 'import random\n'), ((2875, 2905), 'random.shuffle', 'random.shuffle', (['negative_order'], {}), '(negative_order)\n', (2889, 2905), False, 'import random\n'), ((4272, 4302), 'ibex.cnns.biological.nodes.train.NodeNetwork', 'NodeNetwork', (['parameters', 'width'], {}), '(parameters, width)\n', (4283, 4302), False, 'from ibex.cnns.biological.nodes.train import NodeNetwork, PlotLosses, WriteLogFiles\n'), ((4797, 4843), 'ibex.cnns.biological.nodes.train.WriteLogFiles', 'WriteLogFiles', (['model', 'model_prefix', 'parameters'], {}), '(model, model_prefix, parameters)\n', (4810, 4843), False, 'from ibex.cnns.biological.nodes.train import NodeNetwork, PlotLosses, WriteLogFiles\n'), ((5396, 5532), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', 
(["(model_prefix + '-{epoch:03d}.h5')"], {'verbose': '(0)', 'save_best_only': '(False)', 'save_weights_only': '(True)', 'period': '(5)'}), "(model_prefix + '-{epoch:03d}.h5', verbose=0,\n save_best_only=False, save_weights_only=True, period=5)\n", (5427, 5532), False, 'import keras\n'), ((5611, 5635), 'ibex.cnns.biological.nodes.train.PlotLosses', 'PlotLosses', (['model_prefix'], {}), '(model_prefix)\n', (5621, 5635), False, 'from ibex.cnns.biological.nodes.train import NodeNetwork, PlotLosses, WriteLogFiles\n'), ((4537, 4566), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (4551, 4566), False, 'import os\n'), ((4576, 4602), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (4587, 4602), False, 'import os\n'), ((3219, 3260), 'ibex.cnns.biological.util.AugmentFeature', 'AugmentFeature', (['positive_candidate', 'width'], {}), '(positive_candidate, width)\n', (3233, 3260), False, 'from ibex.cnns.biological.util import AugmentFeature\n'), ((3332, 3373), 'ibex.cnns.biological.util.AugmentFeature', 'AugmentFeature', (['negative_candidate', 'width'], {}), '(negative_candidate, width)\n', (3346, 3373), False, 'from ibex.cnns.biological.util import AugmentFeature\n'), ((3513, 3543), 'random.shuffle', 'random.shuffle', (['positive_order'], {}), '(positive_order)\n', (3527, 3543), False, 'import random\n'), ((3682, 3712), 'random.shuffle', 'random.shuffle', (['negative_order'], {}), '(negative_order)\n', (3696, 3712), False, 'import random\n')] |
import numpy as np
import pandas as pd
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
import joblib
from convokit.transformer import Transformer
class PromptTypes(Transformer):
    """
    Model that infers a vector representation of utterances in terms of the responses that similar utterances tend to
    prompt, as well as types of rhetorical intentions encapsulated by utterances in a corpus, in terms of their
    anticipated responses (operationalized as k-means clusters of vectors).
    Under the surface, the model takes as input pairs of prompts and responses during the fit step. In this stage the
    following subcomponents are involved:
    1. a prompt embedding model that will learn the vector representations;
    2. a prompt type model that learns a clustering of these representations.
    The model can transform individual (unpaired) utterances in the transform step. While the focus is on representing
    properties of prompts, as a side-effect the model can also compute representations that encapsulate properties of
    responses and assign responses to prompt types (as "typical responses" to the prompts in that type).
    Internally, the model contains the following elements:
    * prompt_embedding_model: stores models that compute the vector representations. includes tf-idf models that convert the prompt and response input to term document matrices, an SVD model that produces a low-dimensional representation of responses and prompts, and vector representations of prompt and response terms
    * type_models: stores kmeans models along with type assignments of prompt and response terms
    * train_results: stores the vector representations of the corpus used to train the model in the fit step
    * train_types: stores the type assignments of the corpus used in the fit step
    The transformer will output several attributes of an utterance (names prefixed with <output_field>__). If the utterance is a prompt (in the default case, if it has a response), then the following will be outputted.
    * prompt_repr: a vector representation of the utterance (stored as a corpus-wide matrix, or in the metadata of an individual utterance if `transform_utterance` is called)
    * prompt_dists.<number of types>: a vector storing the distance between the utterance vector and the centroid of each k-means cluster (stored as a corpus-wide matrix, or in the metadata of an individual utterance if `transform_utterance` is called)
    * prompt_type.<number of types>: the index of the type the utterance is assigned to
    * prompt_type_dist.<number of types>: the distance from the vector representation to the centroid of the assigned type
    If the utterance is a response to a previous utterance, then the utterance will also be annotated an analogous set of attributes denoting its response representation and type.
    For downstream tasks, a reasonable first step is to only look at the prompt-side representations.
    For an end-to-end implementation that runs several default values of the parameters, see the `PromptTypeWrapper` module.
    :param prompt_field: the name of the attribute of prompts to use as input to fit.
    :param reference_field: the name of the attribute of responses to use as input to fit. a reasonable choice is to set to
        the same value as prompt_field.
    :param output_field: the name of the attribute to write to in the transform step. the transformer outputs several
        fields, as listed above.
    :param n_types: the number of types to infer. defaults to 8.
    :param prompt_transform_field: the name of the attribute of prompts to use as input to transform; defaults to the
        same attribute as in fit.
    :param reference_transform_field: the name of the attribute of responses to use as input to transform; defaults to the
        same attribute as in fit.
    :param prompt__tfidf_min_df: the minimum frequency of prompt terms to use. can be specified as a fraction or as an
        absolute count, defaults to 100.
    :param prompt__tfidf_max_df: the maximum frequency of prompt terms to use. can be specified as a fraction or as an
        absolute count, defaults to 0.1. Setting higher is more permissive, but may result in many stopword-like terms
        adding noise to the model.
    :param reference__tfidf_min_df: the minimum frequency of response terms to use. can be specified as a fraction or as an
        absolute count, defaults to 100.
    :param reference__tfidf_max_df: the maximum frequency of response terms to use. can be specified as a fraction or as an
        absolute count, defaults to 0.1.
    :param snip_first_dim: whether or not to remove the first SVD dimension (which may add noise to the model; typically
        this reflects frequency rather than any semantic interpretation). defaults to `True`.
    :param svd__n_components: the number of SVD dimensions to use, defaults to 25. higher values result in richer
        vector representations, perhaps at the cost of the model learning overly-specific types.
    :param max_dist: the maximum distance between a vector representation of an utterance and the cluster centroid; a
        cluster whose distance to all centroids is above this cutoff will get assigned to a null type, denoted by -1.
        Defaults to 0.9.
    :param random_state: the random seed to use.
    :param verbosity: frequency of status messages.
    """

    def __init__(self, prompt_field, reference_field, output_field, n_types=8,
                 prompt_transform_field=None, reference_transform_field=None,
                 prompt__tfidf_min_df=100, prompt__tfidf_max_df=.1,
                 reference__tfidf_min_df=100, reference__tfidf_max_df=.1,
                 snip_first_dim=True,
                 svd__n_components=25, max_dist=.9,
                 random_state=None, verbosity=0):
        # sub-model containers; populated by fit() / refit_types() / load_model()
        self.prompt_embedding_model = {}
        self.type_models = {}
        self.train_results = {}
        self.train_types = {}

        self.prompt_field = prompt_field
        self.reference_field = reference_field
        # transform-time input fields fall back to the fit-time fields when unspecified
        self.prompt_transform_field = prompt_transform_field if prompt_transform_field is not None else self.prompt_field
        self.reference_transform_field = reference_transform_field if reference_transform_field is not None else self.reference_field

        self.output_field = output_field
        self.prompt__tfidf_min_df = prompt__tfidf_min_df
        self.prompt__tfidf_max_df = prompt__tfidf_max_df
        self.reference__tfidf_min_df = reference__tfidf_min_df
        self.reference__tfidf_max_df = reference__tfidf_max_df
        self.snip_first_dim = snip_first_dim
        self.svd__n_components = svd__n_components
        self.default_n_types = n_types
        self.random_state = random_state
        self.max_dist = max_dist
        self.verbosity = verbosity

    def fit(self, corpus, y=None, prompt_selector=lambda utt: True, reference_selector=lambda utt: True):
        """
        Fits a PromptTypes model for a corpus -- that is, learns latent representations of prompt and response terms, as well as prompt types.
        :param corpus: Corpus
        :param prompt_selector: a boolean function of signature `filter(utterance)` that determines which
            utterances will be considered as prompts in the fit step. defaults to using all utterances which have a response.
        :param reference_selector: a boolean function of signature `filter(utterance)` that determines which utterances
            will be considered as responses in the fit step. defaults to using all utterances which are responses to a
            prompt.
        :return: None
        """
        # selectors are remembered so transform() can reuse them by default
        self.prompt_selector = prompt_selector
        self.reference_selector = reference_selector

        # only the paired text inputs are needed here; ids are recomputed below
        _, prompt_input, _, reference_input = self._get_pair_input(corpus, self.prompt_field, self.reference_field,
                                                                   self.prompt_selector, self.reference_selector)

        self.prompt_embedding_model = fit_prompt_embedding_model(prompt_input, reference_input,
            self.snip_first_dim, self.prompt__tfidf_min_df, self.prompt__tfidf_max_df,
            self.reference__tfidf_min_df, self.reference__tfidf_max_df,
            self.svd__n_components, self.random_state, self.verbosity)

        # cache the training-corpus embeddings so refit_types() can reuse them
        self.train_results['prompt_ids'], self.train_results['prompt_vects'],\
            self.train_results['reference_ids'], self.train_results['reference_vects'] = self._get_embeddings(corpus, prompt_selector, reference_selector)
        self.refit_types(self.default_n_types, self.random_state)

    def transform(self, corpus, use_fit_selectors=True, prompt_selector=lambda utt: True, reference_selector=lambda utt: True):
        """
        Computes vector representations and prompt type assignments for utterances in a corpus.
        :param corpus: Corpus
        :param use_fit_selectors: defaults to True, will use the same filters as the fit step to determine which utterances will be considered as prompts and responses in the transform step.
        :param prompt_selector: filter that determines which utterances will be considered as prompts in the
            transform step. defaults to prompt_selector, the same as is used in fit.
        :param reference_selector: filter that determines which utterances will be considered as responses in the
            transform step. defaults to reference_selector, the same as is used in fit.
        :return: the corpus, with per-utterance representations and type assignments.
        """
        if use_fit_selectors:
            prompt_selector = self.prompt_selector
            reference_selector = self.reference_selector

        prompt_ids, prompt_vects, reference_ids, reference_vects = self._get_embeddings(corpus, prompt_selector, reference_selector)
        corpus.set_vector_matrix(self.output_field + '__prompt_repr', matrix=prompt_vects, ids=prompt_ids)
        corpus.set_vector_matrix(self.output_field + '__reference_repr', matrix=reference_vects, ids=reference_ids)

        prompt_df, reference_df = self._get_type_assignments(prompt_ids, prompt_vects, reference_ids, reference_vects)
        # last column of each assignment frame is 'type_id'; the rest are centroid distances
        prompt_dists, prompt_assigns = prompt_df[prompt_df.columns[:-1]].values, prompt_df['type_id'].values
        prompt_min_dists = prompt_dists.min(axis=1)
        reference_dists, reference_assigns = reference_df[reference_df.columns[:-1]].values, reference_df['type_id'].values
        reference_min_dists = reference_dists.min(axis=1)
        corpus.set_vector_matrix(self.output_field + '__prompt_dists.%s' % self.default_n_types, ids=prompt_df.index, matrix=prompt_dists,
            columns=['type_%d_dist' % x for x in range(prompt_dists.shape[1])])
        # NOTE(review): column names below are derived from prompt_dists.shape, not
        # reference_dists.shape -- presumably both have the same number of clusters; verify.
        corpus.set_vector_matrix(self.output_field + '__reference_dists.%s' % self.default_n_types,
            ids=reference_df.index, matrix=reference_dists,
            columns=['type_%d_dist' % x for x in range(prompt_dists.shape[1])])
        for id, assign, dist in zip(prompt_df.index, prompt_assigns, prompt_min_dists):
            corpus.get_utterance(id).add_meta(self.output_field + '__prompt_type.%s' % self.default_n_types, assign)
            corpus.get_utterance(id).add_meta(self.output_field + '__prompt_type_dist.%s' % self.default_n_types, float(dist))
        for id, assign, dist in zip(reference_df.index, reference_assigns, reference_min_dists):
            corpus.get_utterance(id).add_meta(self.output_field + '__reference_type.%s' % self.default_n_types, assign)
            corpus.get_utterance(id).add_meta(self.output_field + '__reference_type_dist.%s' % self.default_n_types, float(dist))
        return corpus

    def transform_utterance(self, utterance):
        """
        Computes vector representations and prompt type assignments for a single utterance.
        :param utterance: the utterance.
        :return: the utterance, annotated with representations and type assignments.
        """
        # if self.prompt_transform_filter(utterance):
        utterance = self._transform_utterance_side(utterance, 'prompt')
        # if self.reference_transform_filter(utterance):
        utterance = self._transform_utterance_side(utterance, 'reference')
        return utterance

    def _transform_utterance_side(self, utterance, side):
        """Embed and type-assign a single utterance as either a 'prompt' or a 'reference',
        writing results into the utterance metadata."""
        if side == 'prompt':
            input_field = self.prompt_transform_field
        elif side == 'reference':
            input_field = self.reference_transform_field
        utt_id = utterance.id
        utt_input = utterance.retrieve_meta(input_field)
        # list-valued inputs are flattened to one newline-joined string
        if isinstance(utt_input, list):
            utt_input = '\n'.join(utt_input)
        utt_ids, utt_vects = transform_embeddings(self.prompt_embedding_model, [utt_id], [utt_input], side=side)
        assign_df = assign_prompt_types(self.type_models[self.default_n_types], utt_ids, utt_vects, self.max_dist)
        vals = assign_df.values[0]
        dists = vals[:-1]
        min_dist = min(dists)
        assign = vals[-1]
        utterance.add_meta(self.output_field + '__%s_type.%s' % (side, self.default_n_types), assign)
        utterance.add_meta(self.output_field + '__%s_type_dist.%s' % (side, self.default_n_types), float(min_dist))
        utterance.add_meta(self.output_field + '__%s_dists.%s' % (side, self.default_n_types), [float(x) for x in dists])
        utterance.add_meta(self.output_field + '__%s_repr' % side, [float(x) for x in utt_vects[0]])
        return utterance

    def refit_types(self, n_types, random_state=None, name=None):
        """
        Using the latent representations of prompt terms learned during the initial `fit` call, infers `n_types` prompt types. permits retraining the clustering model that determines the number of types, on top of the initial model. calling this *and* updating the `default_n_types` field of the model will result in future `transform` calls assigning utterances to one of `n_types` prompt types.
        :param n_types: number of types to learn
        :param random_state: random seed
        :param name: the name of the new type model. defaults to n_types.
        :return: None
        """
        if name is None:
            key = n_types
        else:
            key = name
        if random_state is None:
            random_state = self.random_state
        self.type_models[key] = fit_prompt_type_model(self.prompt_embedding_model, n_types, random_state, self.max_dist, self.verbosity)
        # re-derive training-corpus type assignments under the new clustering
        prompt_df, reference_df = self._get_type_assignments(type_key=key)
        self.train_types[key] = {'prompt_df': prompt_df, 'reference_df': reference_df}

    def _get_embeddings(self, corpus, prompt_selector, reference_selector):
        """Compute prompt- and reference-side embeddings for the selected corpus utterances.
        Returns (prompt_ids, prompt_vects, reference_ids, reference_vects)."""
        prompt_ids, prompt_inputs = self._get_input(corpus, self.prompt_transform_field,
                                                    prompt_selector)
        reference_ids, reference_inputs = self._get_input(corpus, self.reference_transform_field, reference_selector)
        prompt_ids, prompt_vects = transform_embeddings(self.prompt_embedding_model,
                                                         prompt_ids, prompt_inputs,
                                                         side='prompt')
        reference_ids, reference_vects = transform_embeddings(self.prompt_embedding_model,
                                                              reference_ids, reference_inputs,
                                                              side='reference')
        return prompt_ids, prompt_vects, reference_ids, reference_vects

    def _get_type_assignments(self, prompt_ids=None, prompt_vects=None,
                              reference_ids=None, reference_vects=None, type_key=None):
        """Assign prompt/reference vectors to types; defaults to the cached
        training-corpus embeddings and the default type model."""
        if prompt_ids is None:
            prompt_ids, prompt_vects, reference_ids, reference_vects = [self.train_results[k] for k in
                                                                        ['prompt_ids', 'prompt_vects', 'reference_ids', 'reference_vects']]
        if type_key is None:
            type_key = self.default_n_types
        prompt_df = assign_prompt_types(self.type_models[type_key], prompt_ids, prompt_vects, self.max_dist)
        reference_df = assign_prompt_types(self.type_models[type_key], reference_ids, reference_vects, self.max_dist)
        return prompt_df, reference_df

    def display_type(self, type_id, corpus=None, type_key=None, k=10):
        """
        For a particular prompt type, displays the representative prompt and response terms. can also display representative prompt and response utterances.
        :param type_id: ID of the prompt type to display.
        :param corpus: pass in the training corpus to also display representative utterances.
        :param type_key: the name of the prompt type clustering model to use. defaults to `n_types` that the model was initialized with, but if `refit_types` is called with different number of types, can be modified to display this updated model as well.
        :param k: the number of sample terms (or utteranceS) to display.
        :return: None
        """
        if type_key is None:
            type_key = self.default_n_types
        prompt_df = self.type_models[type_key]['prompt_df']
        reference_df = self.type_models[type_key]['reference_df']

        # terms closest to the type centroid (smallest distance in column `type_id`)
        top_prompt = prompt_df[prompt_df.type_id == type_id].sort_values(type_id).head(k)
        top_ref = reference_df[reference_df.type_id == type_id].sort_values(type_id).head(k)
        print('top prompt:')
        print(top_prompt)
        print('top response:')
        print(top_ref)

        if corpus is not None:
            prompt_df = self.train_types[type_key]['prompt_df']
            reference_df = self.train_types[type_key]['reference_df']
            top_prompt = prompt_df[prompt_df.type_id == type_id].sort_values(type_id).head(k).index
            top_ref = reference_df[reference_df.type_id == type_id].sort_values(type_id).head(k).index
            print('top prompts:')
            for utt in top_prompt:
                print(utt, corpus.get_utterance(utt).text)
                print(corpus.get_utterance(utt).retrieve_meta(self.prompt_transform_field))
                print()
            print('top responses:')
            for utt in top_ref:
                print(utt, corpus.get_utterance(utt).text)
                print(corpus.get_utterance(utt).retrieve_meta(self.reference_transform_field))
                print()

    def summarize(self, corpus, type_ids=None, type_key=None, k=10):
        '''
        Displays representative prompt and response terms and utterances for each type learned. A wrapper for `display_type`.
        :param corpus: corpus to display utterances for (must have `transform()` called on it)
        :param type_ids: ID of the prompt type to display. if None, will display all types.
        :param type_key: the name of the prompt type clustering model to use. defaults to `n_types` that the model was initialized with, but if `refit_types` is called with different number of types, can be modified to display this updated model as well.
        :param k: the number of sample terms (or utteranceS) to display.
        :return: None
        '''
        if type_key is None:
            type_key = self.default_n_types
        n_types = self.type_models[type_key]['km_model'].n_clusters
        if type_ids is None:
            type_ids = list(range(n_types))
        if not isinstance(type_ids, list):
            type_ids = [type_ids]
        for type_id in type_ids:
            print('TYPE', type_id)
            self.display_type(type_id, corpus, type_key, k)
            print('====')

    def dump_model(self, model_dir, type_keys='default', dump_train_corpus=True):
        """
        Dumps the model to disk.
        :param model_dir: directory to write model to
        :param type_keys: if 'default', will only write the type clustering model corresponding to the `n_types` the model was initialized with. if 'all', will write all clustering models that have been trained via calls to `refit_types`. can also take a list of clustering models.
        :param dump_train_corpus: whether to also write the representations and type assignments of the training corpus. defaults to True.
        :return: None
        """
        if self.verbosity > 0:
            print('dumping embedding model')
        if not os.path.exists(model_dir):
            try:
                os.mkdir(model_dir)
            except:
                # best-effort: a concurrent creator may have won the race
                pass
        for k in ['prompt_tfidf_model', 'reference_tfidf_model', 'svd_model']:
            joblib.dump(self.prompt_embedding_model[k],
                        os.path.join(model_dir, k + '.joblib'))
        for k in ['U_prompt', 'U_reference']:
            # np.save appends '.npy'; load_model reads k + '.npy' accordingly
            np.save(os.path.join(model_dir, k), self.prompt_embedding_model[k])

        if dump_train_corpus:
            if self.verbosity > 0:
                print('dumping training embeddings')
            for k in ['prompt_ids', 'prompt_vects', 'reference_ids', 'reference_vects']:
                np.save(os.path.join(model_dir, 'train_' + k), self.train_results[k])

        if type_keys == 'default':
            to_dump = [self.default_n_types]
        elif type_keys == 'all':
            to_dump = self.type_models.keys()
        else:
            to_dump = type_keys
        for key in to_dump:
            if self.verbosity > 0:
                print('dumping type model', key)
            type_model = self.type_models[key]
            joblib.dump(type_model['km_model'], os.path.join(model_dir, 'km_model.%s.joblib' % key))
            for k in ['prompt_df', 'reference_df']:
                type_model[k].to_csv(os.path.join(model_dir, '%s.%s.tsv' % (k, key)), sep='\t')
            if dump_train_corpus:
                train_types = self.train_types[key]
                for k in ['prompt_df', 'reference_df']:
                    train_types[k].to_csv(os.path.join(model_dir, 'train_%s.%s.tsv' % (k, key)), sep='\t')

    def get_model(self, type_keys='default'):
        """
        Returns the model as a dictionary containing:
        * embedding_model: stores information pertaining to the vector representations.
            * prompt_tfidf_model: sklearn tf-idf model that converts prompt input to term-document matrix
            * reference_tfidf_model: tf-idf model that converts response input to term-document matrix
            * svd_model: sklearn TruncatedSVD model that produces a low-dimensional representation of responses and prompts
            * U_prompt: vector representations of prompt terms
            * U_reference: vector representations of response terms
        * type_models: a dictionary mapping each type clustering model to:
            * km_model: a sklearn KMeans model of the learned types
            * prompt_df: distances to cluster centroids, and type assignments, of prompt terms
            * reference_df: distances to cluster centroids, and type assignments, of reference terms
        :param type_keys: if 'default', will return the type clustering model corresponding to the `n_types` the model was initialized with. if 'all', returns all clustering models that have been trained via calls to `refit_types`. can also take a list of clustering models.
        :return: the prompt types model
        """
        if type_keys == 'default':
            to_get = [self.default_n_types]
        elif type_keys == 'all':
            to_get = self.type_models.keys()
        else:
            to_get = type_keys
        to_return = {'embedding_model': self.prompt_embedding_model,
                     'type_models': {k: self.type_models[k] for k in to_get}}
        return to_return

    def load_model(self, model_dir, type_keys='default', load_train_corpus=True):
        """
        Loads the model from disk.
        :param model_dir: directory to read model to
        :param type_keys: if 'default', will only read the type clustering model corresponding to the `n_types` the model was initialized with. if 'all', will read all clustering models that are available in directory. can also take a list of clustering models.
        :param load_train_corpus: whether to also read the representations and type assignments of the training corpus. defaults to True.
        :return: None
        """
        if self.verbosity > 0:
            print('loading embedding model')
        for k in ['prompt_tfidf_model', 'reference_tfidf_model', 'svd_model']:
            self.prompt_embedding_model[k] = joblib.load(os.path.join(model_dir, k + '.joblib'))
        for k in ['U_prompt', 'U_reference']:
            self.prompt_embedding_model[k] = np.load(os.path.join(model_dir, k + '.npy'))

        if load_train_corpus:
            if self.verbosity > 0:
                print('loading training embeddings')
            for k in ['prompt_ids', 'prompt_vects', 'reference_ids', 'reference_vects']:
                self.train_results[k] = np.load(os.path.join(model_dir, 'train_' + k + '.npy'))

        if type_keys == 'default':
            to_load = [self.default_n_types]
        elif type_keys == 'all':
            # recover available keys from the km_model.<key>.joblib filenames
            to_load = [x.replace('km_model.','').replace('.joblib','')
                       for x in os.listdir(model_dir) if x.startswith('km_model')]
        else:
            to_load = type_keys
        for key in to_load:
            try:
                key = int(key)
            except: pass
            if self.verbosity > 0:
                print('loading type model', key)
            self.type_models[key] = {} # this should be an int-ish
            self.type_models[key]['km_model'] = joblib.load(
                os.path.join(model_dir, 'km_model.%s.joblib' % key))
            for k in ['prompt_df', 'reference_df']:
                self.type_models[key][k] =\
                    pd.read_csv(os.path.join(model_dir, '%s.%s.tsv' % (k, key)), sep='\t', index_col=0)
                # restore integer cluster columns; last column stays 'type_id'
                self.type_models[key][k].columns = [int(x) for x in self.type_models[key][k].columns[:-1]]\
                    + ['type_id']
            if load_train_corpus:
                self.train_types[key] = {}
                for k in ['prompt_df', 'reference_df']:
                    self.train_types[key][k] = pd.read_csv(
                        os.path.join(model_dir, 'train_%s.%s.tsv' % (k, key)), sep='\t', index_col=0
                    )
                    self.train_types[key][k].columns = \
                        [int(x) for x in self.train_types[key][k].columns[:-1]] + ['type_id']

    def _get_input(self, corpus, field, filter_fn, check_nonempty=True):
        """Collect (ids, inputs) for utterances passing filter_fn; list-valued
        fields are newline-joined, and empty inputs are skipped when check_nonempty."""
        ids = []
        inputs = []
        for utterance in corpus.iter_utterances():
            input = utterance.retrieve_meta(field)
            if isinstance(input, list):
                input = '\n'.join(input)
            if filter_fn(utterance)\
                and ((not check_nonempty) or (len(input) > 0)):
                ids.append(utterance.id)
                inputs.append(input)
        return ids, inputs

    def _get_pair_input(self, corpus, prompt_field, reference_field,
                        prompt_selector, reference_selector,
                        check_nonempty=True):
        """Collect paired (prompt, response) inputs: each response utterance with a
        retrievable reply-to parent that passes both selectors contributes one pair.
        Returns (prompt_ids, prompt_utts, reference_ids, reference_utts)."""
        prompt_ids = []
        prompt_utts = []
        reference_ids = []
        reference_utts = []
        for reference_utt in corpus.iter_utterances():
            if reference_utt.reply_to is None:
                continue
            prompt_utt_id = reference_utt.reply_to
            try:
                prompt_utt = corpus.get_utterance(prompt_utt_id)
            except:
                # parent utterance missing from the corpus; skip the pair
                continue
            if prompt_selector(prompt_utt) \
                and reference_selector(reference_utt):

                prompt_input = prompt_utt.retrieve_meta(prompt_field)
                reference_input = reference_utt.retrieve_meta(reference_field)

                if (prompt_input is None) or (reference_input is None):
                    continue

                if isinstance(prompt_input, list):
                    prompt_input = '\n'.join(prompt_input)
                if isinstance(reference_input, list):
                    reference_input = '\n'.join(reference_input)

                if (not check_nonempty) or ((len(prompt_input) > 0) and (len(reference_input) > 0)):
                    prompt_ids.append(prompt_utt.id)
                    prompt_utts.append(prompt_input)
                    reference_ids.append(reference_utt.id)
                    reference_utts.append(reference_input)
        return prompt_ids, prompt_utts, reference_ids, reference_utts
def fit_prompt_embedding_model(prompt_input, reference_input, snip_first_dim=True,
                               prompt__tfidf_min_df=100, prompt__tfidf_max_df=.1,
                               reference__tfidf_min_df=100, reference__tfidf_max_df=.1,
                               svd__n_components=25, random_state=None, verbosity=0):
    """
    Standalone function that fits an embedding model given paired prompt and response inputs. See docstring of the `PromptTypes` class for details.
    :param prompt_input: list of prompts (represented as space-separated strings of terms)
    :param reference_input: list of responses (represented as space-separated strings of terms). note that each entry of reference_input should be a response to the corresponding entry in prompt_input.
    :return: prompt embedding model
    """
    if verbosity > 0:
        print('fitting %d input pairs' % len(prompt_input))
        print('fitting reference tfidf model')

    # term-document matrix over response terms
    ref_vectorizer = TfidfVectorizer(
        min_df=reference__tfidf_min_df,
        max_df=reference__tfidf_max_df,
        binary=True,
        token_pattern=r'(?u)(\S+)'
    )
    ref_term_doc = ref_vectorizer.fit_transform(reference_input)

    if verbosity > 0:
        print('fitting prompt tfidf model')
    # term-document matrix over prompt terms
    prompt_vectorizer = TfidfVectorizer(
        min_df=prompt__tfidf_min_df,
        max_df=prompt__tfidf_max_df,
        binary=True,
        token_pattern=r'(?u)(\S+)'
    )
    prompt_term_doc = prompt_vectorizer.fit_transform(prompt_input)

    if verbosity > 0:
        print('fitting svd model')
    svd = TruncatedSVD(n_components=svd__n_components, random_state=random_state, algorithm='arpack')

    # latent representations of response terms, rescaled by the singular values
    ref_embeddings = svd.fit_transform(normalize(ref_term_doc.T))
    sing_vals = svd.singular_values_
    ref_embeddings /= sing_vals

    # project prompt terms into the same latent space
    prompt_embeddings = (svd.components_ * normalize(prompt_term_doc, axis=0) / sing_vals[:, np.newaxis]).T

    if snip_first_dim:
        # the leading dimension typically tracks frequency, not semantics
        prompt_embeddings = prompt_embeddings[:, 1:]
        ref_embeddings = ref_embeddings[:, 1:]

    return {'prompt_tfidf_model': prompt_vectorizer,
            'reference_tfidf_model': ref_vectorizer,
            'svd_model': svd,
            'U_prompt': normalize(prompt_embeddings),
            'U_reference': normalize(ref_embeddings)}
def transform_embeddings(model, ids, input, side='prompt', filter_empty=True):
    """
    Standalone function that returns vector representations of input text given a trained PromptTypes prompt_embedding_model. See docstring of `PromptTypes` class for details.
    :param model: prompt embedding model
    :param ids: ids of input text
    :param input: a list where each entry has corresponding id in the ids argument, and is a string of terms corresponding to an utterance.
    :param side: whether to return prompt or response embeddings ("prompt" and "reference" respectively); defaults to "prompt"
    :param filter_empty: if `True`, will not return embeddings for prompts with no terms.
    :return: input IDs `ids`, and corresponding vector representations of input `vect`
    """
    # l1-normalized tf-idf weights for the requested side
    term_weights = normalize(model['%s_tfidf_model' % side].transform(input), norm='l1')
    # weighted combination of the per-term latent vectors, re-normalized
    vects = normalize(term_weights * model['U_%s' % side])
    if not filter_empty:
        return ids, vects
    # keep only inputs that contained at least one modeled term
    nonempty = np.array(term_weights.sum(axis=1)).flatten() > 0
    return np.array(ids)[nonempty], vects[nonempty]
def _tfidf_feature_names(tfidf_model):
    """Return the vocabulary terms of a fitted tf-idf model.

    Compatibility shim: `get_feature_names` was deprecated in scikit-learn 1.0
    and removed in 1.2 in favour of `get_feature_names_out`.
    """
    if hasattr(tfidf_model, 'get_feature_names_out'):
        return tfidf_model.get_feature_names_out()
    return tfidf_model.get_feature_names()

def fit_prompt_type_model(model, n_types, random_state=None, max_dist=0.9, verbosity=0):
    """
    Standalone function that fits a prompt type model given paired prompt and response inputs. See docstring of the `PromptTypes` class for details.
    :param model: prompt embedding model (from `fit_prompt_embedding_model()`)
    :param n_types: number of prompt types to infer
    :param random_state: random seed for the k-means clustering
    :param max_dist: terms farther than this from every centroid get the null type -1
    :param verbosity: if > 0, print status messages
    :return: prompt type model
    """
    if verbosity > 0:
        print('fitting %d prompt types' % n_types)
    # cluster the prompt-term embeddings; responses are assigned to the same clusters
    km = KMeans(n_clusters=n_types, random_state=random_state)
    km.fit(model['U_prompt'])
    prompt_dists = km.transform(model['U_prompt'])
    prompt_clusters = km.predict(model['U_prompt'])
    # terms too far from every centroid are assigned the null type -1
    prompt_clusters[prompt_dists.min(axis=1) >= max_dist] = -1
    reference_dists = km.transform(model['U_reference'])
    reference_clusters = km.predict(model['U_reference'])
    reference_clusters[reference_dists.min(axis=1) >= max_dist] = -1
    # per-term frames: one distance column per cluster, plus the assigned 'type_id'
    prompt_df = pd.DataFrame(index=_tfidf_feature_names(model['prompt_tfidf_model']),
                             data=np.hstack([prompt_dists, prompt_clusters[:,np.newaxis]]),
                             columns=list(range(n_types)) + ['type_id'])
    reference_df = pd.DataFrame(index=_tfidf_feature_names(model['reference_tfidf_model']),
                                data=np.hstack([reference_dists, reference_clusters[:,np.newaxis]]),
                                columns=list(range(n_types)) + ['type_id'])
    return {'km_model': km,
            'prompt_df': prompt_df, 'reference_df': reference_df}
def assign_prompt_types(model, ids, vects, max_dist=0.9):
    """
    Standalone function that returns type assignments of input vectors given a trained PromptTypes type model. See docstring of `PromptTypes` class for details.
    :param model: prompt type model
    :param ids: ids of input vectors
    :param vects: input vectors
    :return: a dataframe storing cluster centroid distances and the assigned type.
    """
    km = model['km_model']
    centroid_dists = km.transform(vects)
    type_assignments = km.predict(vects)
    # vectors too far from every centroid receive the null type -1
    too_far = centroid_dists.min(axis=1) >= max_dist
    type_assignments[too_far] = -1
    n_clusters = centroid_dists.shape[1]
    table = np.hstack([centroid_dists, type_assignments[:, np.newaxis]])
    return pd.DataFrame(index=ids, data=table,
                        columns=list(range(n_clusters)) + ['type_id'])
| [
"os.mkdir",
"sklearn.decomposition.TruncatedSVD",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.cluster.KMeans",
"os.path.exists",
"numpy.hstack",
"numpy.array",
"sklearn.preprocessing.normalize",
"os.path.join",
"os.listdir"
] | [((29724, 29849), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': 'reference__tfidf_min_df', 'max_df': 'reference__tfidf_max_df', 'binary': '(True)', 'token_pattern': '"""(?u)(\\\\S+)"""'}), "(min_df=reference__tfidf_min_df, max_df=\n reference__tfidf_max_df, binary=True, token_pattern='(?u)(\\\\S+)')\n", (29739, 29849), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((30049, 30167), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': 'prompt__tfidf_min_df', 'max_df': 'prompt__tfidf_max_df', 'binary': '(True)', 'token_pattern': '"""(?u)(\\\\S+)"""'}), "(min_df=prompt__tfidf_min_df, max_df=prompt__tfidf_max_df,\n binary=True, token_pattern='(?u)(\\\\S+)')\n", (30064, 30167), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((30341, 30436), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'svd__n_components', 'random_state': 'random_state', 'algorithm': '"""arpack"""'}), "(n_components=svd__n_components, random_state=random_state,\n algorithm='arpack')\n", (30353, 30436), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((30777, 30796), 'sklearn.preprocessing.normalize', 'normalize', (['U_prompt'], {}), '(U_prompt)\n', (30786, 30796), False, 'from sklearn.preprocessing import normalize\n'), ((30820, 30842), 'sklearn.preprocessing.normalize', 'normalize', (['U_reference'], {}), '(U_reference)\n', (30829, 30842), False, 'from sklearn.preprocessing import normalize\n'), ((31993, 32038), 'sklearn.preprocessing.normalize', 'normalize', (["(tfidf_vects * model['U_%s' % side])"], {}), "(tfidf_vects * model['U_%s' % side])\n", (32002, 32038), False, 'from sklearn.preprocessing import normalize\n'), ((32645, 32698), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_types', 'random_state': 'random_state'}), '(n_clusters=n_types, random_state=random_state)\n', (32651, 32698), False, 'from sklearn.cluster 
import KMeans\n'), ((30479, 30506), 'sklearn.preprocessing.normalize', 'normalize', (['reference_vect.T'], {}), '(reference_vect.T)\n', (30488, 30506), False, 'from sklearn.preprocessing import normalize\n'), ((20582, 20607), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (20596, 20607), False, 'import os\n'), ((32074, 32087), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (32082, 32087), True, 'import numpy as np\n'), ((33195, 33252), 'numpy.hstack', 'np.hstack', (['[prompt_dists, prompt_clusters[:, np.newaxis]]'], {}), '([prompt_dists, prompt_clusters[:, np.newaxis]])\n', (33204, 33252), True, 'import numpy as np\n'), ((33444, 33507), 'numpy.hstack', 'np.hstack', (['[reference_dists, reference_clusters[:, np.newaxis]]'], {}), '([reference_dists, reference_clusters[:, np.newaxis]])\n', (33453, 33507), True, 'import numpy as np\n'), ((34306, 34349), 'numpy.hstack', 'np.hstack', (['[dists, clusters[:, np.newaxis]]'], {}), '([dists, clusters[:, np.newaxis]])\n', (34315, 34349), True, 'import numpy as np\n'), ((20642, 20661), 'os.mkdir', 'os.mkdir', (['model_dir'], {}), '(model_dir)\n', (20650, 20661), False, 'import os\n'), ((20861, 20899), 'os.path.join', 'os.path.join', (['model_dir', "(k + '.joblib')"], {}), "(model_dir, k + '.joblib')\n", (20873, 20899), False, 'import os\n'), ((20968, 20994), 'os.path.join', 'os.path.join', (['model_dir', 'k'], {}), '(model_dir, k)\n', (20980, 20994), False, 'import os\n'), ((21735, 21786), 'os.path.join', 'os.path.join', (['model_dir', "('km_model.%s.joblib' % key)"], {}), "(model_dir, 'km_model.%s.joblib' % key)\n", (21747, 21786), False, 'import os\n'), ((24756, 24794), 'os.path.join', 'os.path.join', (['model_dir', "(k + '.joblib')"], {}), "(model_dir, k + '.joblib')\n", (24768, 24794), False, 'import os\n'), ((24895, 24930), 'os.path.join', 'os.path.join', (['model_dir', "(k + '.npy')"], {}), "(model_dir, k + '.npy')\n", (24907, 24930), False, 'import os\n'), ((25878, 25929), 'os.path.join', 
'os.path.join', (['model_dir', "('km_model.%s.joblib' % key)"], {}), "(model_dir, 'km_model.%s.joblib' % key)\n", (25890, 25929), False, 'import os\n'), ((30604, 30634), 'sklearn.preprocessing.normalize', 'normalize', (['prompt_vect'], {'axis': '(0)'}), '(prompt_vect, axis=0)\n', (30613, 30634), False, 'from sklearn.preprocessing import normalize\n'), ((21260, 21297), 'os.path.join', 'os.path.join', (['model_dir', "('train_' + k)"], {}), "(model_dir, 'train_' + k)\n", (21272, 21297), False, 'import os\n'), ((21877, 21924), 'os.path.join', 'os.path.join', (['model_dir', "('%s.%s.tsv' % (k, key))"], {}), "(model_dir, '%s.%s.tsv' % (k, key))\n", (21889, 21924), False, 'import os\n'), ((25188, 25234), 'os.path.join', 'os.path.join', (['model_dir', "('train_' + k + '.npy')"], {}), "(model_dir, 'train_' + k + '.npy')\n", (25200, 25234), False, 'import os\n'), ((26060, 26107), 'os.path.join', 'os.path.join', (['model_dir', "('%s.%s.tsv' % (k, key))"], {}), "(model_dir, '%s.%s.tsv' % (k, key))\n", (26072, 26107), False, 'import os\n'), ((22120, 22173), 'os.path.join', 'os.path.join', (['model_dir', "('train_%s.%s.tsv' % (k, key))"], {}), "(model_dir, 'train_%s.%s.tsv' % (k, key))\n", (22132, 22173), False, 'import os\n'), ((25452, 25473), 'os.listdir', 'os.listdir', (['model_dir'], {}), '(model_dir)\n', (25462, 25473), False, 'import os\n'), ((26491, 26544), 'os.path.join', 'os.path.join', (['model_dir', "('train_%s.%s.tsv' % (k, key))"], {}), "(model_dir, 'train_%s.%s.tsv' % (k, key))\n", (26503, 26544), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 10:23:53 2013
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>
@email: <EMAIL>, <EMAIL>
@license: BSD 3-clause.
"""
import os
import unittest
import tempfile
import numpy as np
import scipy as sp
from parsimony.algorithms.nipals import RankOneSVD
from parsimony.algorithms.nipals import RankOneSparseSVD
import parsimony.utils as utils
try:
from .tests import TestCase # When imported as a package.
except ValueError:
from tests import TestCase # When run as a program.
def generate_sparse_matrix(shape, density=0.10):
    """Generate a dense array where roughly ``density`` of the entries are
    non-zero, uniformly sampled from [0, 1).

    The non-zero positions are drawn *with* replacement, so duplicate
    indices collapse and the actual density may be slightly lower than
    requested.

    Parameters
    ----------
    shape : tuple of int
        Shape of the returned array.
    density : float
        Target fraction of non-zero entries (default 0.10).

    Returns
    -------
    numpy.ndarray
        Dense array of the requested shape.

    Examples
    --------
    >>> shape = (5, 5)
    >>> density = 0.2
    >>> print(generate_sparse_matrix(shape, density))  # doctest: +SKIP
    [[ 0.          0.          0.          0.          0.        ]
     [ 0.          0.          0.95947611  0.          0.        ]
     [ 0.          0.          0.          0.12626569  0.        ]
     [ 0.          0.51318651  0.          0.          0.        ]
     [ 0.          0.          0.          0.          0.92133575]]
    """
    num_elements = 1
    for dim in shape:
        num_elements *= dim
    zero_vec = np.zeros(num_elements, dtype=float)
    # np.random.random_integers was deprecated in NumPy 1.11 and removed in
    # 1.25; randint with an exclusive upper bound is the direct replacement.
    indices = np.random.randint(0,
                                num_elements,
                                int(density * num_elements))
    zero_vec[indices] = np.random.random_sample(len(indices))
    sparse_mat = np.reshape(zero_vec, shape)
    return sparse_mat
class TestSVD(TestCase):
    """Checks the NIPALS rank-one (sparse) SVD against reference SVDs."""

    def get_err_by_np_linalg_svd(self, computed_v, X):
        """Distance between ``computed_v`` and the leading right singular
        vector of ``X`` from numpy's dense SVD, after sign alignment."""
        _, _, Vt = np.linalg.svd(X)
        reference = Vt[[0], :].T
        # Singular vectors are defined up to sign; align via the dot product.
        flip = np.dot(computed_v.T, reference)[0][0]
        return np.linalg.norm(computed_v - reference * flip)

    def get_err_by_sp_sparse_linalg_svds(self, computed_v, X):
        """Same as above, but against scipy's sparse ``svds`` reference."""
        X = sp.sparse.csr_matrix(X)
        _, _, Vt = sp.sparse.linalg.svds(X, k=1)
        reference = Vt[[0], :].T
        flip = np.dot(computed_v.T, reference)[0][0]
        return np.linalg.norm(computed_v - reference * flip)

    def get_err_fast_svd(self, nrow, ncol):
        """Runs RankOneSVD on a random dense matrix; returns the error."""
        np.random.seed(0)
        X = np.random.random((nrow, ncol))
        parsimony_v = RankOneSVD(max_iter=1000).run(X)
        return self.get_err_by_np_linalg_svd(parsimony_v, X)

    def test_fast_svd(self):
        # Square, tall-skinny, and short-fat cases; the last one gets a
        # looser tolerance.
        err = self.get_err_fast_svd(50, 50)
        self.assertTrue(err < utils.consts.TOLERANCE,
                        "Error too big : %g > %g tolerance" %
                        (err, utils.consts.TOLERANCE))
        err = self.get_err_fast_svd(5000, 5)
        self.assertTrue(err < utils.consts.TOLERANCE,
                        "Error too big : %g > %g tolerance" %
                        (err, utils.consts.TOLERANCE))
        err = self.get_err_fast_svd(5, 5000)
        self.assertTrue(err < utils.consts.TOLERANCE * 1000,
                        "Error too big : %g > %g tolerance" %
                        (err, utils.consts.TOLERANCE * 1000))

    def get_err_fast_sparse_svd(self, nrow, ncol, density):
        """Runs RankOneSparseSVD on a random sparse matrix; returns the error."""
        X = generate_sparse_matrix(shape=(nrow, ncol),
                                   density=density)
        # Dump the input matrix to a temp file for post-mortem debugging.
        fd = None
        try:
            fd, tmpfilename = tempfile.mkstemp(suffix=".npy",
                                             prefix="X_%d_%d" % (nrow, ncol))
            np.save(tmpfilename, X)
        finally:
            if fd is not None:
                os.close(fd)
        parsimony_v = RankOneSparseSVD(max_iter=1000).run(X)
        # return self.get_err_by_np_linalg_svd(parsimony_v, X)
        return self.get_err_by_sp_sparse_linalg_svds(parsimony_v, X)

    def test_fast_sparse_svd(self):
        err = self.get_err_fast_sparse_svd(50, 50, density=0.1)
        self.assertTrue(err < (utils.consts.TOLERANCE * 100),
                        "Error too big : %g > %g tolerance" %
                        (err, utils.consts.TOLERANCE * 100))
        err = self.get_err_fast_sparse_svd(500, 5000, density=0.1)
        self.assertTrue(err < (utils.consts.TOLERANCE * 100),
                        "Error too big : %g > %g tolerance" %
                        (err, utils.consts.TOLERANCE))
        err = self.get_err_fast_sparse_svd(5000, 500, density=0.1)
        self.assertTrue(err < (utils.consts.TOLERANCE * 100),
                        "Error too big : %g > %g tolerance" %
                        (err, utils.consts.TOLERANCE))
if __name__ == '__main__':
    import doctest
    # Run the module doctests first, then the unittest suite.
    # Note: unittest.main() calls sys.exit() when it finishes.
    doctest.testmod()
    unittest.main()
| [
"unittest.main",
"numpy.save",
"numpy.random.seed",
"tempfile.mkstemp",
"scipy.sparse.linalg.svds",
"parsimony.algorithms.nipals.RankOneSparseSVD",
"numpy.zeros",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.reshape",
"scipy.sparse.csr_matrix",
"numpy.random.random",
"parsimony.algorithms... | [((1283, 1318), 'numpy.zeros', 'np.zeros', (['num_elements'], {'dtype': 'float'}), '(num_elements, dtype=float)\n', (1291, 1318), True, 'import numpy as np\n'), ((1568, 1595), 'numpy.reshape', 'np.reshape', (['zero_vec', 'shape'], {}), '(zero_vec, shape)\n', (1578, 1595), True, 'import numpy as np\n'), ((4912, 4929), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4927, 4929), False, 'import doctest\n'), ((4934, 4949), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4947, 4949), False, 'import unittest\n'), ((1753, 1769), 'numpy.linalg.svd', 'np.linalg.svd', (['X'], {}), '(X)\n', (1766, 1769), True, 'import numpy as np\n'), ((1891, 1928), 'numpy.linalg.norm', 'np.linalg.norm', (['(computed_v - np_v_new)'], {}), '(computed_v - np_v_new)\n', (1905, 1928), True, 'import numpy as np\n'), ((2056, 2079), 'scipy.sparse.csr_matrix', 'sp.sparse.csr_matrix', (['X'], {}), '(X)\n', (2076, 2079), True, 'import scipy as sp\n'), ((2102, 2131), 'scipy.sparse.linalg.svds', 'sp.sparse.linalg.svds', (['X'], {'k': '(1)'}), '(X, k=1)\n', (2123, 2131), True, 'import scipy as sp\n'), ((2253, 2290), 'numpy.linalg.norm', 'np.linalg.norm', (['(computed_v - np_v_new)'], {}), '(computed_v - np_v_new)\n', (2267, 2290), True, 'import numpy as np\n'), ((2364, 2381), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2378, 2381), True, 'import numpy as np\n'), ((2394, 2424), 'numpy.random.random', 'np.random.random', (['(nrow, ncol)'], {}), '((nrow, ncol))\n', (2410, 2424), True, 'import numpy as np\n'), ((2473, 2498), 'parsimony.algorithms.nipals.RankOneSVD', 'RankOneSVD', ([], {'max_iter': '(1000)'}), '(max_iter=1000)\n', (2483, 2498), False, 'from parsimony.algorithms.nipals import RankOneSVD\n'), ((3874, 3905), 'parsimony.algorithms.nipals.RankOneSparseSVD', 'RankOneSparseSVD', ([], {'max_iter': '(1000)'}), '(max_iter=1000)\n', (3890, 3905), False, 'from parsimony.algorithms.nipals import RankOneSparseSVD\n'), ((3593, 3657), 
'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".npy"""', 'prefix': "('X_%d_%d' % (nrow, ncol))"}), "(suffix='.npy', prefix='X_%d_%d' % (nrow, ncol))\n", (3609, 3657), False, 'import tempfile\n'), ((3717, 3740), 'numpy.save', 'np.save', (['tmpfilename', 'X'], {}), '(tmpfilename, X)\n', (3724, 3740), True, 'import numpy as np\n'), ((1813, 1839), 'numpy.dot', 'np.dot', (['computed_v.T', 'np_v'], {}), '(computed_v.T, np_v)\n', (1819, 1839), True, 'import numpy as np\n'), ((2175, 2201), 'numpy.dot', 'np.dot', (['computed_v.T', 'np_v'], {}), '(computed_v.T, np_v)\n', (2181, 2201), True, 'import numpy as np\n'), ((3805, 3817), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (3813, 3817), False, 'import os\n')] |
from selsum.utils.posterior_generator import PosteriorGenerator
from shared_lib.utils.helpers.general import flatten
from selsum.utils.constants.model import FEATS, FEATS_MASK
from selsum.utils.helpers.collators import collate_subsampled
from selsum.data.abs_dataset import collate as abs_collate
import numpy as np
from copy import copy
from fairseq.utils import apply_to_sample
def subsample(distr_model, sample, rsample_num, pad_indx, eos_indx, mask_idx,
              ndocs=1, sample_type='inf'):
    """Performs sub-sampling by selecting a fixed maximum number of reviews.
    Wraps it back to a batch that can be directly fed to the model.
    Multiple random samples are allocated along the first axis, i.e.,
    (bsz*rsample_num, *)

    Args:
        distr_model: module that yields logits.
        sample (dict): batch with 'net_input' (features + mask) and 'extra'
            (source, target, id) entries.
        rsample_num (int): number of independent sub-samples drawn per
            data-point; the batch is tiled accordingly along axis 0.
        pad_indx / eos_indx / mask_idx: special token ids forwarded to the
            abstractive collator.
        ndocs: how many document ids to sample from the distribution.
        sample_type: one of 'posterior', 'posterior_greedy', 'prior'.
            NOTE(review): the default value 'inf' always fails the assert
            below, so callers must pass sample_type explicitly -- confirm
            whether that is intentional.

    Returns:
        dict: a re-collated batch restricted to the selected documents,
        moved to the device of the input features.
    """
    assert sample_type in ['posterior', 'posterior_greedy', 'prior']
    feats = sample['net_input'][FEATS]
    feats_mask = sample['net_input'][FEATS_MASK]
    src = sample['extra']['source']
    tgt = sample['extra']['target']
    id = sample['extra']['id']
    bs, padded_total_docs = feats.shape[:2]
    # repeating to accommodate multiple samples per data-point
    feats = feats.unsqueeze(1).repeat((1, rsample_num, 1, 1)) \
        .view(bs * rsample_num, padded_total_docs, -1)
    feats_mask = feats_mask.unsqueeze(1).repeat((1, rsample_num, 1)) \
        .view(bs * rsample_num, padded_total_docs)
    # sources/targets are tiled by reference to avoid copying tensors
    src = repeat_list_tensors(src, nsamples=rsample_num, by_ref=True)
    tgt = repeat_list_tensors(tgt, nsamples=rsample_num, by_ref=True)
    id = flatten([[i for _ in range(rsample_num)] for i in id])
    # number of real (non-padded) reviews for each tiled data-point
    rev_counts = (~feats_mask).sum(-1).cpu().numpy()
    # uniform sampling; also the fallback when every collection already
    # fits into ndocs
    if sample_type == 'prior' or max(rev_counts) <= ndocs :
        sel_indxs = sample_from_p(sample_size=ndocs,
                                  total_rev_counts=rev_counts)
    elif sample_type in ['posterior', 'posterior_greedy']:
        greedy = sample_type == 'posterior_greedy'
        feat_sample = {'net_input': {FEATS: feats, FEATS_MASK: feats_mask}}
        sel_indxs, _ = sample_from_q(model=distr_model, sample=feat_sample,
                                      bos_idx=-1, pad_idx=-1,
                                      sample_size=ndocs, greedy=greedy)
        sel_indxs = sel_indxs.cpu().numpy()
    else:
        raise NotImplementedError
    assert len(feats) == len(feats_mask) == len(src)
    coll = []
    for indx in range(len(sel_indxs)):
        _id = id[indx]
        _feats = feats[indx]
        _feats_mask = feats_mask[indx]
        _sel_indxs = sel_indxs[indx]
        _src = src[indx]
        # NOTE(review): repeat_list_tensors above iterates tgt, so tgt can
        # not actually be None here -- this guard looks dead; verify.
        _tgt = tgt[indx] if tgt is not None else None
        _ndocs = rev_counts[indx].item()
        # removing all padded sampled documents because the number of
        # selected documents can't exceed the number of documents in the
        # collection
        _sel_indxs = _sel_indxs[:_ndocs]
        assert isinstance(_src, list)
        _subs_src = [_src[i] for i in _sel_indxs]
        # storing to the collector
        coll.append({'source': _subs_src, 'target': _tgt,
                     'sel_indxs': _sel_indxs, 'id': _id, 'feats': _feats,
                     'feats_mask': _feats_mask})
    new_sample = abs_collate(coll, pad_idx=pad_indx, eos_idx=eos_indx,
                             mask_idx=mask_idx)
    new_sample = collate_subsampled(coll, new_sample)
    # move the freshly collated batch to the same device as the features
    new_sample = apply_to_sample(lambda tensor: tensor.to(feats.device),
                                 new_sample)
    return new_sample
def sample_from_q(model, sample, sample_size, bos_idx=-1, pad_idx=-1,
                  greedy=False, temperature=1.):
    """Draws ``sample_size`` document ids auto-regressively from the
    approximate posterior.

    Args:
        model: posterior model producing logits.
        sample (dict): contains 'net_input' with features and mask.
        sample_size (int): length of each sampled id sequence.
        greedy (bool): take the arg-max instead of sampling.
        temperature (float): re-scales scores before sampling.

    Returns:
        tuple: (sampled document indices, their probabilities).
    """
    generator = PosteriorGenerator(model=model, pad_idx=pad_idx,
                                   bos_idx=bos_idx, greedy=greedy,
                                   temperature=temperature)
    indices, probs = generator(sample=sample, max_seq_len=sample_size)
    return indices, probs
def sample_from_p(sample_size, total_rev_counts):
    """Draws indices from a flat categorical distribution without replacement.

    For each data-point, yields ``sample_size`` distinct indices when enough
    reviews exist, otherwise simply all available indices in order.

    Args:
        sample_size (int): the number of samples to yield per data-point.
        total_rev_counts (list): total number of reviews per data-point.
    """
    drawn = []
    for count in total_rev_counts:
        if count > sample_size:
            drawn.append(np.random.choice(count, replace=False,
                                          size=sample_size))
        else:
            drawn.append(np.arange(count))
    return drawn
def repeat_list_tensors(lst_tens, nsamples, by_ref=False):
    """Repeats each element ``nsamples`` times, in order.

    With ``by_ref=True`` the same object is repeated; otherwise each
    repetition is a shallow copy.
    """
    out = []
    for item in lst_tens:
        if by_ref:
            out.extend(item for _ in range(nsamples))
        else:
            out.extend(copy(item) for _ in range(nsamples))
    return out
def create_flat_distr(support_size, sel_indxs):
    """Creates one uniform categorical distribution per entry of ``sel_indxs``.

    Args:
        support_size (int): number of categories in each distribution.
        sel_indxs: iterable; one distribution is produced per element.

    Returns:
        list of np.ndarray: each of shape (support_size,), entries all equal
        to 1/support_size.
    """
    # Build the uniform vector once and hand out independent copies, instead
    # of recomputing np.ones()/support_size inside the loop; iterating
    # sel_indxs directly also drops the unused range(len(...)) index.
    uniform = np.ones((support_size,)) / support_size
    return [uniform.copy() for _ in sel_indxs]
| [
"selsum.utils.posterior_generator.PosteriorGenerator",
"selsum.data.abs_dataset.collate",
"copy.copy",
"numpy.ones",
"numpy.arange",
"numpy.random.choice",
"selsum.utils.helpers.collators.collate_subsampled"
] | [((3295, 3367), 'selsum.data.abs_dataset.collate', 'abs_collate', (['coll'], {'pad_idx': 'pad_indx', 'eos_idx': 'eos_indx', 'mask_idx': 'mask_idx'}), '(coll, pad_idx=pad_indx, eos_idx=eos_indx, mask_idx=mask_idx)\n', (3306, 3367), True, 'from selsum.data.abs_dataset import collate as abs_collate\n'), ((3414, 3450), 'selsum.utils.helpers.collators.collate_subsampled', 'collate_subsampled', (['coll', 'new_sample'], {}), '(coll, new_sample)\n', (3432, 3450), False, 'from selsum.utils.helpers.collators import collate_subsampled\n'), ((4080, 4190), 'selsum.utils.posterior_generator.PosteriorGenerator', 'PosteriorGenerator', ([], {'model': 'model', 'pad_idx': 'pad_idx', 'bos_idx': 'bos_idx', 'greedy': 'greedy', 'temperature': 'temperature'}), '(model=model, pad_idx=pad_idx, bos_idx=bos_idx, greedy=\n greedy, temperature=temperature)\n', (4098, 4190), False, 'from selsum.utils.posterior_generator import PosteriorGenerator\n'), ((4765, 4818), 'numpy.random.choice', 'np.random.choice', (['rc'], {'replace': '(False)', 'size': 'sample_size'}), '(rc, replace=False, size=sample_size)\n', (4781, 4818), True, 'import numpy as np\n'), ((4854, 4867), 'numpy.arange', 'np.arange', (['rc'], {}), '(rc)\n', (4863, 4867), True, 'import numpy as np\n'), ((5307, 5331), 'numpy.ones', 'np.ones', (['(support_size,)'], {}), '((support_size,))\n', (5314, 5331), True, 'import numpy as np\n'), ((5119, 5126), 'copy.copy', 'copy', (['l'], {}), '(l)\n', (5123, 5126), False, 'from copy import copy\n')] |
import os.path as op
from sklearn.externals import joblib as jl
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import f_classif, SelectPercentile
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, f1_score
from skbold.postproc import MvpResults
import numpy as np
# Classification pipeline: univariate feature selection (top 10% by ANOVA
# F-score), standardisation, then a linear SVM.
pipe = Pipeline([
    ('ufs', SelectPercentile(score_func=f_classif, percentile=10)),
    ('scaler', StandardScaler()),
    ('clf', SVC(kernel='linear', C=1.0))
])
mvp = jl.load('MVP/mvp_across_subjects.jl')
# Collapse all samples sharing a directory into one averaged pattern per
# directory (condition-average across subjects).
unique_dirs = np.unique(mvp.directories)
new_X = np.zeros((len(unique_dirs), mvp.X.shape[1]))
new_y = np.zeros(len(unique_dirs))
for i, fdir in enumerate(unique_dirs):
    # boolean mask of the samples belonging to this directory
    idx = np.array([fdir == this_dir for this_dir in mvp.directories])
    new_X[i, :] = mvp.X[idx, :].mean(axis=0)
    # NOTE(review): assumes every directory maps to exactly one label --
    # np.unique would return more than one element otherwise and this
    # scalar assignment would fail. Verify upstream labelling.
    new_y[i] = np.unique(mvp.y[idx])
mvp.y = new_y
mvp.X = new_X
mvp_results = MvpResults(mvp=mvp, type_model='classification', n_iter=10,
                        feature_scoring='forward', verbose=True,
                        accuracy=accuracy_score,
                        f1_score=f1_score)
out_dir = op.join('RESULTS', 'TRAIN', 'ACROSS_SUBS', 'CONDITION_AVERAGE')
# 10-fold stratified cross-validation; MvpResults accumulates per-fold
# predictions and feature scores.
skf = StratifiedKFold(n_splits=10)
for train_idx, test_idx in skf.split(X=mvp.X, y=mvp.y):
    X_train, y_train = mvp.X[train_idx], mvp.y[train_idx]
    X_test, y_test = mvp.X[test_idx], mvp.y[test_idx]
    pipe.fit(X_train, y_train)
    pred = pipe.predict(X_test)
    mvp_results.update(pipeline=pipe, test_idx=test_idx, y_pred=pred)
mvp_results.compute_scores(maps_to_tstat=False)
mvp_results.write(out_path=out_dir)
| [
"sklearn.preprocessing.StandardScaler",
"sklearn.svm.SVC",
"skbold.postproc.MvpResults",
"sklearn.feature_selection.SelectPercentile",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"sklearn.externals.joblib.load",
"os.path.join",
"numpy.unique"
] | [((581, 618), 'sklearn.externals.joblib.load', 'jl.load', (['"""MVP/mvp_across_subjects.jl"""'], {}), "('MVP/mvp_across_subjects.jl')\n", (588, 618), True, 'from sklearn.externals import joblib as jl\n'), ((633, 659), 'numpy.unique', 'np.unique', (['mvp.directories'], {}), '(mvp.directories)\n', (642, 659), True, 'import numpy as np\n'), ((986, 1135), 'skbold.postproc.MvpResults', 'MvpResults', ([], {'mvp': 'mvp', 'type_model': '"""classification"""', 'n_iter': '(10)', 'feature_scoring': '"""forward"""', 'verbose': '(True)', 'accuracy': 'accuracy_score', 'f1_score': 'f1_score'}), "(mvp=mvp, type_model='classification', n_iter=10, feature_scoring\n ='forward', verbose=True, accuracy=accuracy_score, f1_score=f1_score)\n", (996, 1135), False, 'from skbold.postproc import MvpResults\n'), ((1217, 1280), 'os.path.join', 'op.join', (['"""RESULTS"""', '"""TRAIN"""', '"""ACROSS_SUBS"""', '"""CONDITION_AVERAGE"""'], {}), "('RESULTS', 'TRAIN', 'ACROSS_SUBS', 'CONDITION_AVERAGE')\n", (1224, 1280), True, 'import os.path as op\n'), ((1288, 1316), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (1303, 1316), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((799, 861), 'numpy.array', 'np.array', (['[(fdir == this_dir) for this_dir in mvp.directories]'], {}), '([(fdir == this_dir) for this_dir in mvp.directories])\n', (807, 861), True, 'import numpy as np\n'), ((920, 941), 'numpy.unique', 'np.unique', (['mvp.y[idx]'], {}), '(mvp.y[idx])\n', (929, 941), True, 'import numpy as np\n'), ((440, 493), 'sklearn.feature_selection.SelectPercentile', 'SelectPercentile', ([], {'score_func': 'f_classif', 'percentile': '(10)'}), '(score_func=f_classif, percentile=10)\n', (456, 493), False, 'from sklearn.feature_selection import f_classif, SelectPercentile\n'), ((511, 527), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (525, 527), False, 'from sklearn.preprocessing import 
StandardScaler\n'), ((542, 569), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""', 'C': '(1.0)'}), "(kernel='linear', C=1.0)\n", (545, 569), False, 'from sklearn.svm import SVC\n')] |
#!/usr/bin/env python3
import sys
import cereal.messaging as messaging
import numpy as np
import matplotlib.pyplot as plt
N = 21
# debug longitudinal MPC by plotting its trajectory. To receive liveLongitudinalMpc packets,
# set on LOG_MPC env variable and run plannerd on a replay
def plot_longitudinal_mpc(addr="127.0.0.1"):
  """Live-plots the longitudinal MPC trajectory received over messaging.

  Subscribes to 'liveLongitudinalMpc' and 'radarState' at ``addr`` and keeps
  redrawing ego/lead position, speed, acceleration, and relative-distance
  panels until interrupted.
  """
  # *** log ***
  livempc = messaging.sub_sock('liveLongitudinalMpc', addr=addr, conflate=True)
  radarstate = messaging.sub_sock('radarState', addr=addr, conflate=True)
  plt.ion()
  fig = plt.figure()
  # Time grid for the N=21 horizon points: 0.2 s steps up to 0.8 s, then
  # 0.6 s steps out to ~10 s (4 + 17 = 21 points).
  t = np.hstack([np.arange(0.0, 0.8, 0.2), np.arange(0.8, 10.6, 0.6)])
  p_x_ego = fig.add_subplot(3, 2, 1)
  p_v_ego = fig.add_subplot(3, 2, 3)
  p_a_ego = fig.add_subplot(3, 2, 5)
  # p_x_l = fig.add_subplot(3, 2, 2)
  # p_a_l = fig.add_subplot(3, 2, 6)
  p_d_l = fig.add_subplot(3, 2, 2)
  p_d_l_v = fig.add_subplot(3, 2, 4)
  p_d_l_vv = fig.add_subplot(3, 2, 6)
  p_v_ego.set_ylim([0, 30])
  p_a_ego.set_ylim([-4, 4])
  p_d_l.set_ylim([-1, 10])
  p_x_ego.set_title('x')
  p_v_ego.set_title('v')
  p_a_ego.set_title('a')
  p_d_l.set_title('rel dist')
  # Create the line artists once; the loop below only updates their data.
  l_x_ego, = p_x_ego.plot(t, np.zeros(N))
  l_v_ego, = p_v_ego.plot(t, np.zeros(N))
  l_a_ego, = p_a_ego.plot(t, np.zeros(N))
  l_x_l, = p_x_ego.plot(t, np.zeros(N))
  l_v_l, = p_v_ego.plot(t, np.zeros(N))
  l_a_l, = p_a_ego.plot(t, np.zeros(N))
  l_d_l, = p_d_l.plot(t, np.zeros(N))
  l_d_l_v, = p_d_l_v.plot(np.zeros(N))
  l_d_l_vv, = p_d_l_vv.plot(np.zeros(N))
  p_x_ego.legend(['ego', 'l'])
  p_v_ego.legend(['ego', 'l'])
  p_a_ego.legend(['ego', 'l'])
  p_d_l_v.set_xlabel('d_rel')
  p_d_l_v.set_ylabel('v_rel')
  p_d_l_v.set_ylim([-20, 20])
  p_d_l_v.set_xlim([0, 100])
  p_d_l_vv.set_xlabel('d_rel')
  p_d_l_vv.set_ylabel('v_rel')
  p_d_l_vv.set_ylim([-5, 5])
  p_d_l_vv.set_xlim([10, 40])
  while True:
    lMpc = messaging.recv_sock(livempc, wait=True)
    rs = messaging.recv_sock(radarstate, wait=True)
    if lMpc is not None:
      # only plot the first MPC instance
      if lMpc.liveLongitudinalMpc.mpcId != 1:
        continue
      x_ego = list(lMpc.liveLongitudinalMpc.xEgo)
      v_ego = list(lMpc.liveLongitudinalMpc.vEgo)
      a_ego = list(lMpc.liveLongitudinalMpc.aEgo)
      x_l = list(lMpc.liveLongitudinalMpc.xLead)
      v_l = list(lMpc.liveLongitudinalMpc.vLead)
      # a_l = list(lMpc.liveLongitudinalMpc.aLead)
      # Reconstruct lead acceleration from the radar's aLeadK, decayed over
      # the horizon by aLeadTau -- presumably mirroring the MPC's internal
      # lead model; confirm against the MPC source.
      a_l = rs.radarState.leadOne.aLeadK * np.exp(-lMpc.liveLongitudinalMpc.aLeadTau * t**2 / 2)
      #print(min(a_ego), lMpc.liveLongitudinalMpc.qpIterations)
      l_x_ego.set_ydata(x_ego)
      l_v_ego.set_ydata(v_ego)
      l_a_ego.set_ydata(a_ego)
      l_x_l.set_ydata(x_l)
      l_v_l.set_ydata(v_l)
      l_a_l.set_ydata(a_l)
      # Relative distance / speed panels, plotted against relative distance.
      l_d_l.set_ydata(np.array(x_l) - np.array(x_ego))
      l_d_l_v.set_ydata(np.array(v_l) - np.array(v_ego))
      l_d_l_v.set_xdata(np.array(x_l) - np.array(x_ego))
      l_d_l_vv.set_ydata(np.array(v_l) - np.array(v_ego))
      l_d_l_vv.set_xdata(np.array(x_l) - np.array(x_ego))
      p_x_ego.relim()
      p_x_ego.autoscale_view(True, scaley=True, scalex=True)
      fig.canvas.draw()
      fig.canvas.flush_events()
if __name__ == "__main__":
  # Optional first CLI argument overrides the default subscribe address.
  addr = sys.argv[1] if len(sys.argv) > 1 else "127.0.0.1"
  plot_longitudinal_mpc(addr)
| [
"cereal.messaging.sub_sock",
"cereal.messaging.recv_sock",
"numpy.zeros",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"numpy.array"
] | [((358, 425), 'cereal.messaging.sub_sock', 'messaging.sub_sock', (['"""liveLongitudinalMpc"""'], {'addr': 'addr', 'conflate': '(True)'}), "('liveLongitudinalMpc', addr=addr, conflate=True)\n", (376, 425), True, 'import cereal.messaging as messaging\n'), ((441, 499), 'cereal.messaging.sub_sock', 'messaging.sub_sock', (['"""radarState"""'], {'addr': 'addr', 'conflate': '(True)'}), "('radarState', addr=addr, conflate=True)\n", (459, 499), True, 'import cereal.messaging as messaging\n'), ((503, 512), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (510, 512), True, 'import matplotlib.pyplot as plt\n'), ((521, 533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (531, 533), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1133), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1130, 1133), True, 'import numpy as np\n'), ((1164, 1175), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1172, 1175), True, 'import numpy as np\n'), ((1206, 1217), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1214, 1217), True, 'import numpy as np\n'), ((1246, 1257), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1254, 1257), True, 'import numpy as np\n'), ((1286, 1297), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1294, 1297), True, 'import numpy as np\n'), ((1326, 1337), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1334, 1337), True, 'import numpy as np\n'), ((1364, 1375), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1372, 1375), True, 'import numpy as np\n'), ((1403, 1414), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1411, 1414), True, 'import numpy as np\n'), ((1444, 1455), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1452, 1455), True, 'import numpy as np\n'), ((1816, 1855), 'cereal.messaging.recv_sock', 'messaging.recv_sock', (['livempc'], {'wait': '(True)'}), '(livempc, wait=True)\n', (1835, 1855), True, 'import cereal.messaging as messaging\n'), ((1865, 1907), 'cereal.messaging.recv_sock', 'messaging.recv_sock', 
(['radarstate'], {'wait': '(True)'}), '(radarstate, wait=True)\n', (1884, 1907), True, 'import cereal.messaging as messaging\n'), ((552, 576), 'numpy.arange', 'np.arange', (['(0.0)', '(0.8)', '(0.2)'], {}), '(0.0, 0.8, 0.2)\n', (561, 576), True, 'import numpy as np\n'), ((578, 603), 'numpy.arange', 'np.arange', (['(0.8)', '(10.6)', '(0.6)'], {}), '(0.8, 10.6, 0.6)\n', (587, 603), True, 'import numpy as np\n'), ((2341, 2396), 'numpy.exp', 'np.exp', (['(-lMpc.liveLongitudinalMpc.aLeadTau * t ** 2 / 2)'], {}), '(-lMpc.liveLongitudinalMpc.aLeadTau * t ** 2 / 2)\n', (2347, 2396), True, 'import numpy as np\n'), ((2658, 2671), 'numpy.array', 'np.array', (['x_l'], {}), '(x_l)\n', (2666, 2671), True, 'import numpy as np\n'), ((2674, 2689), 'numpy.array', 'np.array', (['x_ego'], {}), '(x_ego)\n', (2682, 2689), True, 'import numpy as np\n'), ((2715, 2728), 'numpy.array', 'np.array', (['v_l'], {}), '(v_l)\n', (2723, 2728), True, 'import numpy as np\n'), ((2731, 2746), 'numpy.array', 'np.array', (['v_ego'], {}), '(v_ego)\n', (2739, 2746), True, 'import numpy as np\n'), ((2772, 2785), 'numpy.array', 'np.array', (['x_l'], {}), '(x_l)\n', (2780, 2785), True, 'import numpy as np\n'), ((2788, 2803), 'numpy.array', 'np.array', (['x_ego'], {}), '(x_ego)\n', (2796, 2803), True, 'import numpy as np\n'), ((2830, 2843), 'numpy.array', 'np.array', (['v_l'], {}), '(v_l)\n', (2838, 2843), True, 'import numpy as np\n'), ((2846, 2861), 'numpy.array', 'np.array', (['v_ego'], {}), '(v_ego)\n', (2854, 2861), True, 'import numpy as np\n'), ((2888, 2901), 'numpy.array', 'np.array', (['x_l'], {}), '(x_l)\n', (2896, 2901), True, 'import numpy as np\n'), ((2904, 2919), 'numpy.array', 'np.array', (['x_ego'], {}), '(x_ego)\n', (2912, 2919), True, 'import numpy as np\n')] |
from lz import *
from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, \
MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter
from collections import namedtuple
from config import conf
import functools, logging
from torch import nn
import numpy as np
# With activation checkpointing the BatchNorm forward runs twice per step, so
# each running-stat update is applied twice; momentum = 1 - sqrt(0.9) makes
# two updates equivalent to a single update with decay 0.9 -- presumably the
# intent here; TODO confirm.
if conf.use_chkpnt:
    BatchNorm2d = functools.partial(BatchNorm2d, momentum=1 - np.sqrt(0.9))
def l2_norm(input, axis=1, need_norm=False, ):
    """L2-normalises ``input`` along ``axis``.

    Returns the normalised tensor, or ``(normalised, norm)`` when
    ``need_norm`` is set (norm keeps the reduced dimension).
    """
    norm = torch.norm(input, 2, axis, True)
    normalised = torch.div(input, norm)
    return (normalised, norm) if need_norm else normalised
class Flatten(Module):
    """Collapses every dimension after the batch dimension into one."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class Identity(Module):
    """Pass-through module used where a placeholder layer is needed."""

    def forward(self, tensor):
        # Nothing to do -- hand the input straight back.
        return tensor
class Swish(nn.Module):
    """Swish activation: ``x * sigmoid(x)``."""

    def __init__(self, *args):
        super(Swish, self).__init__()

    def forward(self, x):
        # sigmoid(x).mul(x) == x * sigmoid(x); multiplication is commutative.
        return torch.sigmoid(x).mul(x)
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``."""

    def __init__(self, *args):
        super(Mish, self).__init__()

    def forward(self, x):
        gate = torch.tanh(F.softplus(x))
        return gate * x
# Select the activation constructor used throughout the model, driven by
# conf.use_act.  Every branch yields a callable that accepts the channel
# count as its first argument (ELU ignores it; PReLU uses it).
if conf.use_act == 'elu':
    NonLin = lambda *args: nn.ELU(inplace=True)
elif conf.use_act == 'prelu':
    NonLin = nn.PReLU
elif conf.use_act == 'mish':
    NonLin = Mish
elif conf.use_act == 'swish':
    NonLin = Swish
else:
    raise NotImplementedError()
class SEModule(nn.Module):
    """Squeeze-and-Excitation block: channel-wise re-weighting of features.

    Global-average-pools the input, squeezes to ``channels // reduction``
    channels, re-expands, applies a sigmoid gate and multiplies the input by
    the gate scaled with ``mult``.
    """

    def __init__(self, channels, reduction=4, mult=conf.sigmoid_mult):
        super(SEModule, self).__init__()
        squeezed = channels // reduction
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            channels, squeezed, kernel_size=1, padding=0, bias=False)
        if conf.upgrade_irse:
            self.relu = NonLin(squeezed)
        else:
            self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(
            squeezed, channels, kernel_size=1, padding=0, bias=False)
        # todo tanh+1 or sigmoid or sigmoid*2
        self.sigmoid = nn.Sigmoid()  # nn.Tanh()
        self.mult = mult
        nn.init.xavier_uniform_(self.fc1.weight.data)
        nn.init.xavier_uniform_(self.fc2.weight.data)

    def forward(self, x):
        gate = self.avg_pool(x)
        gate = self.fc1(gate)
        gate = self.relu(gate)
        gate = self.fc2(gate)
        gate = self.sigmoid(gate)  # +1
        return x * gate * self.mult
from modules.bn import InPlaceABN, InPlaceABNSync
def bn_act(depth, with_act, ipabn=None):
    """Builds a ``[norm]`` or ``[norm, activation]`` layer list.

    ``ipabn`` selects in-place ABN ('sync' for the synchronised variant, any
    other truthy value for the plain one); otherwise BatchNorm2d is used.
    """
    if ipabn == 'sync':
        norm = InPlaceABNSync(depth, activation='none')
    elif ipabn:
        norm = InPlaceABN(depth, activation='none')
    else:
        norm = BatchNorm2d(depth)
    layers = [norm]
    if with_act:
        layers.append(NonLin(depth, ))
    return layers
def bn2d(depth, ipabn=None):
    """Returns a single 2-d normalisation layer (in-place ABN or BatchNorm)."""
    if ipabn == 'sync':
        return InPlaceABNSync(depth, activation='none')
    if ipabn:
        return InPlaceABN(depth, activation='none')
    return BatchNorm2d(depth)
# @deprecated
# todo gl_conf.upgrade_ir
class bottleneck_IR(Module):
    """Basic IR residual unit (no SE): identity/projection shortcut plus a
    two-convolution residual branch."""

    def __init__(self, in_channel, depth, stride, ):
        super(bottleneck_IR, self).__init__()
        ipabn = conf.ipabn
        if in_channel == depth:
            # channel counts already match: only downsample spatially
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # 1x1 conv projection to change channels (and apply the stride)
            self.shortcut_layer = Sequential(
                nn.Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth))
        if conf.upgrade_irse:
            # pre-activation layout with the configurable norm/activation
            self.res_layer = Sequential(
                *bn_act(in_channel, False, ipabn),
                nn.Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
                *bn_act(depth, True, ipabn),
                nn.Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
                *bn_act(depth, False, ipabn))
        else:
            # legacy layout: BN -> conv -> act -> strided conv -> BN
            self.res_layer = Sequential(
                BatchNorm2d(in_channel),
                nn.Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), NonLin(depth),
                nn.Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class true_bottleneck_IR_SE(Module):  # this is the bottleneck block
    """IR-SE bottleneck unit: 1x1 reduce -> 3x3 (strided) -> 1x1 expand,
    followed by a squeeze-and-excitation module, then the shortcut add.

    The residual branch works at ``in_channel // 4`` channels between the
    two 1x1 convolutions.
    """

    def __init__(self, in_channel, depth, stride):
        super(true_bottleneck_IR_SE, self).__init__()
        if in_channel == depth and stride == 1:
            # shapes already match: identity shortcut
            self.shortcut_layer = Identity()
        else:
            # todo arch ft
            self.shortcut_layer = Sequential(
                nn.Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                *bn_act(depth, False, conf.ipabn)
            )
        self.res_layer = Sequential(
            *bn_act(in_channel, False, conf.ipabn),
            nn.Conv2d(in_channel, in_channel // 4, (1, 1), (1, 1), 0, bias=False),
            *bn_act(in_channel // 4, True, conf.ipabn),
            nn.Conv2d(in_channel // 4, in_channel // 4, (3, 3), stride, 1, bias=False),
            *bn_act(in_channel // 4, True, conf.ipabn),
            nn.Conv2d(in_channel // 4, depth, (1, 1), 1, 0, bias=False),
            *bn_act(depth, False, conf.ipabn),
            SEModule(depth, 16),
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        # in-place add is safe: ``res`` is freshly produced by the branch
        res.add_(shortcut)
        return res
class bottleneck_IR_SE(Module):  # this is basic block
    """IR-SE basic block: pre-activation 3x3 conv pair plus squeeze-excitation,
    with an in-place residual add and an optional trailing InstanceNorm.
    The shortcut is identity when shape is preserved; otherwise either a
    strided 1x1 conv or (conf.arch_ft) avg-pool followed by a 1x1 conv.
    """
    def __init__(self, in_channel, depth, stride, use_in=False):
        super(bottleneck_IR_SE, self).__init__()
        if not conf.spec_norm:
            conv_op = nn.Conv2d
        else:
            # wrap every conv in spectral normalization
            conv_op = lambda *args, **kwargs: nn.utils.spectral_norm(nn.Conv2d(*args, **kwargs))
        if in_channel == depth and stride == 1:
            self.shortcut_layer = Identity()
        else:
            if not conf.arch_ft:
                self.shortcut_layer = Sequential(
                    nn.Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                    *bn_act(depth, False, conf.ipabn)
                )
            else:
                # anti-aliased variant: pool first, then a stride-1 projection
                self.shortcut_layer = nn.Sequential(
                    nn.AvgPool2d(kernel_size=2, stride=2),
                    nn.Conv2d(in_channel, depth, 1, 1, bias=False),
                    *bn_act(depth, False, conf.ipabn)
                )
        assert conf.upgrade_irse
        self.res_layer = Sequential(
            *bn_act(in_channel, False, conf.ipabn),
            conv_op(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            *bn_act(depth, True, conf.ipabn),
            conv_op(depth, depth, (3, 3), stride, 1, bias=False),
            *bn_act(depth, False, conf.ipabn),
            SEModule(depth, 16)
        )
        if not use_in:
            self.IN = None
        else:
            self.IN = nn.InstanceNorm2d(depth, affine=True)
    def forward_ipabn(self, x):
        # clone() protects the shortcut input from the in-place ABN in res_layer
        shortcut = self.shortcut_layer(x.clone())
        res = self.res_layer(x)
        res.add_(shortcut)
        if self.IN:
            res = self.IN(res)
        return res
    def forward_ori(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        res.add_(shortcut)
        if self.IN:
            res = self.IN(res)
        return res
    # forward is chosen once, at class-creation time, from the global config
    if conf.ipabn:
        forward = forward_ipabn
    else:
        forward = forward_ori
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    '''A named tuple describing one ResNet block: input width, output width, stride.'''
def get_block(in_channel, depth, num_units, stride=2):
    """Build one ResNet stage: a strided transition block followed by
    ``num_units - 1`` stride-1 blocks at constant width ``depth``.
    """
    # `_` replaces the previously unused loop variable `i`
    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for _ in range(num_units - 1)]
def get_blocks(num_layers):
    """Return the four per-stage block-spec lists for a ResNet of depth ``num_layers``.

    Widths follow the bottleneck schedule when ``conf.bottle_neck`` is set,
    the basic-block schedule otherwise.

    Raises:
        ValueError: if ``num_layers`` is not a supported depth. (The original
        if/elif ladder left ``units`` unbound and crashed with a confusing
        ``NameError`` instead.)
    """
    if conf.bottle_neck:
        filter_list = [64, 256, 512, 1024, 2048]
    else:
        filter_list = [64, 64, 128, 256, 512]
    # depth -> number of units in each of the four stages
    units_by_depth = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        49: [3, 4, 14, 3],
        50: [3, 4, 14, 3],  # basic
        74: [3, 6, 24, 3],
        90: [3, 8, 30, 3],
        98: [3, 4, 38, 3],
        99: [3, 8, 35, 3],
        100: [3, 13, 30, 3],  # basic
        134: [3, 10, 50, 3],
        136: [3, 13, 48, 3],
        140: [3, 15, 48, 3],  # basic
        124: [3, 13, 40, 5],  # basic
        160: [3, 24, 49, 3],  # basic
        101: [3, 4, 23, 3],  # bottleneck
        152: [3, 8, 36, 3],  # bottleneck
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    try:
        units = units_by_depth[num_layers]
    except KeyError:
        raise ValueError(f'unsupported num_layers: {num_layers}')
    blocks = [get_block(filter_list[i], filter_list[i + 1], units[i]) for i in range(4)]
    return blocks
from torch.utils.checkpoint import checkpoint_sequential
class Backbone(Module):
    """ResNet-style face-embedding backbone built from IR / IR-SE blocks.

    Construction is driven almost entirely by the module-level ``conf``
    object: network depth, block flavour, stem/head variants, deep
    supervision ('ds'), mid-level embedding heads ('mid_type'), boosted
    lateral branches ('use_bl') and the PFE uncertainty head ('pfe').
    """
    def __init__(self, num_layers=conf.net_depth, drop_ratio=conf.drop_ratio, mode=conf.net_mode,
                 ebsize=conf.embedding_size):
        super(Backbone, self).__init__()
        # assert num_layers in [18, 34, 20, 50, 100, 152, ], 'num_layers should be not defined'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            if not conf.bottle_neck:
                unit_module = bottleneck_IR_SE
            else:
                unit_module = true_bottleneck_IR_SE
        # stem: one or two 3x3 convs, with batch- or instance-norm depending on config
        if not conf.arch_ft and not conf.use_in:
            self.input_layer = Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                          bn2d(64, conf.ipabn),
                                          NonLin(64))
        elif conf.arch_ft and not conf.use_in:
            self.input_layer = Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                          bn2d(64, conf.ipabn),
                                          NonLin(64),
                                          nn.Conv2d(64, 64, (3, 3), 1, 1, bias=False),
                                          bn2d(64, conf.ipabn),
                                          NonLin(64),
                                          )
        elif conf.arch_ft and conf.use_in:
            self.input_layer = Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                          nn.InstanceNorm2d(64, affine=True),
                                          NonLin(64),
                                          nn.Conv2d(64, 64, (3, 3), 1, 1, bias=False),
                                          nn.InstanceNorm2d(64, affine=True),
                                          NonLin(64),
                                          )
        elif not conf.arch_ft and conf.use_in:
            self.input_layer = Sequential(nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                          nn.InstanceNorm2d(64, affine=True),
                                          NonLin(64), )
        # four stride-2 stages -> 16x downsampling of the input resolution
        out_resolution = conf.input_size // 16
        if conf.bottle_neck:
            expansions = 4
        else:
            expansions = 1
        out_planes = 512 * expansions
        # embedding head: flatten + fc, or global pooling + fc
        if conf.out_type == 'fc':
            self.output_layer = Sequential(
                bn2d(out_planes, conf.ipabn),
                Dropout(drop_ratio),
                Flatten(),
                nn.Linear(out_planes * out_resolution ** 2, ebsize,
                          bias=True if not conf.upgrade_bnneck else False),
                nn.BatchNorm1d(ebsize))
            if conf.pfe:
                # PFE: parallel head predicting per-embedding uncertainty
                self.output_layer_sigma = Sequential(
                    bn2d(out_planes, conf.ipabn),
                    Dropout(drop_ratio),
                    Flatten(),
                    nn.Linear(out_planes * out_resolution ** 2, ebsize,
                              bias=True if not conf.upgrade_bnneck else False),
                    nn.BatchNorm1d(ebsize))
        elif conf.out_type == 'gpool':
            self.output_layer = nn.Sequential(
                bn2d(out_planes, conf.ipabn),
                nn.AdaptiveAvgPool2d(1),
                Flatten(),
                nn.Linear(out_planes, ebsize, bias=False),
                nn.BatchNorm1d(ebsize),
            )
        modules = []
        for ind, block in enumerate(blocks):
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel,
                                bottleneck.depth,
                                bottleneck.stride,
                                use_in=ind==0 if conf.use_in else False,
                                ))
        self.body = Sequential(*modules)
        # optional mid-level embedding heads tapped after stages 1-3
        if conf.mid_type == 'gpool':
            self.fcs = nn.Sequential(
                nn.Sequential(bn2d(64 * expansions, conf.ipabn),
                              nn.AdaptiveAvgPool2d(1),
                              Flatten(),
                              nn.Linear(64 * expansions, ebsize, bias=False),
                              nn.BatchNorm1d(ebsize), ),
                nn.Sequential(bn2d(128 * expansions, conf.ipabn),
                              nn.AdaptiveAvgPool2d(1),
                              Flatten(),
                              nn.Linear(128 * expansions, ebsize, bias=False),
                              nn.BatchNorm1d(ebsize), ),
                nn.Sequential(bn2d(256 * expansions, conf.ipabn),
                              nn.AdaptiveAvgPool2d(1),
                              Flatten(),
                              nn.Linear(256 * expansions, ebsize, bias=False),
                              nn.BatchNorm1d(ebsize), ),
            )
        elif conf.mid_type == 'fc':
            self.fcs = nn.Sequential(
                nn.Sequential(bn2d(64 * expansions, conf.ipabn),
                              Dropout(drop_ratio),
                              Flatten(),
                              nn.Linear(64 * expansions * (out_resolution * 8) ** 2, ebsize, bias=False),
                              nn.BatchNorm1d(ebsize), ),
                nn.Sequential(bn2d(128 * expansions, conf.ipabn),
                              Dropout(drop_ratio),
                              Flatten(),
                              nn.Linear(128 * expansions * (out_resolution * 4) ** 2, ebsize, bias=False),
                              nn.BatchNorm1d(ebsize), ),
                nn.Sequential(bn2d(256 * expansions, conf.ipabn),
                              Dropout(drop_ratio),
                              Flatten(),
                              nn.Linear(256 * expansions * (out_resolution * 2) ** 2, ebsize, bias=False),
                              nn.BatchNorm1d(ebsize), ),
            )
        else:
            self.fcs = None
        # learned fusion of the 4 embeddings (3 mid-level + final)
        if conf.use_bl or conf.ds:
            self.fuse_wei = nn.Linear(4, 1, bias=False)
        else:
            self.fuse_wei=None
        if conf.use_bl:
            # extra residual refinement on each tapped feature map
            self.bls = nn.Sequential(
                nn.Sequential(*[unit_module(64 * expansions, 64 * expansions, 1) for _ in range(3)]),
                nn.Sequential(*[unit_module(128 * expansions, 128 * expansions, 1) for _ in range(2)]),
                nn.Sequential(*[unit_module(256 * expansions, 256 * expansions, 1) for _ in range(1)]),
            )
        else:
            self.bls = nn.Sequential(
                Identity(),
                Identity(),
                Identity(),
            )
        self._initialize_weights()
        if conf.pfe:
            # PFE: start from a constant sigma (weight 0, bias 1)
            nn.init.constant_(self.output_layer_sigma[-1].weight, 0)
            nn.init.constant_(self.output_layer_sigma[-1].bias, 1)
    def forward(self, inp, *args, **kwargs, ):
        """Compute embedding(s); returns deep-supervision list / (emb, sigma)
        tuple / plain embedding depending on config and training mode."""
        bs = inp.shape[0]
        if conf.input_size != inp.shape[-1]:
            inp = F.interpolate(inp, size=conf.input_size, mode='bilinear', align_corners=True)  # bicubic
        x = self.input_layer(inp)
        # x = self.body(x)
        # indices of the last block of each early stage, used to tap features
        # NOTE(review): only depths 18 and 50 are handled explicitly; other
        # depths silently reuse the r50 indices — confirm intended
        if conf.net_depth == 18:
            break_inds = [1, 3, 5, ]  # r18
        else:
            break_inds = [3 - 1, 3 + 4 - 1, 3 + 4 + 13 - 1, ]  # r50
        xs = []
        shps = []
        for ind, layer in enumerate(self.body):
            x = layer(x)
            shps.append(x.shape)
            if ind in break_inds:
                xs.append(x)
        xs.append(x)
        assert not judgenan(x), f'x'
        if conf.mid_type:
            v4 = self.output_layer(x)
            v1 = self.fcs[0](self.bls[0](xs[0]))
            v2 = self.fcs[1](self.bls[1](xs[1]))
            v3 = self.fcs[2](self.bls[2](xs[2]))
            v5 = self.fuse_wei(torch.stack([v1, v2, v3, v4], dim=-1)).view(bs, -1)
            if conf.ds and self.training:
                # deep supervision: expose every intermediate embedding
                return [v5, v4, v3, v2, v1]
            elif conf.ds and not self.training:
                return v5
        else:
            v5 = self.output_layer(x)
        assert not judgenan(v5)
        if conf.pfe:
            sigma = self.output_layer_sigma(x)
            return v5, sigma
        return v5
    def forward_ori(self, inp, *args, **kwargs):
        # plain stem -> body -> head path, no feature taps
        if conf.input_size != inp.shape[-1]:
            inp = F.interpolate(inp, size=conf.input_size, mode='bilinear', align_corners=True)  # bicubic
        x = self.input_layer(inp)
        x = self.body(x)
        x = self.output_layer(x)
        return x
    def forward_old(self, x, normalize=True, return_norm=False, mode='train'):
        """Legacy forward with frozen-trunk finetuning and gradient checkpointing."""
        if mode == 'finetune':
            with torch.no_grad():
                x = self.input_layer(x)
                x = self.body(x)
        elif mode == 'train':
            x = self.input_layer(x)
            if not conf.use_chkpnt:
                x = self.body(x)
            else:
                # trade compute for memory via activation checkpointing
                x = checkpoint_sequential(self.body, 2, x)
        else:
            raise ValueError(mode)
        x = self.output_layer(x)
        x_norm, norm = l2_norm(x, axis=1, need_norm=True)
        if normalize:
            if return_norm:
                return x_norm, norm
            else:
                return x_norm  # the default one
        else:
            if return_norm:
                return x, norm
            else:
                return x
    def _initialize_weights(self):
        # Xavier for convs/linears, unit-gain for norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
        # todo
        # zero-init the last norm of each residual branch ("zero-gamma" trick)
        for m in self.modules():
            if isinstance(m, bottleneck_IR_SE):
                nn.init.constant_(m.res_layer[5].weight, 0)
class DoubleConv(nn.Module):
    """Conv layer whose (k+1)x(k+1) weight is convolved with identity kernels
    to produce a bank of kxk sub-kernels; their responses are averaged.

    NOTE(review): effectively applies the stored weight at several spatial
    offsets and mean-pools the results — confirm against the paper/caller.
    """
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, groups=1, bias=False,
                 ):
        super(DoubleConv, self).__init__()
        if isinstance(kernel_size, tuple) and kernel_size[0] == kernel_size[1]:
            kernel_size = kernel_size[0]
        # the stored "meta" kernel is one pixel larger than the effective one
        zp = kernel_size + 1
        self.cl, self.cl2, self.zp, self.z, = in_channels, out_channels, zp, kernel_size,
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride, self.padding = stride, padding
        self.bias = None
        self.groups = groups
        self.dilation = dilation
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, zp, zp))
        self.reset_parameters()
    def reset_parameters(self):
        # mirrors nn.Conv2d's default kaiming init
        from torch.nn import init
        n = self.in_channels
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def forward(self, input):
        cl, cl2, zp, z, = self.cl, self.cl2, self.zp, self.z,
        cl //= self.groups
        with torch.no_grad():
            # identity kernels: one per (channel, y, x) position of a kxk patch
            Il = torch.eye(cl * z * z).type_as(self.weight)
            Il = Il.view(cl * z * z, cl, z, z)
        # slide the identity bank over the meta kernel -> all kxk sub-kernels
        Wtl = F.conv2d(self.weight, Il)
        zpz = zp - z + 1  # number of sub-kernel offsets per axis
        Wtl = Wtl.view(cl2 * zpz * zpz, cl, z, z)
        Ol2 = F.conv2d(input, Wtl, bias=None, stride=self.stride,
                       padding=self.padding,
                       dilation=self.dilation, groups=self.groups, )
        bs, _, wl2, hl2 = Ol2.size()
        # average the responses of the zpz*zpz sub-kernels
        Ol2 = Ol2.view(bs, -1, zpz, zpz)
        Il2 = F.adaptive_avg_pool2d(Ol2, (1, 1))
        res = Il2.view(bs, -1, wl2, hl2)
        return res
# DoubleConv(16,32)(torch.randn(4,16,112,112))
def count_double_conv(m, x, y):
    """FLOPs-counter hook for DoubleConv (thop-style): stores the estimated
    op count for this call in ``m.total_ops`` as a 1-element tensor."""
    inp = x[0]
    k = m.kernel_size
    batch = inp.size()[0]
    out_h, out_w = y.size(2), y.size(3)
    # multiply-accumulate ops contributed by one output element
    ops_per_elem = 1 * k * k
    n_out = batch * out_w * out_h * m.out_channels
    total = n_out * ops_per_elem * m.in_channels // m.groups
    # each output is computed zpz**2 times (one per sub-kernel), then averaged
    n_inst_sqrt = m.zp - m.z + 1
    total *= n_inst_sqrt ** 2
    total += y.numel() * n_inst_sqrt ** 2
    m.total_ops = torch.Tensor([int(total)])
class STNConv(nn.Module):
    """Conv layer whose kernels are affine-transformed copies (via a
    spatial-transformer grid) of a smaller learned weight bank; responses
    across transformations are average-pooled.

    NOTE(review): the learned bank holds out_channels // 4 filters; the
    controller is expected to supply 4 transforms so the pooled output has
    out_channels channels — confirm with get_controller().
    """
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, groups=1, bias=False,
                 controller=None,
                 reduction=1,
                 ):
        super(STNConv, self).__init__()
        if isinstance(kernel_size, tuple) and kernel_size[0] == kernel_size[1]:
            kernel_size = kernel_size[0]
        # "meta" kernel is one pixel larger than the effective kernel
        zmeta = kernel_size + 1
        if controller is None:
            controller = get_controller(scale=(1,))  # todo kernel_size / (kernel_size + .5)
        self.in_plates, self.out_plates, self.zmeta, self.z, = in_channels, out_channels, zmeta, kernel_size,
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.kernel_size = kernel_size
        self.padding = padding
        self.bias = None
        self.groups = groups
        self.dilation = dilation
        self.weight = nn.Parameter(
            torch.FloatTensor(out_channels // 4, in_channels // groups, zmeta, zmeta))  # todo
        self.reset_parameters()
        # affine matrices are fixed (buffer, not parameter)
        self.register_buffer('theta', torch.FloatTensor(controller).view(-1, 2, 3))
        # self.stride2 = self.theta.shape[0] # todo
        self.stride2 = 1
        self.n_inst, self.n_inst_sqrt = (self.zmeta - self.z + 1) * (self.zmeta - self.z + 1), self.zmeta - self.z + 1
    def reset_parameters(self):
        # mirrors nn.Conv2d's default kaiming init
        from torch.nn import init
        n = self.in_channels
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def forward(self, input):
        bs = input.size(0)
        weight_l = []
        # warp the weight bank by each affine matrix
        for theta_ in (self.theta):
            grid = F.affine_grid(theta_.expand(self.weight.size(0), 2, 3), self.weight.size())
            weight_l.append(F.grid_sample(self.weight, grid))
        # todo
        # weight_l.append(self.weight.transpose(2,3).flip(3)) # 270
        # weight_l.append(self.weight.flip(2).flip(3) # 180
        # weight_l.append(self.weight.transpose(2,3).flip(2)) # 90
        weight_inst = torch.cat(weight_l)
        # crop the meta kernel back to the effective kernel size
        weight_inst = weight_inst[:, :, :self.kernel_size, :self.kernel_size]
        out = F.conv2d(input, weight_inst, bias=None, stride=self.stride,
                       padding=self.padding,
                       dilation=self.dilation, groups=self.groups, )
        # self.out_inst = out
        h, w = out.shape[2], out.shape[3]
        # pool over the transformation axis (stride2 currently 1 -> identity)
        out = out.view(bs, -1, self.out_plates, h, w)
        out = out.permute(0, 3, 4, 1, 2).contiguous().view(bs, self.out_plates * h * w, -1)
        out = F.avg_pool1d(out, self.stride2)
        # out = F.max_pool1d(out, self.stride2) # todo
        out = out.permute(0, 2, 1).contiguous().view(bs, -1, h, w)
        # self.out=out
        # out = F.avg_pool2d(out, out.size()[2:])
        # out = out.view(out.size(0), -1)
        return out
def get_controller(scale=(1,), translation=(0,), theta=(0, np.pi, np.pi / 2, -np.pi / 2)):
    """Build a bank of 2x3 affine matrices for STNConv.

    One matrix per (scale, tx, ty, angle) combination; each encodes an
    isotropic scale, a rotation by the angle, and a translation.

    Returns:
        np.ndarray: contiguous float32 array of shape (n, 2, 3).
    """
    mats = []
    for s in scale:
        # isotropic: the same factor on both axes
        for tx in translation:
            for ty in translation:
                for angle in theta:
                    cos_a, sin_a = np.cos(angle), np.sin(angle)
                    mats.append([s * cos_a, -s * sin_a, tx,
                                 s * sin_a, s * cos_a, ty])
    logging.info(f'controller stride is {len(mats)} ', )
    return np.ascontiguousarray(np.stack(mats).reshape(-1, 2, 3), np.float32)
# m = STNConv(4, 16, controller=get_controller()).cuda()
# m(torch.randn(1, 4, 112, 112).cuda())
class Conv_block(Module):
    """Conv -> BN -> nonlinearity unit (optionally spectral-normalized)."""
    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1, ):
        super(Conv_block, self).__init__()
        self.conv = nn.Conv2d(in_c, out_channels=out_c,
                              kernel_size=kernel, groups=groups, stride=stride, padding=padding,
                              bias=False)
        if conf.spec_norm:
            self.conv = nn.utils.spectral_norm(self.conv)
        self.bn = bn2d(out_c, conf.ipabn)
        # attribute name is historical; NonLin may not actually be PReLU
        self.PReLU = NonLin(out_c)
    # @jit.script_method
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.PReLU(x)
        return x
class Linear_block(Module):
    """Conv -> BN unit with no nonlinearity (optionally spectral-normalized)."""
    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1, spec_norm=conf.spec_norm):
        super(Linear_block, self).__init__()
        self.conv = nn.Conv2d(in_c, out_channels=out_c,
                              kernel_size=kernel, groups=groups, stride=stride, padding=padding,
                              bias=False)
        if spec_norm:
            self.conv = nn.utils.spectral_norm(self.conv)
        self.bn = bn2d(out_c, conf.ipabn)
    # @jit.script_method
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return x
from models.lpf import Downsample
class Depth_Wise(Module):
    """MobileFaceNet depthwise-separable unit: 1x1 expand -> depthwise conv
    (optionally SE + anti-aliased blur-pool downsampling) -> 1x1 project,
    with an optional residual connection.
    """
    def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1, ):
        super(Depth_Wise, self).__init__()
        # `groups` doubles as the expanded channel count
        self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1), )
        if conf.lpf and stride[0] == 2:
            # low-pass filtering: keep the depthwise conv at stride 1 and
            # downsample later with a blur-pool layer
            self.conv_dw = Linear_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=(1, 1),
                                        )
        else:
            self.conv_dw = Linear_block(
                groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride,
                spec_norm=False  # todo
            )
        if conf.mbfc_se:
            self.se = SEModule(groups)
        else:
            self.se = Identity()
        self.conv_dw_nlin = NonLin(groups)
        if conf.lpf and stride[0] == 2:
            self.dnsmpl = Downsample(channels=groups, filt_size=5, stride=2)
        else:
            self.dnsmpl = Identity()
        self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1), )
        self.residual = residual
    def forward(self, x):
        xx = self.conv(x)
        xx = self.conv_dw(xx)
        xx = self.se(xx)
        xx = self.conv_dw_nlin(xx)
        xx = self.dnsmpl(xx)
        xx = self.project(xx)
        if self.residual:
            output = x + xx
        else:
            output = xx
        return output
class Residual(Module):
    """A stack of ``num_block`` residual Depth_Wise units at constant width ``c``."""
    def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1), width_mult=1.):
        super(Residual, self).__init__()
        # every unit is identical: same width in/out, residual enabled
        units = [
            Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding, stride=stride, groups=groups,
                       )
            for _ in range(num_block)
        ]
        self.model = Sequential(*units)
    def forward(self, x, ):
        return self.model(x)
def make_divisible(x, divisible_by=8):
    """Round ``x`` up to the nearest multiple of ``divisible_by`` (default 8).

    The former function-local ``import numpy as np`` was redundant — ``np``
    is already imported at module level (used e.g. by get_controller).
    """
    return int(np.ceil(x * 1. / divisible_by) * divisible_by)
class MobileFaceNet(Module):
    """MobileFaceNet embedding network with width/depth multipliers.

    Stages alternate strided Depth_Wise transitions with Residual stacks;
    the head is a full-spatial depthwise conv ("global depthwise conv")
    followed by a linear embedding layer and BatchNorm1d.
    """
    def __init__(self, embedding_size=conf.embedding_size,
                 width_mult=conf.mbfc_wm, depth_mult=conf.mbfc_dm):
        super(MobileFaceNet, self).__init__()
        # if mode == 'small':
        #     blocks = [1, 4, 6, 2]
        # else:
        #     blocks = [2, 8, 16, 4]
        # base per-stage residual counts, scaled by the depth multiplier
        blocks = [1, 4, 8, 2]
        blocks = [make_divisible(b * depth_mult, 1) for b in blocks]
        self.conv1 = Conv_block(3, make_divisible(64 * width_mult),
                                kernel=(3, 3), stride=(2, 2), padding=(1, 1), )
        # if blocks[0] == 1:
        #     self.conv2_dw = Conv_block(make_divisible(64 * width_mult), make_divisible(64 * width_mult), kernel=(3, 3),
        #                                stride=(1, 1), padding=(1, 1), groups=make_divisible(64 * width_mult),
        #                                )
        # else:
        self.conv2_dw = Residual(make_divisible(64 * width_mult), num_block=blocks[0],
                                 groups=make_divisible(64 * width_mult), kernel=(3, 3), stride=(1, 1),
                                 padding=(1, 1),
                                 )
        self.conv_23 = Depth_Wise(make_divisible(64 * width_mult), make_divisible(64 * width_mult),
                                  kernel=(3, 3),
                                  stride=(2, 2), padding=(1, 1),
                                  groups=make_divisible(128 * width_mult),
                                  )
        self.conv_3 = Residual(make_divisible(64 * width_mult), num_block=blocks[1],
                               groups=make_divisible(128 * width_mult), kernel=(3, 3), stride=(1, 1), padding=(1, 1),
                               )
        self.conv_34 = Depth_Wise(make_divisible(64 * width_mult), make_divisible(128 * width_mult), kernel=(3, 3),
                                  stride=(2, 2), padding=(1, 1), groups=make_divisible(256 * width_mult),
                                  )
        self.conv_4 = Residual(make_divisible(128 * width_mult), num_block=blocks[2],
                               groups=make_divisible(256 * width_mult), kernel=(3, 3), stride=(1, 1), padding=(1, 1),
                               )
        self.conv_45 = Depth_Wise(make_divisible(128 * width_mult), make_divisible(128 * width_mult), kernel=(3, 3),
                                  stride=(2, 2), padding=(1, 1), groups=make_divisible(512 * width_mult),
                                  )
        # NOTE(review): conv_5 expands to 256*wm while conv_45 used 512*wm —
        # confirm this asymmetry is intentional
        self.conv_5 = Residual(make_divisible(128 * width_mult), num_block=blocks[3],
                               groups=make_divisible(256 * width_mult), kernel=(3, 3), stride=(1, 1), padding=(1, 1),
                               )
        # Conv2d_bk = nn.Conv2d
        # nn.Conv2d = STNConv
        self.conv_6_sep = Conv_block(make_divisible(128 * width_mult), make_divisible(512 * width_mult), kernel=(1, 1),
                                     stride=(1, 1), padding=(0, 0), )
        # four stride-2 layers -> spatial size is input_size / 16 here
        out_resolution = conf.input_size // 16
        # "global depthwise conv": kernel covers the whole remaining feature map
        self.conv_6_dw = Linear_block(make_divisible(512 * width_mult), make_divisible(512 * width_mult),
                                      groups=make_divisible(512 * width_mult), kernel=(out_resolution, out_resolution),
                                      stride=(1, 1),
                                      padding=(0, 0),
                                      )
        # nn.Conv2d = Conv2d_bk
        self.conv_6_flatten = Flatten()
        self.linear = nn.Linear(make_divisible(512 * width_mult), embedding_size, bias=False, )
        if conf.spec_norm:
            self.linear = nn.utils.spectral_norm(self.linear)
        self.bn = nn.BatchNorm1d(embedding_size)
        self._initialize_weights()
    def _initialize_weights(self):
        # Xavier for conv-like and linear layers, unit-gain for norms
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, DoubleConv, STNConv)):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
    # @jit.script_method
    def forward(self, x, *args, **kwargs):
        if conf.input_size != x.shape[-1]:
            x = F.interpolate(x, size=conf.input_size, mode='bicubic', align_corners=True)
        out = self.conv1(x)
        out = self.conv2_dw(out)
        out = self.conv_23(out)
        out = self.conv_3(out)
        out = self.conv_34(out)
        out = self.conv_4(out)
        out = self.conv_45(out)
        out = self.conv_5(out)
        fea7x7 = self.conv_6_sep(out)
        out = self.conv_6_dw(fea7x7)
        out = self.conv_6_flatten(out)
        out = self.linear(out)
        out = self.bn(out)
        if kwargs.get('use_of', False):
            # also expose the pre-pooling feature map (e.g. for feature transfer)
            return out, fea7x7
        else:
            return out
class CSMobileFaceNet(nn.Module):
    """Removed architecture; kept only so stale configs fail loudly on construction."""
    def __init__(self):
        # reject instantiation unconditionally
        raise ValueError('deprecated')
# nB = gl_conf.batch_size
# idx_ = torch.arange(0, nB, dtype=torch.long)
class AdaCos(nn.Module):
    """AdaCos margin head (https://arxiv.org/abs/1905.00292): cosine logits
    with an additive angular margin and a scale ``s`` that is re-estimated
    from batch statistics every forward pass instead of being fixed.
    """
    def __init__(self, num_classes=None, m=conf.margin, num_features=conf.embedding_size):
        super(AdaCos, self).__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        # paper's initial scale: sqrt(2) * log(C - 1)
        self.s = math.sqrt(2) * math.log(num_classes - 1)  # todo maybe scale
        self.m = m
        self.kernel = nn.Parameter(torch.FloatTensor(num_features, num_classes))
        nn.init.xavier_uniform_(self.kernel)
        self.device_id = list(range(conf.num_devs))
        self.step = 0
        self.writer = conf.writer
        self.interval = conf.log_interval
        # mixing constant used in the B_avg correction below
        self.k = 0.5
        assert self.writer is not None
    def update_mrg(self, m=conf.margin, s=conf.scale):
        self.m = m
        self.s = s
    def forward(self, input, label=None):
        """Return scaled margin logits; plain cosine logits when label is None."""
        bs = input.shape[0]
        x = F.normalize(input, dim=1)
        W = F.normalize(self.kernel, dim=0)
        # logits = F.linear(x, W)
        logits = torch.mm(x, W).clamp(-1, 1)
        logits = logits.float()
        if label is None:
            return logits
        # add margin
        one_hot = torch.zeros_like(logits)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        # clamp keeps acos away from the +-1 singularities
        theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
        if self.m != 0:
            target_logits = torch.cos(theta + self.m)
            output = logits * (1 - one_hot) + target_logits * one_hot
        else:
            output = logits
        # feature re-scale
        with torch.no_grad():
            # average non-target logit mass (log domain), per the AdaCos paper
            B_avg = torch.logsumexp((self.s * logits)[one_hot < 1], dim=0) - np.log(input.shape[0])
            B_avg = B_avg + np.log(self.k / (1 - self.k))
            # print(B_avg, self.s)
            theta_neg = theta[one_hot < 1].view(bs, self.n_classes - 1)
            theta_pos = theta[one_hot == 1]
            theta_med = torch.median(theta_pos + self.m)
            # new scale, with the median angle capped at pi/4 + m
            s_now = B_avg / torch.cos(torch.min(
                (math.pi / 4 + self.m) * torch.ones_like(theta_med),
                theta_med))
            # self.s = self.s * 0.9 + s_now * 0.1
            self.s = s_now
            if self.step % self.interval == 0:
                self.writer.add_scalar('theta/pos_med', theta_med.item(), self.step)
                self.writer.add_scalar('theta/pos_mean', theta_pos.mean().item(), self.step)
                self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                self.writer.add_scalar('theta/neg_mean', theta_neg.mean().item(), self.step)
                self.writer.add_scalar('theta/bavg', B_avg.item(), self.step)
                self.writer.add_scalar('theta/scale', self.s, self.step)
            if self.step % 9999 == 0:
                # self.writer.add_histogram('theta/pos_th', theta_pos, self.step)
                # self.writer.add_histogram('theta/pos_neg', theta_neg, self.step)
                pass
        self.step += 1
        output *= self.s
        return output
class AdaMrg(nn.Module):
    """Adaptive-margin head: fixed scale (conf.scale) but a margin ``m``
    re-estimated each forward pass from batch logit statistics — the dual
    of AdaCos, which adapts the scale instead.
    """
    def __init__(self, num_classes=None, m=conf.margin, num_features=conf.embedding_size):
        super(AdaMrg, self).__init__()
        self.num_features = num_features
        self.n_classes = num_classes
        self.s = conf.scale
        self.m = m
        self.kernel = nn.Parameter(torch.FloatTensor(num_features, num_classes))
        nn.init.xavier_uniform_(self.kernel)
        self.device_id = list(range(conf.num_devs))
        self.step = 0
        self.writer = conf.writer
        self.interval = conf.log_interval
        # mixing constant for the B_avg correction and the theta_med cap
        self.k = 0.5
        assert self.writer is not None
    def update_mrg(self, m=conf.margin, s=conf.scale):
        self.m = m
        self.s = s
    def forward(self, input, label=None):
        """Return scaled margin logits; plain cosine logits when label is None."""
        bs = input.shape[0]  # (bs, fdim)
        x = F.normalize(input, dim=1)  # (bs, fdim)
        W = F.normalize(self.kernel, dim=0)  # (fdim, ncls)
        logits = torch.mm(x, W).clamp(-1, 1)  # (bs, ncls)
        logits = logits.float()
        if label is None:
            return logits
        # add margin
        one_hot = torch.zeros_like(logits)
        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
        # clamp keeps acos away from the +-1 singularities
        theta = torch.acos(torch.clamp(logits, -1.0 + 1e-7, 1.0 - 1e-7))
        # calc margin
        with torch.no_grad():
            B_avg = torch.logsumexp(self.s * logits[one_hot < 1], dim=0) - np.log(bs)
            # B_avg = torch.FloatTensor([np.log(logits.shape[1] - 1)])
            B_avg = B_avg + np.log(self.k / (1 - self.k))
            theta_neg = theta[one_hot < 1].view(bs, self.n_classes - 1)
            theta_pos = theta[one_hot == 1]
            theta_med = torch.median(theta_pos)
            # margin = (angle implied by B_avg) - capped median target angle
            m_now = torch.acos(B_avg / self.s) - min(theta_med.item(),
                                                     self.k * np.pi / 2
                                                     )
            # m_now = m_now.clamp(0.1, 0.5)
            m_now = m_now.item()
            self.m = m_now
            if self.step % self.interval == 0:
                print('margin ', m_now, theta_med.item(), torch.acos(B_avg / self.s).item(), )
                self.writer.add_scalar('theta/mrg', m_now, self.step)
                self.writer.add_scalar('theta/pos_med', theta_med.item(), self.step)
                self.writer.add_scalar('theta/pos_mean', theta_pos.mean().item(), self.step)
                self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                self.writer.add_scalar('theta/neg_mean', theta_neg.mean().item(), self.step)
                self.writer.add_scalar('theta/bavg', B_avg.item(), self.step)
                self.writer.add_scalar('theta/scale', self.s, self.step)
            if self.step % 999 == 0:
                self.writer.add_histogram('theta/pos_th', theta_pos, self.step)
                self.writer.add_histogram('theta/pos_neg', theta_neg, self.step)
                pass
        self.step += 1
        if self.m != 0:
            target_logits = torch.cos(theta + self.m)
            output = logits * (1 - one_hot) + target_logits * one_hot
        else:
            output = logits
        output *= self.s
        return output
class AdaMArcface(Module):
    # implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599
    """ArcFace-style head with a *learnable per-class* margin vector ``self.m``
    (one margin per class), initialized at 0.5 and clamped to [0, 0.8] via
    clamp_m(). forward returns (scaled logits, mean margin).
    """
    def __init__(self, embedding_size=conf.embedding_size, classnum=None, s=conf.scale, m=conf.margin):
        super(AdaMArcface, self).__init__()
        self.classnum = classnum
        kernel = Parameter(torch.FloatTensor(embedding_size, classnum))
        kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.kernel = kernel
        self.update_mrg()
        self.easy_margin = False
        self.step = 0
        self.writer = conf.writer
        self.interval = conf.log_interval
        # NOTE(review): the scalar `m` argument is discarded here and replaced
        # by a learnable per-class margin vector — confirm intended
        m = Parameter(torch.Tensor(self.classnum))
        m.data.fill_(0.5)
        self.m = m
        # NOTE(review): second update_mrg() call is redundant (it only sets s)
        self.update_mrg()
    def update_mrg(self, m=conf.margin, s=conf.scale):
        # only the scale is updated; the per-class margin is learned
        self.s = s
    def clamp_m(self):
        # keep each learned margin in a sane angular range
        self.m.data.clamp_(0, 0.8)
    def forward_eff_v1(self, embeddings, label=None):
        """Return (scaled margin logits, mean margin); plain cosine logits when label is None."""
        assert not torch.isnan(embeddings).any().item()
        dev = self.m.get_device()
        if dev == -1:
            dev = 0
        # per-class trigonometric terms derived from the learned margins
        cos_m = (torch.cos(self.m))
        sin_m = (torch.sin(self.m))
        mm = (torch.sin(self.m) * (self.m))
        threshold = (torch.cos(np.pi - self.m))
        bs = embeddings.shape[0]
        idx_ = torch.arange(0, bs, dtype=torch.long)
        # self.m = self.m.clamp(min=0)
        m_mean = torch.mean(self.m)
        if self.interval >= 1 and self.step % self.interval == 0:
            with torch.no_grad():
                norm_mean = torch.norm(embeddings, dim=1).mean()
                m_mean = torch.mean(self.m).cuda()
                if self.writer:
                    self.writer.add_scalar('theta/norm_mean', norm_mean.item(), self.step)
                    self.writer.add_scalar('theta/m_mean', m_mean.item(), self.step)
                    self.writer.add_histogram('ms', to_numpy(self.m), self.step)
                logging.info(f'norm {norm_mean.item():.2e}')
                logging.info(f'm_mean {m_mean.item():.2e}')
        embeddings = F.normalize(embeddings, dim=1)
        kernel_norm = l2_norm(self.kernel, axis=0)  # 0 dim is emd dim
        cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        if label is None:
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0:
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                if self.writer:
                    self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                logging.info(f'pos_med: {torch.mean(theta_pos).item():.2e} ' +
                             f'neg_med: {torch.mean(theta_neg).item():.2e} '
                             )
        output = cos_theta.clone()  # todo avoid copy ttl
        # cos(theta + m) via the angle-addition identity, using per-class m
        cos_theta_need = cos_theta[idx_, label]
        cos_theta_2 = torch.pow(cos_theta_need, 2)
        sin_theta_2 = 1 - cos_theta_2
        sin_theta = torch.sqrt(sin_theta_2)
        cos_theta_m = (cos_theta_need * cos_m[label] - sin_theta * sin_m[label])
        cond_mask = (cos_theta_need - threshold[label]) <= 0
        if torch.any(cond_mask).item():
            logging.info(f'this concatins a difficult sample, {cond_mask.sum().item()}')
        if self.easy_margin:
            keep_val = cos_theta_need
        else:
            keep_val = (cos_theta_need - mm[label])  # when theta not in [0,pi], use cosface instead
        cos_theta_m[cond_mask] = keep_val[cond_mask].type_as(cos_theta_m)
        if self.writer and self.step % self.interval == 0:
            # self.writer.add_scalar('theta/cos_th_m_mean', torch.mean(cos_theta_m).item(), self.step)
            self.writer.add_scalar('theta/cos_th_m_median', cos_theta_m.mean().item(), self.step)
        output[idx_, label] = cos_theta_m.type_as(output)
        output *= self.s
        self.step += 1
        return output, m_mean
    forward = forward_eff_v1
class MySoftmax(Module):
    """Plain normalized-softmax head: cosine logits scaled by conf.scale,
    no margin. Periodically logs target/non-target angle statistics.
    """
    def __init__(self, embedding_size=conf.embedding_size, classnum=None):
        super(MySoftmax, self).__init__()
        self.classnum = classnum
        self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
        self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.s = conf.scale
        self.step = 0
        self.writer = conf.writer
    def forward(self, embeddings, label):
        embeddings = F.normalize(embeddings, dim=1)
        kernel_norm = l2_norm(self.kernel, axis=0)
        logits = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        # periodic diagnostics only; no gradient flows from this block
        with torch.no_grad():
            if self.step % conf.log_interval == 0:
                bs = embeddings.shape[0]
                one_hot = torch.zeros_like(logits)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(logits)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
        self.step += 1
        logits *= self.s
        return logits
class ArcSinMrg(nn.Module):
    """ArcFace-style head with a sine-modulated additive angular margin.

    Non-target logits are cos(theta); the target logit is
    cos(theta + m * (1 - sin(theta)) + 0.3); everything is scaled by s.
    The kernel is a bias-free Linear layer, optionally spectrally normalized.
    """

    def __init__(self, embedding_size=conf.embedding_size, classnum=None, s=conf.scale, m=conf.margin):
        super(ArcSinMrg, self).__init__()
        self.classnum = classnum
        kernel = nn.Linear(embedding_size, classnum, bias=False)
        # kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # Uniform init, then renorm+rescale each column toward unit L2 norm.
        kernel.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        # todo
        if conf.spec_norm:
            kernel = nn.utils.spectral_norm(kernel)
        self.kernel = kernel
        self.update_mrg()
        self.easy_margin = False
        self.step = 0  # forward-call counter used to throttle logging
        self.writer = conf.writer
        self.interval = conf.log_interval

    def update_mrg(self, m=conf.margin, s=conf.scale):
        """Re-set margin m and scale s (callable at runtime, e.g. for margin warm-up)."""
        m = np.float32(m)
        pi = np.float32(np.pi)
        dev = conf.model1_dev[0]
        if dev == -1:
            dev = 0  # fall back to device 0 when conf requests -1
        self.m = m  # the margin value, default is 0.5
        self.s = s  # scalar value default is 64, see normface https://arxiv.org/abs/1704.06369
        # cos(pi - m): below this cosine the sample counts as "difficult".
        self.threshold = torch.FloatTensor([math.cos(pi - m)]).to(dev)

    def forward(self, embeddings, label):
        """Return margin-adjusted, scaled logits of shape (batch, classnum).

        If label is None, returns scaled plain cosines (inference path).
        """
        assert not torch.isnan(embeddings).any().item()
        bs = embeddings.shape[0]
        idx_ = torch.arange(0, bs, dtype=torch.long)
        # Periodically log the mean embedding norm before normalization.
        if self.interval >= 1 and self.step % self.interval == 0:
            with torch.no_grad():
                norm_mean = torch.norm(embeddings, dim=1).mean()
                if self.writer:
                    self.writer.add_scalar('theta/norm_mean', norm_mean.item(), self.step)
                logging.info(f'norm {norm_mean.item():.2e}')
        embeddings = F.normalize(embeddings, dim=1)
        cos_theta = self.kernel(embeddings)
        # Divide by the weight row norms so logits become pure cosines.
        cos_theta = cos_theta / torch.norm(self.kernel.weight, dim=1)
        if label is None:
            cos_theta *= self.s
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0:
                # Log angle statistics for target (pos) and non-target (neg) classes.
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                if self.writer:
                    self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/pos_min', torch.min(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/pos_max', torch.max(theta_pos).item(), self.step)
                    # self.writer.add_histogram('theta/pos', theta_pos, self.step)
                    # self.writer.add_histogram('theta/neg', theta_neg, self.step)
                    self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                    logging.info(f'pos_med: {torch.mean(theta_pos).item():.2e} ' +
                                 f'neg_med: {torch.mean(theta_neg).item():.2e} '
                                 )
        # NOTE: output aliases cos_theta, so the target-logit write below is in place.
        output = cos_theta
        cos_theta_need = cos_theta[idx_, label].clone()
        theta = torch.acos(cos_theta_need)
        sin_theta = torch.sin(theta.clamp(0, np.pi / 2))
        # Sine-modulated margin: the added angle shrinks as theta approaches pi/2.
        cos_theta_m = torch.cos(theta + self.m * (1 - sin_theta) + 0.3)
        # cos_theta_m = torch.cos(theta + self.m * cos_theta + 0.1)
        cond_mask = (cos_theta_need - self.threshold) <= 0
        if torch.any(cond_mask).item():
            # "difficult" sample: theta + m would exceed pi (no fallback applied here)
            logging.info(f'this concatins a difficult sample, {cond_mask.sum().item()}')
        output[idx_, label] = cos_theta_m.type_as(output)
        output *= self.s  # scale up in order to make softmax work, first introduced in normface
        self.step += 1
        return output
class Arcface(Module):
    # implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599
    """ArcFace additive angular margin head.

    Several forward variants are provided; `forward` is bound to
    `forward_eff_v2` below. v1 applies the margin via the
    cos(a+b) expansion, v2 via acos/cos directly (and persists the step
    counter in a non-trainable Parameter so it survives checkpointing),
    v3 is v1 without the norm logging and writes into cos_theta in place,
    and forward_neff is the older non-indexed formulation.
    """

    def __init__(self, embedding_size=conf.embedding_size, classnum=None, s=conf.scale, m=conf.margin):
        super(Arcface, self).__init__()
        self.classnum = classnum
        # todo
        # kernel = nn.Linear(embedding_size, classnum, bias=False)
        # kernel.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # Uniform init, then renorm+rescale each column toward unit L2 norm.
        kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        if conf.spec_norm:
            kernel = nn.utils.spectral_norm(kernel)
        self.kernel = kernel
        self.update_mrg()
        self.easy_margin = False
        # Non-trainable parameter so the step counter is saved/restored with state_dict.
        self.step_param = nn.Parameter(torch.Tensor([0]),requires_grad=False)
        self.step = self.step_param.item()
        self.writer = conf.writer
        self.interval = conf.log_interval

    def update_mrg(self, m=conf.margin, s=conf.scale):
        """Re-set margin m and scale s plus the derived constants used in forward."""
        m = np.float32(m)
        pi = np.float32(np.pi)
        self.m = m  # the margin value, default is 0.5
        self.s = s  # scalar value default is 64, see normface https://arxiv.org/abs/1704.06369
        self.cos_m = np.cos(m)  # torch.FloatTensor([np.cos(m)])
        self.sin_m = np.sin(m)  # torch.FloatTensor([np.sin(m)])
        self.mm =np.sin(m)*m  # torch.FloatTensor([np.sin(m) * m])
        self.threshold =math.cos(pi-m)  # torch.FloatTensor([math.cos(pi - m)])
        # if torch.cuda.device_count()>0:
        #     self.cos_m = self.cos_m.cuda()
        #     self.sin_m=self.sin_m.cuda()
        #     self.mm=self.mm.cuda()
        #     self.threshold =self.threshold.cuda()

    def forward_eff_v1(self, embeddings, label=None):
        """Margin via cos(theta+m) = cos*cos_m - sin*sin_m; returns scaled logits."""
        assert not torch.isnan(embeddings).any().item()
        bs = embeddings.shape[0]
        idx_ = torch.arange(0, bs, dtype=torch.long)
        # Periodically log the mean (pre-normalization) embedding norm.
        if self.interval >= 1 and self.step % self.interval == 0:
            with torch.no_grad():
                norm_mean = torch.norm(embeddings, dim=1).mean()
                if self.writer:
                    self.writer.add_scalar('theta/norm_mean', norm_mean.item(), self.step)
                logging.info(f'norm {norm_mean.item():.2e}')
        embeddings = F.normalize(embeddings, dim=1)
        kernel_norm = l2_norm(self.kernel, axis=0)  # 0 dim is emd dim
        cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        if label is None:
            # cos_theta *= self.s  # todo whether?
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0:
                # Log angle statistics for target (pos) and non-target (neg) classes.
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                if self.writer:
                    self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                    logging.info(f'pos_med: {torch.mean(theta_pos).item():.2e} ' +
                                 f'neg_med: {torch.mean(theta_neg).item():.2e} '
                                 )
        output = cos_theta.clone()  # todo avoid copy ttl
        cos_theta_need = cos_theta[idx_, label]
        cos_theta_2 = torch.pow(cos_theta_need, 2)
        sin_theta_2 = 1 - cos_theta_2
        sin_theta = torch.sqrt(sin_theta_2)
        # cos(theta + m) via the angle-addition formula.
        cos_theta_m = (cos_theta_need * self.cos_m - sin_theta * self.sin_m)
        cond_mask = (cos_theta_need - self.threshold) <= 0
        if torch.any(cond_mask).item():
            logging.info(f'this concatins a difficult sample, {cond_mask.sum().item()}')
        if self.easy_margin:
            keep_val = cos_theta_need
        else:
            keep_val = (cos_theta_need - self.mm)  # when theta not in [0,pi], use cosface instead
        cos_theta_m[cond_mask] = keep_val[cond_mask].type_as(cos_theta_m)
        if self.writer and self.step % self.interval == 0:
            # self.writer.add_scalar('theta/cos_th_m_mean', torch.mean(cos_theta_m).item(), self.step)
            self.writer.add_scalar('theta/cos_th_m_median', cos_theta_m.mean().item(), self.step)
        output[idx_, label] = cos_theta_m.type_as(output)
        output *= self.s
        self.step += 1
        return output

    def forward_eff_v2(self, embeddings, label=None):
        """Margin via acos then cos(theta + m); step persisted in step_param.

        Logging is restricted to device 0 (kernel.get_device()==0) to avoid
        duplicate logs under data parallelism.
        """
        self.step = self.step_param.item()
        assert not torch.isnan(embeddings).any().item()
        bs = embeddings.shape[0]
        idx_ = torch.arange(0, bs, dtype=torch.long)
        if (self.interval >= 1 and self.step % self.interval == 0) and self.kernel.get_device()==0:
            with torch.no_grad():
                norm_mean = torch.norm(embeddings, dim=1).mean()
                if self.writer:
                    self.writer.add_scalar('theta/norm_mean', norm_mean.item(), self.step)
                logging.info(f'norm {norm_mean.item():.2e}')
        embeddings = F.normalize(embeddings, dim=1)
        kernel_norm = l2_norm(self.kernel, axis=0)  # 0 dim is emd dim
        cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        # cos_theta = self.kernel(embeddings)
        # cos_theta /= torch.norm(self.kernel.weight, dim=1)
        # torch.norm(cos_theta, dim=1)
        # stat(cos_theta)
        if label is None:
            # Inference path: scaled plain cosines.
            cos_theta *= self.s
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0 and self.kernel.get_device()==0:
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                tpmean = torch.mean(theta_pos).item()
                tnmean = torch.mean(theta_neg).item()
                if self.writer:
                    self.writer.add_scalar('theta/pos_mean', tpmean , self.step)
                    self.writer.add_scalar('theta/pos_min', torch.min(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/pos_max', torch.max(theta_pos).item(), self.step)
                    # self.writer.add_histogram('theta/pos', theta_pos, self.step)
                    # self.writer.add_histogram('theta/neg', theta_neg, self.step)
                    self.writer.add_scalar('theta/neg_mean', tnmean, self.step)
                    logging.info(f'pos_med: {tpmean:.2e} ' +
                                 f'neg_med: {tnmean:.2e} ' )
        output = cos_theta.clone()
        cos_theta_need = cos_theta[idx_, label]
        theta = torch.acos(cos_theta_need)
        # Direct additive angular margin (no trig expansion, no fallback branch).
        cos_theta_m = torch.cos(theta + self.m)
        cond_mask = (cos_theta_need - self.threshold) <= 0
        if torch.any(cond_mask).item():
            logging.info(f'this concatins a difficult sample, {cond_mask.sum().item()}')
        output[idx_, label] = cos_theta_m.type_as(output)
        output *= self.s  # scale up in order to make softmax work, first introduced in normface
        self.step_param.data +=1
        return output

    def forward_eff_v3(self, embeddings, label=None):
        """Like v1 but without norm logging; writes the margin into cos_theta in place."""
        assert not torch.isnan(embeddings).any().item()
        bs = embeddings.shape[0]
        idx_ = torch.arange(0, bs, dtype=torch.long)
        embeddings = F.normalize(embeddings, dim=1)
        kernel_norm = l2_norm(self.kernel, axis=0)  # 0 dim is emd dim
        cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        if label is None:
            # cos_theta *= self.s  # todo whether?
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0:
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                if self.writer:
                    self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                else:
                    logging.info(f'pos_med: {torch.mean(theta_pos).item():.2e} ' +
                                 f'neg_med: {torch.mean(theta_neg).item():.2e} '
                                 )
        # NOTE: output aliases cos_theta here (no clone), unlike v1/v2.
        output = cos_theta
        cos_theta_need = cos_theta[idx_, label].clone()
        sin_theta = torch.sqrt(1 - torch.pow(cos_theta_need, 2))
        cos_theta_m = (cos_theta_need * self.cos_m - sin_theta * self.sin_m)
        cond_mask = (cos_theta_need - self.threshold) <= 0
        if torch.any(cond_mask).item():
            logging.info(f'this concatins a difficult sample, {cond_mask.sum().item()}')
        if self.easy_margin:
            keep_val = cos_theta_need
        else:
            keep_val = (cos_theta_need - self.mm)  # when theta not in [0,pi], use cosface instead
        cos_theta_m[cond_mask] = keep_val[cond_mask].type_as(cos_theta_m)
        if self.writer and self.step % self.interval == 0:
            # self.writer.add_scalar('theta/cos_th_m_mean', torch.mean(cos_theta_m).item(), self.step)
            self.writer.add_scalar('theta/cos_th_m_median', cos_theta_m.mean().item(), self.step)
        output[idx_, label] = cos_theta_m.type_as(output)
        output *= self.s
        self.step += 1
        return output

    def forward_neff(self, embeddings, label):
        """Older formulation: computes cos(theta+m) for ALL classes, then keeps
        only the target-class entries. No logging, label is required."""
        nB = embeddings.shape[0]
        idx_ = torch.arange(0, nB, dtype=torch.long)
        embeddings = F.normalize(embeddings, dim=1)
        cos_theta = self.kernel(embeddings)
        cos_theta /= torch.norm(self.kernel.weight, dim=1)
        # kernel_norm = l2_norm(self.kernel, axis=0)
        # cos_theta = torch.mm(embeddings, kernel_norm)
        cos_theta = cos_theta.clamp(-1, 1)  # for numerical stability
        cos_theta_2 = torch.pow(cos_theta, 2)
        sin_theta_2 = 1 - cos_theta_2
        sin_theta = torch.sqrt(sin_theta_2)
        cos_theta_m = (cos_theta * self.cos_m - sin_theta * self.sin_m)
        ## this condition controls the theta+m should in range [0, pi]
        ## 0<=theta+m<=pi
        ## -m<=theta<=pi-m
        cond_mask = (cos_theta - self.threshold) <= 0
        if torch.any(cond_mask).item():
            logging.info('this concatins a difficult sample')
        keep_val = (cos_theta - self.mm)  # when theta not in [0,pi], use cosface instead
        cos_theta_m[cond_mask] = keep_val[cond_mask]
        output = cos_theta * 1.0  # a little bit hacky way to prevent in_place operation on cos_theta
        output[idx_, label] = cos_theta_m[idx_, label]
        output *= self.s  # scale up in order to make softmax work, first introduced in normface
        return output

    # Active variant (see the docstrings above for the differences).
    forward = forward_eff_v2
    # forward = forward_eff_v1
    # forward = forward_neff
# todo
class DistFCFunc(torch.autograd.Function):
    # Placeholder for a distributed (model-parallel) fully-connected autograd
    # Function; both passes are unimplemented stubs that return None.
    @staticmethod
    def forward(ctx, x):
        # TODO: not implemented.
        pass
    @staticmethod
    def backward(ctx,grad_output):
        # TODO: not implemented.
        pass
# todo this is v2: kernel upgrade
class ArcfaceNeg(Module):
    # implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599
    """ArcFace head with an extra NEGATIVE margin m2 on the hardest non-target
    classes: the positive logit gets the usual additive angular margin m,
    while the top-k hardest negatives get their angle reduced by m2
    (making them harder). `forward` is bound to `forward_eff_v2`.
    """

    def __init__(self, embedding_size=conf.embedding_size, classnum=None, s=conf.scale, m=conf.margin):
        super(ArcfaceNeg, self).__init__()
        self.classnum = classnum
        kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # Uniform init, then renorm+rescale each column toward unit L2 norm.
        kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        # kernel = nn.Linear(embedding_size, classnum, bias=False)
        # kernel.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.kernel = kernel
        if conf.fp16:
            m = np.float16(m)
            pi = np.float16(np.pi)
        else:
            m = np.float32(m)
            pi = np.float32(np.pi)
        self.m = m  # the margin value, default is 0.5
        self.s = s  # scalar value default is 64, see normface https://arxiv.org/abs/1704.06369
        self.cos_m = np.cos(m)
        self.sin_m = np.sin(m)
        self.mm = self.sin_m * m  # issue 1
        self.threshold = math.cos(pi - m)  # below this, theta + m would exceed pi
        self.threshold2 = math.cos(m)  # above this, theta - m2 would go below 0
        self.m2 = conf.margin2  # negative margin applied to the hardest negatives
        self.interval = conf.log_interval
        # Non-trainable parameter so the step counter is saved with state_dict.
        self.step_param = nn.Parameter(torch.Tensor([0]),requires_grad=False)
        self.step = self.step_param.item()
        self.writer = conf.writer

    def forward_eff(self, embeddings, label=None):
        """Positive margin via cos(a+b) expansion; negative margin applied to
        the top-k hardest negatives (entire negative branch under no_grad)."""
        bs = embeddings.shape[0]
        embeddings = F.normalize(embeddings, dim=1)
        idx_ = torch.arange(0, bs, dtype=torch.long)
        kernel_norm = l2_norm(self.kernel, axis=0)
        cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        if label is None:
            # Inference path: scaled plain cosines.
            cos_theta *= self.s
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0:
                # Log angle statistics for target (pos) and non-target (neg) classes.
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1].view(bs, self.classnum - 1)
                theta_pos = theta[one_hot == 1].view(bs)
                if self.writer:
                    self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                else:
                    logging.info(f'pos_med: {torch.mean(theta_pos).item():.2e} ' +
                                 f'neg_med: {torch.mean(theta_neg).item():.2e} '
                                 )
        # NOTE: output aliases cos_theta; the writes below are in place.
        output = cos_theta
        if self.m != 0:
            cos_theta_need = cos_theta[idx_, label].clone()
            cos_theta_2 = torch.pow(cos_theta_need, 2)
            sin_theta_2 = 1 - cos_theta_2
            sin_theta = torch.sqrt(sin_theta_2)
            # cos(theta + m) via the angle-addition formula.
            cos_theta_m = (cos_theta_need * self.cos_m - sin_theta * self.sin_m)
            cond_mask = (cos_theta_need - self.threshold) <= 0  # those should be replaced
            if torch.any(cond_mask).item():
                logging.info(f'this concatins a difficult sample {cond_mask.sum().item()}')
            keep_val = (cos_theta_need - self.mm)  # when theta not in [0,pi], use cosface instead
            cos_theta_m[cond_mask] = keep_val[cond_mask].type_as(cos_theta_m)
            output[idx_, label] = cos_theta_m.type_as(output)
        if self.m2 != 0:
            with torch.no_grad():
                # Select the top-k highest-cosine (hardest) negatives per sample;
                # the target column is pushed to -inf-like so it is never picked.
                cos_theta_neg = cos_theta.clone()
                cos_theta_neg[idx_, label] = -self.s * 999
                topk = conf.topk
                topkind = torch.argsort(cos_theta_neg, dim=1)[:, -topk:]
                idx = torch.stack([idx_] * topk, dim=1)
                cos_theta_neg_need = cos_theta_neg[idx, topkind]
                sin_theta_neg = torch.sqrt(1 - torch.pow(cos_theta_neg_need, 2))
                # cos(theta - m2): reduce the negative angle, making negatives harder.
                cos_theta_neg_m = (cos_theta_neg_need * np.cos(self.m2) + sin_theta_neg * np.sin(self.m2))
                cond_mask = (cos_theta_neg_need < self.threshold2)  # what is masked is what SHOULD be replaced
                if torch.any(cos_theta_neg_need >= self.threshold2).item():
                    logging.info(f'neg concatins difficult samples {(cos_theta_neg_need >= self.threshold2).sum().item()}')
                cos_theta_neg_need = cos_theta_neg_need.clone()
                cos_theta_neg_need[cond_mask] = cos_theta_neg_m[cond_mask]
                output[idx, topkind] = cos_theta_neg_need.type_as(output)
        output *= self.s
        self.step += 1
        return output

    def forward_eff_v2(self, embeddings, label=None):
        """Positive margin via acos/cos; top-k negatives via torch.topk.

        Logging is restricted to device 0 (kernel.get_device()==0) to avoid
        duplicate logs under data parallelism; step is persisted in step_param.
        """
        self.step = self.step_param.item()
        bs = embeddings.shape[0]
        if (self.interval >= 1 and self.step % self.interval == 0) and self.kernel.get_device()==0:# or (self.step<=999): # todo just for observation
            with torch.no_grad():
                norm_mean = torch.norm(embeddings, dim=1).mean()
                if self.writer:
                    self.writer.add_scalar('theta/norm_mean', norm_mean.item(), self.step)
                logging.info(f'{self.step} norm {norm_mean.item():.2e}')
        embeddings = F.normalize(embeddings, dim=1)
        idx_ = torch.arange(0, bs, dtype=torch.long)
        kernel_norm = l2_norm(self.kernel, axis=0)
        cos_theta = torch.mm(embeddings, kernel_norm).clamp(-1, 1)
        # cos_theta = self.kernel(embeddings)
        # cos_theta /= torch.norm(self.kernel.weight, dim=1)
        if label is None:
            # Inference path: scaled plain cosines.
            cos_theta *= self.s
            return cos_theta
        with torch.no_grad():
            if self.interval >= 1 and self.step % self.interval == 0 and self.kernel.get_device()==0:
                one_hot = torch.zeros_like(cos_theta)
                one_hot.scatter_(1, label.view(-1, 1).long(), 1)
                theta = torch.acos(cos_theta)
                theta_neg = theta[one_hot < 1]
                theta_pos = theta[idx_, label]
                if self.writer:
                    self.writer.add_scalar('theta/pos_med', torch.mean(theta_pos).item(), self.step)
                    self.writer.add_scalar('theta/neg_med', torch.mean(theta_neg).item(), self.step)
                    logging.info(f'pos_med: {torch.mean(theta_pos).item():.2e} ' +
                                 f'neg_med: {torch.mean(theta_neg).item():.2e} ')
        output = cos_theta.clone()
        if self.m != 0:
            # Positive (target-class) additive angular margin.
            cos_theta_need = cos_theta[idx_, label]
            theta = torch.acos(cos_theta_need)
            cos_theta_m = torch.cos(theta + self.m)
            cond_mask = (cos_theta_need - self.threshold) <= 0  # those should be replaced
            if torch.any(cond_mask).item():
                logging.info(f'this concatins a difficult sample, {cond_mask.sum().item()}')
                # exit(1)
            output[idx_, label] = cos_theta_m.type_as(output)
        if self.m2 != 0:
            with torch.no_grad():
                # Top-k hardest negatives; the target column is excluded by
                # pushing it to a very negative value first.
                cos_theta_neg = cos_theta.clone()
                cos_theta_neg[idx_, label] = -self.s * 999
                topk = conf.topk
                # topkind2 = torch.argsort(cos_theta_neg, dim=1)[:, -topk:]
                _, topkind = torch.topk(cos_theta_neg, topk, dim=1)
                # assert (topkind2==topkind).cpu().detach().numpy().all(), f'{topkind2} {topkind}'
                idx = torch.stack([idx_] * topk, dim=1)
                cos_theta_neg_need = cos_theta[idx, topkind]
                theta = torch.acos(cos_theta_neg_need)
                # cos(theta - m2): shrink the negative angle (harder negatives).
                cos_theta_neg_m = torch.cos(theta - self.m2)
                cond_mask = (cos_theta_neg_need >= self.threshold2)  # < is masked is what should not be replaced
                if torch.any(cond_mask).item():
                    logging.info(f'neg concatins difficult samples '
                                 f'{(cond_mask).sum().item()}')
                    # exit(1)
                output[idx, topkind] = cos_theta_neg_m.type_as(output)
        output *= self.s  # scale up in order to make softmax work, first introduced in normface
        self.step_param.data +=1
        return output

    # Active variant.
    forward = forward_eff_v2
class CosFace(Module):
    # class CosFace(jit.ScriptModule):
    __constants__ = ['m', 's']
    r"""Additive cosine-margin softmax head (CosFace, https://arxiv.org/pdf/1801.09414.pdf).

    Produces s * (cos(theta) - m) for the target class and s * cos(theta)
    elsewhere, where theta is the angle between the L2-normalized embedding
    and the L2-normalized class weight.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        s: norm of input feature (logit scale)
        m: additive cosine margin
    """

    def __init__(self, embedding_size, classnum, s=conf.scale, m=conf.margin):
        super(CosFace, self).__init__()
        self.in_features = embedding_size
        self.out_features = classnum
        # self.s = torch.jit.const(s)
        # self.m = torch.jit.Const(m)
        self.s = s
        self.m = m
        self.weight = Parameter(torch.FloatTensor(classnum, embedding_size))
        nn.init.xavier_uniform_(self.weight)

    # @jit.script_method
    def forward(self, embeddings, label=None):
        """Return scaled margin logits; plain cosines when label is None."""
        feats = F.normalize(embeddings, dim=1)
        cosine = F.linear(feats, F.normalize(self.weight))
        if label is None:
            # Inference path: raw (unscaled) cosine similarities.
            return cosine
        rows = torch.arange(0, feats.shape[0], dtype=torch.long)
        margin_logit = cosine[rows, label] - self.m
        output = cosine.clone()
        output[rows, label] = margin_logit
        output *= self.s
        return output

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'in_features = {self.in_features}'
                f', out_features = {self.out_features}'
                f', s = {self.s}'
                f', m = {self.m})')
class Am_softmax(Module):
    # implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599
    """Additive-margin softmax head (fixed m=0.35, s=30).

    Produces s * (cos(theta) - m) on the target class and s * cos(theta)
    elsewhere.

    Args:
        embedding_size: dimensionality of the input embeddings.
        classnum: number of identity classes.
    """

    def __init__(self, embedding_size=conf.embedding_size, classnum=51332):
        super(Am_softmax, self).__init__()
        self.classnum = classnum
        self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
        # initial kernel: uniform, then renorm each column toward unit L2 norm
        self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.m = 0.35  # additive margin recommended by the paper
        self.s = 30.  # see normface https://arxiv.org/abs/1704.06369

    def forward(self, embeddings, label):
        """Return scaled margin logits of shape (batch, classnum)."""
        embeddings = F.normalize(embeddings, dim=1)
        kernel_norm = l2_norm(self.kernel, axis=0)
        cos_theta = torch.mm(embeddings, kernel_norm)
        cos_theta = cos_theta.clamp(-1, 1)  # for numerical stability
        phi = cos_theta - self.m
        label = label.view(-1, 1)  # size=(B,1)
        index = cos_theta.data * 0.0  # size=(B,Classnum)
        index.scatter_(1, label.data.view(-1, 1), 1)
        # BUGFIX: mask with bool instead of .byte() — indexing with uint8
        # tensors is deprecated in PyTorch (warns since 1.2, removal planned).
        index = index.bool()
        output = cos_theta * 1.0  # avoids an in-place op on cos_theta
        output[index] = phi[index]  # only change the correct predicted output
        output *= self.s  # scale up in order to make softmax work, first introduced in normface
        return output
class TripletLoss(Module):
    """Triplet loss with soft-attention-weighted positive/negative mining.

    Reference:
    Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737.
    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py.

    Args:
    - margin (float): margin for triplet (used only by the unused hinge variant).
    """

    def __init__(self, margin=0.3):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, embeddings, targets, return_info=False):
        """Vectorized soft-margin triplet loss.

        Args:
        - embeddings: feature matrix with shape (batch_size, feat_dim)
        - targets: ground truth labels with shape (batch_size,)
        - return_info: if True, also return a dict with mean ap/an distances
          and the per-sample losses.
        """
        embeddings = F.normalize(embeddings, dim=1)
        n = embeddings.size(0)
        # Pairwise Euclidean distance via ||a||^2 + ||b||^2 - 2 a.b
        dist = torch.pow(embeddings, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # BUGFIX: the positional addmm(beta, alpha, mat1, mat2) overload is
        # deprecated and removed in modern PyTorch; use keyword beta/alpha.
        dist = dist.addmm(embeddings, embeddings.t(), beta=1, alpha=-2).clamp(min=1e-6).sqrt() * conf.tri_scale
        # For each anchor, soft-mine positives and negatives.
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        # assumes every identity has the same number of positives in the batch
        # (e.g. PK sampling with fixed K) so the view below is rectangular
        daps = dist[mask].view(n, -1)
        dans = dist[mask == 0].view(n, -1)
        # Soft attention: harder positives (larger d) / harder negatives
        # (smaller d) receive more weight.
        ap_wei = F.softmax(daps.detach() * conf.pos_wei, dim=1)
        an_wei = F.softmax(-dans.detach() * conf.neg_wei, dim=1)
        dist_ap = (daps * ap_wei).sum(dim=1)
        dist_an = (dans * an_wei).sum(dim=1)
        # Soft-margin formulation instead of a hard hinge.
        loss_indiv = F.softplus(dist_ap - dist_an)
        loss = loss_indiv.mean()
        if not return_info:
            return loss
        info = {'dap': dist_ap.mean().item(), 'dan': dist_an.mean().item(), 'indiv': loss_indiv}
        return loss, info

    def forward_slow(self, inputs, targets):
        """Per-anchor (loop) variant; tolerates varied positives-per-identity.

        Args:
        - inputs: feature matrix with shape (batch_size, feat_dim)
        - targets: ground truth labels with shape (batch_size,)
        """
        n = inputs.size(0)
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # BUGFIX: keyword beta/alpha instead of the removed positional overload.
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2).clamp_(min=1e-12).sqrt_()
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        dist_ap, dist_an = [], []
        for i in range(n):
            tmp = mask[i]
            # Exclude self from the positive set. NOTE(review): this also makes
            # the anchor itself (distance ~0) count as a negative below —
            # preserved from the original (flagged "fx bug" there).
            tmp[i] = 0
            daps = dist[i][mask[i]]
            ap_wei = F.softmax(daps.detach(), dim=0)
            dist_ap.append((daps * ap_wei).sum().unsqueeze(0))
            dans = dist[i][mask[i] == 0]
            an_wei = F.softmax(-dans.detach(), dim=0)
            dist_an.append((dans * an_wei).sum().unsqueeze(0))
        dist_ap = torch.cat(dist_ap)
        dist_an = torch.cat(dist_an)
        # Soft margin instead of the hinge ranking loss.
        loss = F.softplus(dist_ap - dist_an).mean()
        return loss
if __name__ == '__main__':
    # Smoke test: build a backbone, count its FLOPs/params with thop, then
    # exit. The training-step code after exit(1) is currently dead.
    from lz import *
    from pathlib import Path
    init_dev(3)  # pin to GPU 3 — NOTE(review): confirm this device id is intended
    # conf.input_size = 112
    # conf.embedding_size = 512
    # conf.bottle_neck = False
    # conf.mbfc_se = False
    # conf.net_depth = 18
    # conf.out_type = 'fc'
    # conf.mid_type = ''  # 'gpool'
    model = Backbone(conf.net_depth, 0.3, 'ir_se', ebsize=512)
    # model = MobileFaceNet(2048,
    #                       width_mult=1,
    #                       depth_mult=1,)
    model.eval()
    model.cuda()
    print(model)
    # params = []
    # # wmdm = "1.0,2.25 1.1,1.86 1.2,1.56 1.3,1.33 1.4,1.15 1.5,1.0".split(' ') # 1,2 1.56,2 1.0,1.0
    # wmdm = "1.2,1.56".split(' ')
    # wmdm = [(float(wd.split(',')[0]), float(wd.split(',')[1])) for wd in wmdm]
    # for wd in wmdm:
    #     wm, dm = wd
    #     model = MobileFaceNet(512,
    #                           width_mult=wm,
    #                           depth_mult=dm,
    #                           ).cuda()
    #     model.eval()
    #     print('mbfc:\n', model)
    #     ttl_params = (sum(p.numel() for p in model.parameters()) / 1000000.0)
    #     print('Total params: %.2fM' % ttl_params)
    #     params.append(ttl_params)
    # print(params)
    # plt.plot(wms, params)
    # plt.show()
    # dms = np.arange(1, 2, .01)
    # params2 = []
    # for dm in dms:
    #     model = MobileFaceNet(512,
    #                           width_mult=1.,
    #                           depth_mult=dm,
    #                           ).cuda()
    #     model.eval()
    #     print('mobilenetv3:\n', model)
    #     ttl_params = (sum(p.numel() for p in model.parameters()) / 1000000.0)
    #     params2.append(ttl_params)
    #     print('Total params: %.2fM' % ttl_params)
    # plt.plot(dms, params2)
    # plt.show()
    from thop import profile
    from lz import timer
    # NOTE(review): `input` shadows the builtin of the same name.
    input = torch.randn(1, 3, conf.input_size, conf.input_size).cuda()
    flops, params = profile(model, inputs=(input,),
                            # only_ops=(nn.Conv2d, nn.Linear),
                            verbose=False,
                            )
    flops /= 10 ** 9  # -> GFLOPs
    params /= 10 ** 6  # -> millions of parameters
    print(params, flops, )
    exit(1)  # NOTE(review): everything below is unreachable
    # One forward/backward pass through the AdaMArcface head (dead code).
    classifier = AdaMArcface(classnum=10).cuda()
    classifier.train()
    model.train()
    bs = 32
    input_size = (bs, 3, 112, 112)
    target = to_torch(np.random.randint(low=0, high=10, size=(bs,)), ).cuda()
    x = torch.rand(input_size).cuda()
    out = model(x, )
    logits, mrg_mn = classifier(out, target)
    loss = nn.CrossEntropyLoss()(logits, target)
    # Encourage a larger learned margin by subtracting its mean from the loss.
    (loss - 10 * mrg_mn).backward()
    print(loss, classifier.m.grad)
| [
"torch.nn.Dropout",
"torch.nn.init.uniform_",
"torch.nn.InstanceNorm2d",
"numpy.sin",
"torch.nn.init.constant_",
"numpy.random.randint",
"modules.bn.InPlaceABNSync",
"models.lpf.Downsample",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.utils.checkpoint.checkpoint_sequential",
"numpy.stack",... | [((7762, 7816), 'collections.namedtuple', 'namedtuple', (['"""Block"""', "['in_channel', 'depth', 'stride']"], {}), "('Block', ['in_channel', 'depth', 'stride'])\n", (7772, 7816), False, 'from collections import namedtuple\n'), ((26297, 26317), 'numpy.stack', 'np.stack', (['controller'], {}), '(controller)\n', (26305, 26317), True, 'import numpy as np\n'), ((26381, 26425), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['controller', 'np.float32'], {}), '(controller, np.float32)\n', (26401, 26425), True, 'import numpy as np\n'), ((79641, 79687), 'thop.profile', 'profile', (['model'], {'inputs': '(input,)', 'verbose': '(False)'}), '(model, inputs=(input,), verbose=False)\n', (79648, 79687), False, 'from thop import profile\n'), ((1184, 1204), 'torch.nn.ELU', 'nn.ELU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1190, 1204), False, 'from torch import nn\n'), ((1556, 1576), 'torch.nn.AdaptiveAvgPool2d', 'AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (1573, 1576), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((1596, 1681), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', '(channels // reduction)'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(channels, channels // reduction, kernel_size=1, padding=0, bias=False\n )\n', (1605, 1681), False, 'from torch import nn\n'), ((1807, 1892), 'torch.nn.Conv2d', 'nn.Conv2d', (['(channels // reduction)', 'channels'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(channels // reduction, channels, kernel_size=1, padding=0, bias=False\n )\n', (1816, 1892), False, 'from torch import nn\n'), ((1970, 1982), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1980, 1982), False, 'from torch import nn\n'), ((2029, 2074), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc1.weight.data'], {}), '(self.fc1.weight.data)\n', (2052, 2074), False, 'from torch import nn\n'), ((2083, 
2128), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.fc2.weight.data'], {}), '(self.fc2.weight.data)\n', (2106, 2128), False, 'from torch import nn\n'), ((3347, 3365), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['depth'], {}), '(depth)\n', (3358, 3365), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((13569, 13589), 'torch.nn.Sequential', 'Sequential', (['*modules'], {}), '(*modules)\n', (13579, 13589), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((26733, 26851), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_c'], {'out_channels': 'out_c', 'kernel_size': 'kernel', 'groups': 'groups', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_c, out_channels=out_c, kernel_size=kernel, groups=groups,\n stride=stride, padding=padding, bias=False)\n', (26742, 26851), False, 'from torch import nn\n'), ((27427, 27545), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_c'], {'out_channels': 'out_c', 'kernel_size': 'kernel', 'groups': 'groups', 'stride': 'stride', 'padding': 'padding', 'bias': '(False)'}), '(in_c, out_channels=out_c, kernel_size=kernel, groups=groups,\n stride=stride, padding=padding, bias=False)\n', (27436, 27545), False, 'from torch import nn\n'), ((29751, 29771), 'torch.nn.Sequential', 'Sequential', (['*modules'], {}), '(*modules)\n', (29761, 29771), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((33621, 33651), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['embedding_size'], {}), '(embedding_size)\n', (33635, 33651), False, 'from torch import nn\n'), ((35705, 35741), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.kernel'], {}), '(self.kernel)\n', (35728, 35741), False, 'from torch import nn\n'), ((38629, 38665), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', 
(['self.kernel'], {}), '(self.kernel)\n', (38652, 38665), False, 'from torch import nn\n'), ((47250, 47297), 'torch.nn.Linear', 'nn.Linear', (['embedding_size', 'classnum'], {'bias': '(False)'}), '(embedding_size, classnum, bias=False)\n', (47259, 47297), False, 'from torch import nn\n'), ((47788, 47801), 'numpy.float32', 'np.float32', (['m'], {}), '(m)\n', (47798, 47801), True, 'import numpy as np\n'), ((47815, 47832), 'numpy.float32', 'np.float32', (['np.pi'], {}), '(np.pi)\n', (47825, 47832), True, 'import numpy as np\n'), ((51830, 51843), 'numpy.float32', 'np.float32', (['m'], {}), '(m)\n', (51840, 51843), True, 'import numpy as np\n'), ((51857, 51874), 'numpy.float32', 'np.float32', (['np.pi'], {}), '(np.pi)\n', (51867, 51874), True, 'import numpy as np\n'), ((52047, 52056), 'numpy.cos', 'np.cos', (['m'], {}), '(m)\n', (52053, 52056), True, 'import numpy as np\n'), ((52109, 52118), 'numpy.sin', 'np.sin', (['m'], {}), '(m)\n', (52115, 52118), True, 'import numpy as np\n'), ((63215, 63224), 'numpy.cos', 'np.cos', (['m'], {}), '(m)\n', (63221, 63224), True, 'import numpy as np\n'), ((63246, 63255), 'numpy.sin', 'np.sin', (['m'], {}), '(m)\n', (63252, 63255), True, 'import numpy as np\n'), ((71269, 71305), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.weight'], {}), '(self.weight)\n', (71292, 71305), False, 'from torch import nn\n'), ((74295, 74330), 'torch.nn.MarginRankingLoss', 'nn.MarginRankingLoss', ([], {'margin': 'margin'}), '(margin=margin)\n', (74315, 74330), False, 'from torch import nn\n'), ((80222, 80243), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (80241, 80243), False, 'from torch import nn\n'), ((1766, 1787), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1773, 1787), False, 'from torch import nn\n'), ((3211, 3251), 'modules.bn.InPlaceABNSync', 'InPlaceABNSync', (['depth'], {'activation': '"""none"""'}), "(depth, activation='none')\n", (3225, 3251), False, 'from 
modules.bn import InPlaceABN, InPlaceABNSync\n'), ((3285, 3321), 'modules.bn.InPlaceABN', 'InPlaceABN', (['depth'], {'activation': '"""none"""'}), "(depth, activation='none')\n", (3295, 3321), False, 'from modules.bn import InPlaceABN, InPlaceABNSync\n'), ((3629, 3649), 'torch.nn.MaxPool2d', 'MaxPool2d', (['(1)', 'stride'], {}), '(1, stride)\n', (3638, 3649), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((5183, 5252), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', '(in_channel // 4)', '(1, 1)', '(1, 1)', '(0)'], {'bias': '(False)'}), '(in_channel, in_channel // 4, (1, 1), (1, 1), 0, bias=False)\n', (5192, 5252), False, 'from torch import nn\n'), ((5322, 5396), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_channel // 4)', '(in_channel // 4)', '(3, 3)', 'stride', '(1)'], {'bias': '(False)'}), '(in_channel // 4, in_channel // 4, (3, 3), stride, 1, bias=False)\n', (5331, 5396), False, 'from torch import nn\n'), ((5466, 5525), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_channel // 4)', 'depth', '(1, 1)', '(1)', '(0)'], {'bias': '(False)'}), '(in_channel // 4, depth, (1, 1), 1, 0, bias=False)\n', (5475, 5525), False, 'from torch import nn\n'), ((7199, 7236), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['depth'], {'affine': '(True)'}), '(depth, affine=True)\n', (7216, 7236), False, 'from torch import nn\n'), ((15728, 15755), 'torch.nn.Linear', 'nn.Linear', (['(4)', '(1)'], {'bias': '(False)'}), '(4, 1, bias=False)\n', (15737, 15755), False, 'from torch import nn\n'), ((16405, 16461), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.output_layer_sigma[-1].weight', '(0)'], {}), '(self.output_layer_sigma[-1].weight, 0)\n', (16422, 16461), False, 'from torch import nn\n'), ((16474, 16528), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.output_layer_sigma[-1].bias', '(1)'], {}), '(self.output_layer_sigma[-1].bias, 1)\n', (16491, 16528), False, 'from torch import nn\n'), ((20951, 
20998), 'torch.nn.init._calculate_fan_in_and_fan_out', 'init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (20985, 20998), False, 'from torch.nn import init\n'), ((21053, 21092), 'torch.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (21066, 21092), False, 'from torch.nn import init\n'), ((24016, 24063), 'torch.nn.init._calculate_fan_in_and_fan_out', 'init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (24050, 24063), False, 'from torch.nn import init\n'), ((24118, 24157), 'torch.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (24131, 24157), False, 'from torch.nn import init\n'), ((26959, 26992), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (26981, 26992), False, 'from torch import nn\n'), ((27648, 27681), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (27670, 27681), False, 'from torch import nn\n'), ((28782, 28832), 'models.lpf.Downsample', 'Downsample', ([], {'channels': 'groups', 'filt_size': '(5)', 'stride': '(2)'}), '(channels=groups, filt_size=5, stride=2)\n', (28792, 28832), False, 'from models.lpf import Downsample\n'), ((29909, 29940), 'numpy.ceil', 'np.ceil', (['(x * 1.0 / divisible_by)'], {}), '(x * 1.0 / divisible_by)\n', (29916, 29940), True, 'import numpy as np\n'), ((33567, 33602), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.linear'], {}), '(self.linear)\n', (33589, 33602), False, 'from torch import nn\n'), ((47503, 47533), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['kernel'], {}), '(kernel)\n', (47525, 47533), False, 'from torch import nn\n'), ((51446, 51476), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['kernel'], {}), '(kernel)\n', (51468, 51476), False, 'from torch import nn\n'), ((52167, 52176), 'numpy.sin', 
'np.sin', (['m'], {}), '(m)\n', (52173, 52176), True, 'import numpy as np\n'), ((61492, 61541), 'logging.info', 'logging.info', (['"""this concatins a difficult sample"""'], {}), "('this concatins a difficult sample')\n", (61504, 61541), False, 'import functools, logging\n'), ((62915, 62928), 'numpy.float16', 'np.float16', (['m'], {}), '(m)\n', (62925, 62928), True, 'import numpy as np\n'), ((62946, 62963), 'numpy.float16', 'np.float16', (['np.pi'], {}), '(np.pi)\n', (62956, 62963), True, 'import numpy as np\n'), ((62994, 63007), 'numpy.float32', 'np.float32', (['m'], {}), '(m)\n', (63004, 63007), True, 'import numpy as np\n'), ((63025, 63042), 'numpy.float32', 'np.float32', (['np.pi'], {}), '(np.pi)\n', (63035, 63042), True, 'import numpy as np\n'), ((347, 359), 'numpy.sqrt', 'np.sqrt', (['(0.9)'], {}), '(0.9)\n', (354, 359), True, 'import numpy as np\n'), ((2937, 2973), 'modules.bn.InPlaceABN', 'InPlaceABN', (['depth'], {'activation': '"""none"""'}), "(depth, activation='none')\n", (2947, 2973), False, 'from modules.bn import InPlaceABN, InPlaceABNSync\n'), ((3026, 3044), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['depth'], {}), '(depth)\n', (3037, 3044), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((3099, 3117), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['depth'], {}), '(depth)\n', (3110, 3117), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((3726, 3782), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(1, 1)', 'stride'], {'bias': '(False)'}), '(in_channel, depth, (1, 1), stride, bias=False)\n', (3735, 3782), False, 'from torch import nn\n'), ((3800, 3818), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['depth'], {}), '(depth)\n', (3811, 3818), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((3958, 4017), 
'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(3, 3)', '(1, 1)', '(1)'], {'bias': '(False)'}), '(in_channel, depth, (3, 3), (1, 1), 1, bias=False)\n', (3967, 4017), False, 'from torch import nn\n'), ((4080, 4134), 'torch.nn.Conv2d', 'nn.Conv2d', (['depth', 'depth', '(3, 3)', 'stride', '(1)'], {'bias': '(False)'}), '(depth, depth, (3, 3), stride, 1, bias=False)\n', (4089, 4134), False, 'from torch import nn\n'), ((4253, 4276), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['in_channel'], {}), '(in_channel)\n', (4264, 4276), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((4294, 4353), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(3, 3)', '(1, 1)', '(1)'], {'bias': '(False)'}), '(in_channel, depth, (3, 3), (1, 1), 1, bias=False)\n', (4303, 4353), False, 'from torch import nn\n'), ((4386, 4440), 'torch.nn.Conv2d', 'nn.Conv2d', (['depth', 'depth', '(3, 3)', 'stride', '(1)'], {'bias': '(False)'}), '(depth, depth, (3, 3), stride, 1, bias=False)\n', (4395, 4440), False, 'from torch import nn\n'), ((4442, 4460), 'torch.nn.BatchNorm2d', 'BatchNorm2d', (['depth'], {}), '(depth)\n', (4453, 4460), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((4960, 5016), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(1, 1)', 'stride'], {'bias': '(False)'}), '(in_channel, depth, (1, 1), stride, bias=False)\n', (4969, 5016), False, 'from torch import nn\n'), ((10431, 10473), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(3, 64, (3, 3), 1, 1, bias=False)\n', (10440, 10473), False, 'from torch import nn\n'), ((12207, 12226), 'torch.nn.Dropout', 'Dropout', (['drop_ratio'], {}), '(drop_ratio)\n', (12214, 12226), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((12271, 
12376), 'torch.nn.Linear', 'nn.Linear', (['(out_planes * out_resolution ** 2)', 'ebsize'], {'bias': '(True if not conf.upgrade_bnneck else False)'}), '(out_planes * out_resolution ** 2, ebsize, bias=True if not conf.\n upgrade_bnneck else False)\n', (12280, 12376), False, 'from torch import nn\n'), ((12415, 12437), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (12429, 12437), False, 'from torch import nn\n'), ((19186, 19224), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (19209, 19224), False, 'from torch import nn\n'), ((19839, 19882), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.res_layer[5].weight', '(0)'], {}), '(m.res_layer[5].weight, 0)\n', (19856, 19882), False, 'from torch import nn\n'), ((33837, 33875), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (33860, 33875), False, 'from torch import nn\n'), ((36880, 36902), 'numpy.log', 'np.log', (['input.shape[0]'], {}), '(input.shape[0])\n', (36886, 36902), True, 'import numpy as np\n'), ((36931, 36960), 'numpy.log', 'np.log', (['(self.k / (1 - self.k))'], {}), '(self.k / (1 - self.k))\n', (36937, 36960), True, 'import numpy as np\n'), ((39632, 39642), 'numpy.log', 'np.log', (['bs'], {}), '(bs)\n', (39638, 39642), True, 'import numpy as np\n'), ((39742, 39771), 'numpy.log', 'np.log', (['(self.k / (1 - self.k))'], {}), '(self.k / (1 - self.k))\n', (39748, 39771), True, 'import numpy as np\n'), ((57509, 57576), 'logging.info', 'logging.info', (["(f'pos_med: {tpmean:.2e} ' + f'neg_med: {tnmean:.2e} ')"], {}), "(f'pos_med: {tpmean:.2e} ' + f'neg_med: {tnmean:.2e} ')\n", (57521, 57576), False, 'import functools, logging\n'), ((80050, 80095), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(10)', 'size': '(bs,)'}), '(low=0, high=10, size=(bs,))\n', (80067, 80095), True, 'import numpy as np\n'), ((2695, 2735), 'modules.bn.InPlaceABNSync', 
'InPlaceABNSync', (['depth'], {'activation': '"""none"""'}), "(depth, activation='none')\n", (2709, 2735), False, 'from modules.bn import InPlaceABN, InPlaceABNSync\n'), ((2822, 2858), 'modules.bn.InPlaceABN', 'InPlaceABN', (['depth'], {'activation': '"""none"""'}), "(depth, activation='none')\n", (2832, 2858), False, 'from modules.bn import InPlaceABN, InPlaceABNSync\n'), ((6081, 6107), 'torch.nn.Conv2d', 'nn.Conv2d', (['*args'], {}), '(*args, **kwargs)\n', (6090, 6107), False, 'from torch import nn\n'), ((6319, 6375), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(1, 1)', 'stride'], {'bias': '(False)'}), '(in_channel, depth, (1, 1), stride, bias=False)\n', (6328, 6375), False, 'from torch import nn\n'), ((6540, 6577), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (6552, 6577), False, 'from torch import nn\n'), ((6599, 6645), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'depth', '(1)', '(1)'], {'bias': '(False)'}), '(in_channel, depth, 1, 1, bias=False)\n', (6608, 6645), False, 'from torch import nn\n'), ((10682, 10724), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(3, 64, (3, 3), 1, 1, bias=False)\n', (10691, 10724), False, 'from torch import nn\n'), ((10886, 10929), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(64, 64, (3, 3), 1, 1, bias=False)\n', (10895, 10929), False, 'from torch import nn\n'), ((12588, 12607), 'torch.nn.Dropout', 'Dropout', (['drop_ratio'], {}), '(drop_ratio)\n', (12595, 12607), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((12660, 12765), 'torch.nn.Linear', 'nn.Linear', (['(out_planes * out_resolution ** 2)', 'ebsize'], {'bias': '(True if not conf.upgrade_bnneck else False)'}), '(out_planes * out_resolution ** 2, ebsize, bias=True if not conf.\n upgrade_bnneck else 
False)\n', (12669, 12765), False, 'from torch import nn\n'), ((12812, 12834), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (12826, 12834), False, 'from torch import nn\n'), ((12985, 13008), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (13005, 13008), False, 'from torch import nn\n'), ((13053, 13094), 'torch.nn.Linear', 'nn.Linear', (['out_planes', 'ebsize'], {'bias': '(False)'}), '(out_planes, ebsize, bias=False)\n', (13062, 13094), False, 'from torch import nn\n'), ((13112, 13134), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (13126, 13134), False, 'from torch import nn\n'), ((13760, 13783), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (13780, 13783), False, 'from torch import nn\n'), ((13856, 13902), 'torch.nn.Linear', 'nn.Linear', (['(64 * expansions)', 'ebsize'], {'bias': '(False)'}), '(64 * expansions, ebsize, bias=False)\n', (13865, 13902), False, 'from torch import nn\n'), ((13934, 13956), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (13948, 13956), False, 'from torch import nn\n'), ((14057, 14080), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (14077, 14080), False, 'from torch import nn\n'), ((14153, 14200), 'torch.nn.Linear', 'nn.Linear', (['(128 * expansions)', 'ebsize'], {'bias': '(False)'}), '(128 * expansions, ebsize, bias=False)\n', (14162, 14200), False, 'from torch import nn\n'), ((14232, 14254), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (14246, 14254), False, 'from torch import nn\n'), ((14355, 14378), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (14375, 14378), False, 'from torch import nn\n'), ((14451, 14498), 'torch.nn.Linear', 'nn.Linear', (['(256 * expansions)', 'ebsize'], {'bias': '(False)'}), '(256 * expansions, ebsize, bias=False)\n', (14460, 14498), False, 'from torch import nn\n'), ((14530, 
14552), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (14544, 14552), False, 'from torch import nn\n'), ((18612, 18650), 'torch.utils.checkpoint.checkpoint_sequential', 'checkpoint_sequential', (['self.body', '(2)', 'x'], {}), '(self.body, 2, x)\n', (18633, 18650), False, 'from torch.utils.checkpoint import checkpoint_sequential\n'), ((66178, 66193), 'numpy.cos', 'np.cos', (['self.m2'], {}), '(self.m2)\n', (66184, 66193), True, 'import numpy as np\n'), ((66212, 66227), 'numpy.sin', 'np.sin', (['self.m2'], {}), '(self.m2)\n', (66218, 66227), True, 'import numpy as np\n'), ((11178, 11220), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(3, 64, (3, 3), 1, 1, bias=False)\n', (11187, 11220), False, 'from torch import nn\n'), ((11264, 11298), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(64)'], {'affine': '(True)'}), '(64, affine=True)\n', (11281, 11298), False, 'from torch import nn\n'), ((11396, 11439), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(64, 64, (3, 3), 1, 1, bias=False)\n', (11405, 11439), False, 'from torch import nn\n'), ((11483, 11517), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(64)'], {'affine': '(True)'}), '(64, affine=True)\n', (11500, 11517), False, 'from torch import nn\n'), ((14740, 14759), 'torch.nn.Dropout', 'Dropout', (['drop_ratio'], {}), '(drop_ratio)\n', (14747, 14759), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((14832, 14906), 'torch.nn.Linear', 'nn.Linear', (['(64 * expansions * (out_resolution * 8) ** 2)', 'ebsize'], {'bias': '(False)'}), '(64 * expansions * (out_resolution * 8) ** 2, ebsize, bias=False)\n', (14841, 14906), False, 'from torch import nn\n'), ((14938, 14960), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (14952, 14960), False, 'from torch import nn\n'), ((15061, 
15080), 'torch.nn.Dropout', 'Dropout', (['drop_ratio'], {}), '(drop_ratio)\n', (15068, 15080), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((15153, 15228), 'torch.nn.Linear', 'nn.Linear', (['(128 * expansions * (out_resolution * 4) ** 2)', 'ebsize'], {'bias': '(False)'}), '(128 * expansions * (out_resolution * 4) ** 2, ebsize, bias=False)\n', (15162, 15228), False, 'from torch import nn\n'), ((15260, 15282), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (15274, 15282), False, 'from torch import nn\n'), ((15383, 15402), 'torch.nn.Dropout', 'Dropout', (['drop_ratio'], {}), '(drop_ratio)\n', (15390, 15402), False, 'from torch.nn import BatchNorm1d, BatchNorm2d, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Parameter\n'), ((15475, 15550), 'torch.nn.Linear', 'nn.Linear', (['(256 * expansions * (out_resolution * 2) ** 2)', 'ebsize'], {'bias': '(False)'}), '(256 * expansions * (out_resolution * 2) ** 2, ebsize, bias=False)\n', (15484, 15550), False, 'from torch import nn\n'), ((15582, 15604), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['ebsize'], {}), '(ebsize)\n', (15596, 15604), False, 'from torch import nn\n'), ((11706, 11748), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)', '(1)', '(1)'], {'bias': '(False)'}), '(3, 64, (3, 3), 1, 1, bias=False)\n', (11715, 11748), False, 'from torch import nn\n'), ((11792, 11826), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['(64)'], {'affine': '(True)'}), '(64, affine=True)\n', (11809, 11826), False, 'from torch import nn\n'), ((19609, 19647), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (19632, 19647), False, 'from torch import nn\n'), ((34260, 34298), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (34283, 34298), False, 'from torch import nn\n'), ((26105, 26115), 
'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (26111, 26115), True, 'import numpy as np\n'), ((26123, 26133), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (26129, 26133), True, 'import numpy as np\n'), ((26183, 26193), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (26189, 26193), True, 'import numpy as np\n'), ((26200, 26210), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (26206, 26210), True, 'import numpy as np\n')] |
import warnings
try:
import matplotlib.pyplot as pl
except ImportError:
warnings.warn("matplotlib could not be loaded!")
pass
from ._labels import labels
from ..utils import format_value, ordinal_str
from ._utils import convert_ordering, convert_color, merge_nodes, get_sort_order, sort_inds, dendrogram_coords
from . import colors
import numpy as np
import scipy
import copy
from .. import Explanation, Cohorts
# TODO: improve the bar chart to look better like the waterfall plot with numbers inside the bars when they fit
# TODO: Have the Explanation object track enough data so that we can tell (and so show) how many instances are in each cohort
def bar(shap_values, max_display=10, order=Explanation.abs, clustering=None, clustering_cutoff=0.5,
        merge_cohorts=False, show_data="auto", show=True):
    """ Create a bar plot of a set of SHAP values.
    If a single sample is passed then we plot the SHAP values as a bar chart. If an
    Explanation with many samples is passed then we plot the mean absolute value for
    each feature column as a bar chart.
    Parameters
    ----------
    shap_values : shap.Explanation or shap.Cohorts or dictionary of shap.Explanation objects
        A single row of a SHAP Explanation object (i.e. shap_values[0]) or a multi-row Explanation
        object that we want to summarize.
    max_display : int
        The maximum number of bars to display.
    order : callable or OpChain
        Ordering applied to the features before truncation to ``max_display``
        (defaults to sorting by absolute value).
    clustering : None, False, or numpy.ndarray
        None uses any clustering stored on the Explanation, False disables the
        dendrogram, otherwise the given partition tree (an (n-1, 4) array) is used.
    clustering_cutoff : float
        Tree joins above this distance are not drawn and do not constrain the bar order.
    merge_cohorts : bool
        NOTE(review): accepted but never referenced in this implementation.
    show_data : "auto" or bool
        Whether feature values are prepended to the y-tick labels; "auto" shows them
        only when the Explanation has no transform history.
    show : bool
        If show is set to False then we don't call the matplotlib.pyplot.show() function. This allows
        further customization of the plot by the caller after the bar() function is finished.
    """
    # assert str(type(shap_values)).endswith("Explanation'>"), "The shap_values paramemter must be a shap.Explanation object!"
    # convert Explanation objects to dictionaries: all code below works on a
    # {label: Explanation} mapping, one entry per cohort
    if isinstance(shap_values, Explanation):
        cohorts = {"": shap_values}
    elif isinstance(shap_values, Cohorts):
        cohorts = shap_values.cohorts
    else:
        assert isinstance(shap_values, dict), "You must pass an Explanation object, Cohorts object, or dictionary to bar plot!"
    # unpack our list of Explanation objects we need to plot
    cohort_labels = list(cohorts.keys())
    cohort_exps = list(cohorts.values())
    for i in range(len(cohort_exps)):
        # collapse multi-row cohorts to mean(|shap|) per feature so every cohort is 1-D
        if len(cohort_exps[i].shape) == 2:
            cohort_exps[i] = cohort_exps[i].abs.mean(0)
        assert isinstance(cohort_exps[i], Explanation), "The shap_values paramemter must be a Explanation object, Cohorts object, or dictionary of Explanation objects!"
        assert cohort_exps[i].shape == cohort_exps[0].shape, "When passing several Explanation objects they must all have the same shape!"
        # TODO: check other attributes for equality? like feature names perhaps? probably clustering as well.
    # unpack the Explanation object; the first cohort supplies shared metadata
    features = cohort_exps[0].data
    feature_names = cohort_exps[0].feature_names
    if clustering is None:
        partition_tree = getattr(cohort_exps[0], "clustering", None)
    elif clustering is False:
        partition_tree = None
    else:
        partition_tree = clustering
    if partition_tree is not None:
        assert partition_tree.shape[1] == 4, "The clustering provided by the Explanation object does not seem to be a partition tree (which is all shap.plots.bar supports)!"
    op_history = cohort_exps[0].op_history
    # values has shape (num_cohorts, num_features)
    values = np.array([cohort_exps[i].values for i in range(len(cohort_exps))])
    if len(values[0]) == 0:
        raise Exception("The passed Explanation is empty! (so there is nothing to plot)")
    # we show the data on auto only when there are no transforms
    if show_data == "auto":
        show_data = len(op_history) == 0
    # TODO: Rather than just show the "1st token", "2nd token", etc. it would be better to show the "Instance 0's 1st but", etc
    if issubclass(type(feature_names), str):
        feature_names = [ordinal_str(i)+" "+feature_names for i in range(len(values[0]))]
    # build our auto xlabel based on the transform history of the Explanation object
    xlabel = "SHAP value"
    for op in op_history:
        if op["name"] == "abs":
            xlabel = "|"+xlabel+"|"
        elif op["name"] == "__getitem__":
            pass # no need for slicing to effect our label, it will be used later to find the sizes of cohorts
        else:
            xlabel = str(op["name"])+"("+xlabel+")"
    # find how many instances are in each cohort (if they were created from an Explanation object)
    cohort_sizes = []
    for exp in cohort_exps:
        for op in exp.op_history:
            if op.get("collapsed_instances", False): # see if this is the first op to collapse the instances
                cohort_sizes.append(op["prev_shape"][0])
                break
    # unwrap any pandas series (string type check avoids a hard pandas dependency)
    if str(type(features)) == "<class 'pandas.core.series.Series'>":
        if feature_names is None:
            feature_names = list(features.index)
        features = features.values
    # ensure we at least have default feature names
    if feature_names is None:
        feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(values[0]))])
    # determine how many top features we will plot
    if max_display is None:
        max_display = len(feature_names)
    num_features = min(max_display, len(values[0]))
    # iteratively merge nodes until we can cut off the smallest feature values to stay within
    # num_features without breaking a cluster tree
    # orig_inds[k] lists the original feature indices merged into current column k
    orig_inds = [[i] for i in range(len(values[0]))]
    orig_values = values.copy()
    while True:
        # average the per-cohort orderings to get one shared feature order
        feature_order = np.argsort(np.mean([np.argsort(convert_ordering(order, Explanation(values[i]))) for i in range(values.shape[0])], 0))
        if partition_tree is not None:
            # compute the leaf order if we were to show (and so have the ordering respect) the whole partition tree
            clust_order = sort_inds(partition_tree, np.abs(values).mean(0))
            # now relax the requirement to match the partition tree ordering for connections above clustering_cutoff
            dist = scipy.spatial.distance.squareform(scipy.cluster.hierarchy.cophenet(partition_tree))
            feature_order = get_sort_order(dist, clust_order, clustering_cutoff, feature_order)
            # if the last feature we can display is connected in a tree the next feature then we can't just cut
            # off the feature ordering, so we need to merge some tree nodes and then try again.
            if max_display < len(feature_order) and dist[feature_order[max_display-1],feature_order[max_display-2]] <= clustering_cutoff:
                #values, partition_tree, orig_inds = merge_nodes(values, partition_tree, orig_inds)
                partition_tree, ind1, ind2 = merge_nodes(np.abs(values).mean(0), partition_tree)
                for i in range(len(values)):
                    # fold column ind2 into ind1 and drop it, keeping orig_inds in sync
                    values[:,ind1] += values[:,ind2]
                    values = np.delete(values, ind2, 1)
                    orig_inds[ind1] += orig_inds[ind2]
                    del orig_inds[ind2]
            else:
                break
        else:
            break
    # here we build our feature names, accounting for the fact that some features might be merged together
    feature_inds = feature_order[:max_display]
    y_pos = np.arange(len(feature_inds), 0, -1)
    feature_names_new = []
    for pos,inds in enumerate(orig_inds):
        if len(inds) == 1:
            feature_names_new.append(feature_names[inds[0]])
        elif len(inds) <= 2:
            feature_names_new.append(" + ".join([feature_names[i] for i in inds]))
        else:
            # name a large merged group after its strongest member
            max_ind = np.argmax(np.abs(orig_values).mean(0)[inds])
            feature_names_new.append(feature_names[inds[max_ind]] + " + %d other features" % (len(inds)-1))
    feature_names = feature_names_new
    # see how many individual (vs. grouped at the end) features we are plotting
    if num_features < len(values[0]):
        num_cut = np.sum([len(orig_inds[feature_order[i]]) for i in range(num_features-1, len(values[0]))])
        # collapse everything past the display limit into the last shown bar
        values[:,feature_order[num_features-1]] = np.sum([values[:,feature_order[i]] for i in range(num_features-1, len(values[0]))], 0)
    # build our y-tick labels
    yticklabels = []
    for i in feature_inds:
        if features is not None and show_data:
            yticklabels.append(format_value(features[i], "%0.03f") + " = " + feature_names[i])
        else:
            yticklabels.append(feature_names[i])
    if num_features < len(values[0]):
        yticklabels[-1] = "Sum of %d other features" % num_cut
    # compute our figure size based on how many features we are showing
    row_height = 0.5
    pl.gcf().set_size_inches(8, num_features * row_height * np.sqrt(len(values)) + 1.5)
    # if negative values are present then we draw a vertical line to mark 0, otherwise the axis does this for us...
    negative_values_present = np.sum(values[:,feature_order[:num_features]] < 0) > 0
    if negative_values_present:
        pl.axvline(0, 0, 1, color="#000000", linestyle="-", linewidth=1, zorder=1)
    # draw the bars (one hatch pattern per cohort so overlapping bars stay distinguishable)
    patterns = (None, '\\\\', '++', 'xx', '////', '*', 'o', 'O', '.', '-')
    total_width = 0.7
    bar_width = total_width / len(values)
    for i in range(len(values)):
        # center the group of cohort bars around each y tick
        ypos_offset = - ((i - len(values) / 2) * bar_width + bar_width / 2)
        pl.barh(
            y_pos + ypos_offset, values[i,feature_inds],
            bar_width, align='center',
            color=[colors.blue_rgb if values[i,feature_inds[j]] <= 0 else colors.red_rgb for j in range(len(y_pos))],
            hatch=patterns[i], edgecolor=(1,1,1,0.8), label=f"{cohort_labels[i]} [{cohort_sizes[i] if i < len(cohort_sizes) else None}]"
        )
    # draw the yticks (the 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks)
    pl.yticks(list(y_pos) + list(y_pos + 1e-8), yticklabels + [l.split('=')[-1] for l in yticklabels], fontsize=13)
    xlen = pl.xlim()[1] - pl.xlim()[0]
    fig = pl.gcf()
    ax = pl.gca()
    #xticks = ax.get_xticks()
    # convert a distance in inches into data-coordinate x units for text padding
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = bbox.width, bbox.height
    bbox_to_xscale = xlen/width
    for i in range(len(values)):
        ypos_offset = - ((i - len(values) / 2) * bar_width + bar_width / 2)
        for j in range(len(y_pos)):
            ind = feature_order[j]
            # label each bar with its value, offset 5/72 inch past the bar end
            if values[i,ind] < 0:
                pl.text(
                    values[i,ind] - (5/72)*bbox_to_xscale, y_pos[j] + ypos_offset, format_value(values[i,ind], '%+0.02f'),
                    horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,
                    fontsize=12
                )
            else:
                pl.text(
                    values[i,ind] + (5/72)*bbox_to_xscale, y_pos[j] + ypos_offset, format_value(values[i,ind], '%+0.02f'),
                    horizontalalignment='left', verticalalignment='center', color=colors.red_rgb,
                    fontsize=12
                )
    # put horizontal lines for each feature row
    for i in range(num_features):
        pl.axhline(i+1, color="#888888", lw=0.5, dashes=(1, 5), zorder=-1)
    if features is not None:
        features = list(features)
        # try and round off any trailing zeros after the decimal point in the feature values
        for i in range(len(features)):
            try:
                if round(features[i]) == features[i]:
                    features[i] = int(features[i])
            except:
                pass # features[i] must not be a number
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('none')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    if negative_values_present:
        pl.gca().spines['left'].set_visible(False)
    pl.gca().tick_params('x', labelsize=11)
    xmin,xmax = pl.gca().get_xlim()
    ymin,ymax = pl.gca().get_ylim()
    # pad the x axis by 5% (on both sides when negative values are shown)
    if negative_values_present:
        pl.gca().set_xlim(xmin - (xmax-xmin)*0.05, xmax + (xmax-xmin)*0.05)
    else:
        pl.gca().set_xlim(xmin, xmax + (xmax-xmin)*0.05)
    # if features is None:
    #     pl.xlabel(labels["GLOBAL_VALUE"], fontsize=13)
    # else:
    pl.xlabel(xlabel, fontsize=13)
    if len(values) > 1:
        pl.legend(fontsize=12)
    # color the y tick labels that have the feature values as gray
    # (these fall behind the black ones with just the feature name)
    tick_labels = pl.gca().yaxis.get_majorticklabels()
    for i in range(num_features):
        tick_labels[i].set_color("#999999")
    # draw a dendrogram if we are given a partition tree
    if partition_tree is not None:
        # compute the dendrogram line positions based on our current feature order
        feature_pos = np.argsort(feature_order)
        ylines,xlines = dendrogram_coords(feature_pos, partition_tree)
        # plot the distance cut line above which we don't show tree edges
        xmin,xmax = pl.xlim()
        xlines_min,xlines_max = np.min(xlines),np.max(xlines)
        ct_line_pos = (clustering_cutoff / (xlines_max - xlines_min)) * 0.1 * (xmax - xmin) + xmax
        pl.text(
            ct_line_pos + 0.005 * (xmax - xmin), (ymax - ymin)/2, "Clustering cutoff = " + format_value(clustering_cutoff, '%0.02f'),
            horizontalalignment='left', verticalalignment='center', color="#999999",
            fontsize=12, rotation=-90
        )
        l = pl.axvline(ct_line_pos, color="#dddddd", dashes=(1, 1))
        l.set_clip_on(False)
        for (xline, yline) in zip(xlines, ylines):
            # normalize the x values to fall between 0 and 1
            xv = (np.array(xline) / (xlines_max - xlines_min))
            # only draw if we are not going past distance threshold
            if np.array(xline).max() <= clustering_cutoff:
                # only draw if we are not going past the bottom of the plot
                if yline.max() < max_display:
                    l = pl.plot(
                        xv * 0.1 * (xmax - xmin) + xmax,
                        max_display - np.array(yline),
                        color="#999999"
                    )
                    for v in l:
                        v.set_clip_on(False)
    if show:
        pl.show()
# def compute_sort_counts(partition_tree, leaf_values, pos=None):
# if pos is None:
# pos = partition_tree.shape[0]-1
# M = partition_tree.shape[0] + 1
# if pos < 0:
# return 1,leaf_values[pos + M]
# left = int(partition_tree[pos, 0]) - M
# right = int(partition_tree[pos, 1]) - M
# left_val,left_sum = compute_sort_counts(partition_tree, leaf_values, left)
# right_val,right_sum = compute_sort_counts(partition_tree, leaf_values, right)
# if left_sum > right_sum:
# left_val = right_val + 1
# else:
# right_val = left_val + 1
# if left >= 0:
# partition_tree[left,3] = left_val
# if right >= 0:
# partition_tree[right,3] = right_val
# return max(left_val, right_val) + 1, max(left_sum, right_sum)
def bar_legacy(shap_values, features=None, feature_names=None, max_display=None, show=True):
    """ Legacy bar plot of a single sample's SHAP values (1-D array input).
    Positive values are drawn in red and negative values in blue; features are
    ordered by decreasing |SHAP value| and truncated to ``max_display``.
    Parameters
    ----------
    shap_values : numpy.ndarray
        A 1-D array of SHAP values for one sample.
    features : array-like or pandas.Series, optional
        Feature values shown alongside the feature names in the y-tick labels.
    feature_names : list, optional
        Names for each feature; defaults to generic "Feature N" labels.
    max_display : int, optional
        Maximum number of bars to draw (defaults to 7).
    show : bool
        If False, skip ``matplotlib.pyplot.show()`` so the caller can customize the plot.
    """
    # unwrap pandas series (string type check avoids a hard pandas dependency)
    if str(type(features)) == "<class 'pandas.core.series.Series'>":
        if feature_names is None:
            feature_names = list(features.index)
        features = features.values
    if feature_names is None:
        feature_names = np.array([labels['FEATURE'] % str(i) for i in range(len(shap_values))])
    if max_display is None:
        max_display = 7
    else:
        max_display = min(len(feature_names), max_display)
    # sort features by decreasing absolute SHAP value
    feature_order = np.argsort(-np.abs(shap_values))
    #
    feature_inds = feature_order[:max_display]
    y_pos = np.arange(len(feature_inds), 0, -1)
    pl.barh(
        y_pos, shap_values[feature_inds],
        0.7, align='center',
        color=[colors.red_rgb if shap_values[feature_inds[i]] > 0 else colors.blue_rgb for i in range(len(y_pos))]
    )
    pl.yticks(y_pos, fontsize=13)
    if features is not None:
        features = list(features)
        # try and round off any trailing zeros after the decimal point in the feature values
        for i in range(len(features)):
            try:
                if round(features[i]) == features[i]:
                    features[i] = int(features[i])
            except TypeError:
                pass # features[i] must not be a number
    # build y-tick labels, including the feature value when available
    yticklabels = []
    for i in feature_inds:
        if features is not None:
            yticklabels.append(feature_names[i] + " = " + str(features[i]))
        else:
            yticklabels.append(feature_names[i])
    pl.gca().set_yticklabels(yticklabels)
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('none')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    #pl.gca().spines['left'].set_visible(False)
    pl.xlabel("SHAP value (impact on model output)")
    if show:
pl.show() | [
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.xlim",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"numpy.argsort",
"numpy.min",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.gca",
"warnings.warn"... | [((10041, 10049), 'matplotlib.pyplot.gcf', 'pl.gcf', ([], {}), '()\n', (10047, 10049), True, 'import matplotlib.pyplot as pl\n'), ((10059, 10067), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (10065, 10067), True, 'import matplotlib.pyplot as pl\n'), ((12314, 12344), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['xlabel'], {'fontsize': '(13)'}), '(xlabel, fontsize=13)\n', (12323, 12344), True, 'import matplotlib.pyplot as pl\n'), ((16187, 16216), 'matplotlib.pyplot.yticks', 'pl.yticks', (['y_pos'], {'fontsize': '(13)'}), '(y_pos, fontsize=13)\n', (16196, 16216), True, 'import matplotlib.pyplot as pl\n'), ((17128, 17176), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""SHAP value (impact on model output)"""'], {}), "('SHAP value (impact on model output)')\n", (17137, 17176), True, 'import matplotlib.pyplot as pl\n'), ((80, 128), 'warnings.warn', 'warnings.warn', (['"""matplotlib could not be loaded!"""'], {}), "('matplotlib could not be loaded!')\n", (93, 128), False, 'import warnings\n'), ((8968, 9019), 'numpy.sum', 'np.sum', (['(values[:, feature_order[:num_features]] < 0)'], {}), '(values[:, feature_order[:num_features]] < 0)\n', (8974, 9019), True, 'import numpy as np\n'), ((9063, 9137), 'matplotlib.pyplot.axvline', 'pl.axvline', (['(0)', '(0)', '(1)'], {'color': '"""#000000"""', 'linestyle': '"""-"""', 'linewidth': '(1)', 'zorder': '(1)'}), "(0, 0, 1, color='#000000', linestyle='-', linewidth=1, zorder=1)\n", (9073, 9137), True, 'import matplotlib.pyplot as pl\n'), ((11170, 11238), 'matplotlib.pyplot.axhline', 'pl.axhline', (['(i + 1)'], {'color': '"""#888888"""', 'lw': '(0.5)', 'dashes': '(1, 5)', 'zorder': '(-1)'}), "(i + 1, color='#888888', lw=0.5, dashes=(1, 5), zorder=-1)\n", (11180, 11238), True, 'import matplotlib.pyplot as pl\n'), ((12378, 12400), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (12387, 12400), True, 'import matplotlib.pyplot as pl\n'), ((12877, 12902), 'numpy.argsort', 
'np.argsort', (['feature_order'], {}), '(feature_order)\n', (12887, 12902), True, 'import numpy as np\n'), ((13077, 13086), 'matplotlib.pyplot.xlim', 'pl.xlim', ([], {}), '()\n', (13084, 13086), True, 'import matplotlib.pyplot as pl\n'), ((13544, 13599), 'matplotlib.pyplot.axvline', 'pl.axvline', (['ct_line_pos'], {'color': '"""#dddddd"""', 'dashes': '(1, 1)'}), "(ct_line_pos, color='#dddddd', dashes=(1, 1))\n", (13554, 13599), True, 'import matplotlib.pyplot as pl\n'), ((14387, 14396), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (14394, 14396), True, 'import matplotlib.pyplot as pl\n'), ((17203, 17212), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (17210, 17212), True, 'import matplotlib.pyplot as pl\n'), ((8737, 8745), 'matplotlib.pyplot.gcf', 'pl.gcf', ([], {}), '()\n', (8743, 8745), True, 'import matplotlib.pyplot as pl\n'), ((10003, 10012), 'matplotlib.pyplot.xlim', 'pl.xlim', ([], {}), '()\n', (10010, 10012), True, 'import matplotlib.pyplot as pl\n'), ((10018, 10027), 'matplotlib.pyplot.xlim', 'pl.xlim', ([], {}), '()\n', (10025, 10027), True, 'import matplotlib.pyplot as pl\n'), ((11916, 11924), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11922, 11924), True, 'import matplotlib.pyplot as pl\n'), ((11973, 11981), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11979, 11981), True, 'import matplotlib.pyplot as pl\n'), ((12009, 12017), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (12015, 12017), True, 'import matplotlib.pyplot as pl\n'), ((13119, 13133), 'numpy.min', 'np.min', (['xlines'], {}), '(xlines)\n', (13125, 13133), True, 'import numpy as np\n'), ((13134, 13148), 'numpy.max', 'np.max', (['xlines'], {}), '(xlines)\n', (13140, 13148), True, 'import numpy as np\n'), ((15850, 15869), 'numpy.abs', 'np.abs', (['shap_values'], {}), '(shap_values)\n', (15856, 15869), True, 'import numpy as np\n'), ((16845, 16853), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (16851, 16853), True, 'import matplotlib.pyplot as 
pl\n'), ((6164, 6212), 'scipy.cluster.hierarchy.cophenet', 'scipy.cluster.hierarchy.cophenet', (['partition_tree'], {}), '(partition_tree)\n', (6196, 6212), False, 'import scipy\n'), ((11645, 11653), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11651, 11653), True, 'import matplotlib.pyplot as pl\n'), ((11693, 11701), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11699, 11701), True, 'import matplotlib.pyplot as pl\n'), ((12074, 12082), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (12080, 12082), True, 'import matplotlib.pyplot as pl\n'), ((12160, 12168), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (12166, 12168), True, 'import matplotlib.pyplot as pl\n'), ((12555, 12563), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (12561, 12563), True, 'import matplotlib.pyplot as pl\n'), ((13781, 13796), 'numpy.array', 'np.array', (['xline'], {}), '(xline)\n', (13789, 13796), True, 'import numpy as np\n'), ((16887, 16895), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (16893, 16895), True, 'import matplotlib.pyplot as pl\n'), ((16935, 16943), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (16941, 16943), True, 'import matplotlib.pyplot as pl\n'), ((6989, 7015), 'numpy.delete', 'np.delete', (['values', 'ind2', '(1)'], {}), '(values, ind2, 1)\n', (6998, 7015), True, 'import numpy as np\n'), ((11739, 11747), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11745, 11747), True, 'import matplotlib.pyplot as pl\n'), ((11787, 11795), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11793, 11795), True, 'import matplotlib.pyplot as pl\n'), ((16981, 16989), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (16987, 16989), True, 'import matplotlib.pyplot as pl\n'), ((17029, 17037), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (17035, 17037), True, 'import matplotlib.pyplot as pl\n'), ((5970, 5984), 'numpy.abs', 'np.abs', (['values'], {}), '(values)\n', (5976, 5984), True, 'import numpy as np\n'), ((11869, 11877), 
'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (11875, 11877), True, 'import matplotlib.pyplot as pl\n'), ((13910, 13925), 'numpy.array', 'np.array', (['xline'], {}), '(xline)\n', (13918, 13925), True, 'import numpy as np\n'), ((6822, 6836), 'numpy.abs', 'np.abs', (['values'], {}), '(values)\n', (6828, 6836), True, 'import numpy as np\n'), ((14205, 14220), 'numpy.array', 'np.array', (['yline'], {}), '(yline)\n', (14213, 14220), True, 'import numpy as np\n'), ((7705, 7724), 'numpy.abs', 'np.abs', (['orig_values'], {}), '(orig_values)\n', (7711, 7724), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Tests for `lsst_efd_client` package."""
import numpy
import pandas as pd
import json
import pytest
from kafkit.registry.sansio import MockRegistryApi
from astropy.time import Time, TimeDelta
import astropy.units as u
from aioinflux import InfluxDBClient
import pathlib
from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes
# Directory containing this test module; used to locate test data files.
PATH = pathlib.Path(__file__).parent.absolute()
@pytest.fixture
def auth_creds():
    """Pytest fixture returning the credential tuple for the 'test_efd' instance.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    helper = NotebookAuth()
    return helper.get_auth('test_efd')
@pytest.fixture
def auth_client():
    """Pytest fixture returning a `NotebookAuth` helper used to query credentials."""
    return NotebookAuth()
@pytest.fixture
@pytest.mark.vcr
async def efd_client():
    """Async fixture yielding an `EfdClient` backed by a temporary InfluxDB database.

    Test data from ``efd_test.hdf`` is written into the database before the
    client is handed to the test; the database is dropped afterwards.
    """
    df = pd.read_hdf(PATH/'efd_test.hdf')
    async with InfluxDBClient(db='client_test', mode='async', output='dataframe') as client:
        await client.create_database()
        await client.write(df, measurement='lsst.sal.fooSubSys.test')
        efd_client = EfdClient('test_efd', db_name='client_test', client=client)
        # Monkey patch the client to point to an existing schema registry
        # Note this is only available if on the NCSA VPN
        efd_client.schema_registry = 'https://lsst-schema-registry-efd.ncsa.illinois.edu'
        yield efd_client
        await client.drop_database()
@pytest.fixture
def expected_strs():
    """Fixture reading the expected query strings, one per line (newlines kept)."""
    with open(PATH/'expected.txt', 'r') as fh:
        return fh.readlines()
@pytest.fixture
def test_df():
    """Fixture loading the raw test DataFrame from ``efd_test.hdf``."""
    data_file = PATH / 'efd_test.hdf'
    return pd.read_hdf(data_file)
@pytest.fixture
def test_query_res():
    """Fixture loading the expected packed-time-series result frame."""
    data_file = PATH / 'packed_data.hdf'
    return pd.read_hdf(data_file, key='test_data')
@pytest.fixture
def start_stop():
    """Fixture returning a (start, stop) `Time` pair spanning 600 seconds."""
    start = Time('2020-01-28T23:07:19.00', format='isot', scale='utc')
    stop = start + TimeDelta(600, format='sec')
    return (start, stop)
def test_bad_endpoint():
    """`NotebookAuth` must raise `IOError` for an unreachable service endpoint."""
    with pytest.raises(IOError):
        NotebookAuth(service_endpoint="https://no.path.here.net.gov")
@pytest.mark.vcr
def test_auth_host(auth_creds):
    """Element 0 of the credential tuple is the host name."""
    assert auth_creds[0] == 'foo.bar.baz.net'
@pytest.mark.vcr
def test_auth_registry(auth_creds):
    """Element 1 of the credential tuple is the schema registry URL."""
    assert auth_creds[1] == 'https://schema-registry-foo.bar.baz'
@pytest.mark.vcr
def test_auth_port(auth_creds):
    """Element 2 of the credential tuple is the port (as a string)."""
    assert auth_creds[2] == '443'
@pytest.mark.vcr
def test_auth_user(auth_creds):
    """Element 3 of the credential tuple is the user name."""
    assert auth_creds[3] == 'foo'
@pytest.mark.vcr
def test_auth_password(auth_creds):
    """Element 4 of the credential tuple is the password."""
    assert auth_creds[4] == 'bar'
@pytest.mark.vcr
def test_auth_list(auth_client):
    """The credential store contains more than just the test credentials."""
    # Make sure there is at least one set of credentials
    # other than the test one used here
    assert len(auth_client.list_auth()) > 1
@pytest.mark.vcr
def test_efd_names(auth_client):
    """Every EFD name advertised by `EfdClient` must have stored credentials."""
    # Don't assume same order in case we change
    # the backend to something that doesn't
    # guarantee that
    for name in EfdClient.list_efd_names():
        assert name in auth_client.list_auth()
@pytest.mark.vcr
def test_build_query(efd_client, start_stop, expected_strs):
    """Time-range query strings match the stored expected strings."""
    start, stop = start_stop
    # Check passing a single field works
    single = efd_client.build_time_range_query(
        'lsst.sal.fooSubSys.test', 'foo', start, stop)
    assert single == expected_strs[0].strip()
    # Check passing a list of fields works
    multiple = efd_client.build_time_range_query(
        'lsst.sal.fooSubSys.test', ['foo', 'bar'], start, stop)
    assert multiple == expected_strs[1].strip()
@pytest.mark.vcr
def test_build_query_delta(efd_client, start_stop, expected_strs):
    """Time-delta based queries match expectations, with and without is_window."""
    tdelta = TimeDelta(250, format='sec')
    # Check passing a time delta works
    qstr = efd_client.build_time_range_query(
        'lsst.sal.fooSubSys.test', ['foo', 'bar'], start_stop[0], tdelta)
    assert qstr == expected_strs[2].strip()
    # Check passing a time delta as a window works
    qstr = efd_client.build_time_range_query(
        'lsst.sal.fooSubSys.test', ['foo', 'bar'], start_stop[0], tdelta,
        is_window=True)
    assert qstr == expected_strs[3].strip()
@pytest.mark.asyncio
async def test_parse_schema(efd_client):
    """Test the EfdClient._parse_schema method."""
    # Body that we expect the registry API to return given the request.
    field_defs = [
        {"name": "a", "type": "int", "description": "Description 1", "units": "second"},
        {"name": "b", "type": "double", "description": "Description 2", "units": "meter"},
        {"name": "c", "type": "float", "description": "Description 3", "units": "gram"},
        {"name": "d", "type": "string", "description": "Description 4", "units": "torr"},
    ]
    expected_body = {
        "schema": json.dumps(
            {
                "name": "schema1",
                "type": "record",
                "fields": field_defs,
            }
        ),
        "subject": "schema1",
        "version": 1,
        "id": 2,
    }
    client = MockRegistryApi(body=json.dumps(expected_body).encode("utf-8"))
    schema = await client.get_schema_by_subject("schema1")
    result = efd_client._parse_schema("schema1", schema)
    assert isinstance(result, pd.DataFrame)
    # Field names come back in declaration order.
    for pos, label in enumerate('abcd'):
        assert result['name'][pos] == label
    for pos in range(4):
        assert result['description'][pos] == f'Description {pos+1}'
    assert 'units' in result.columns
    assert 'aunits' in result.columns
    assert 'type' not in result.columns
    # 'aunits' holds parsed astropy units.
    for unit in result['aunits']:
        assert isinstance(unit, u.UnitBase)
@pytest.mark.asyncio
@pytest.mark.vcr
async def test_topics(efd_client):
    """The test database exposes exactly one topic with the expected name."""
    topics = await efd_client.get_topics()
    assert len(topics) == 1
    assert topics[0] == 'lsst.sal.fooSubSys.test'
@pytest.mark.asyncio
@pytest.mark.vcr
async def test_fields(efd_client, test_df):
    """Every column of the test frame is reported by `get_fields`."""
    columns = await efd_client.get_fields('lsst.sal.fooSubSys.test')
    for c in test_df.columns:
        assert c in columns
@pytest.mark.asyncio
#<EMAIL>
async def test_time_series(efd_client, start_stop):
    """Time-series selection returns 600 rows and honors convert_influx_index."""
    start, stop = start_stop
    fields = ['foo', 'bar']
    df = await efd_client.select_time_series(
        'lsst.sal.fooSubSys.test', fields, start, stop)
    assert len(df) == 600
    for field in fields:
        assert field in df.columns
    df_legacy = await efd_client.select_time_series(
        'lsst.sal.fooSubSys.test', fields, start, stop,
        convert_influx_index=True)
    # Test that df_legacy is in UTC assuming df was in TAI
    offsets = Time(df.index).unix - Time(df_legacy.index).unix
    # The indexes should all be the same since both the time range and
    # index were shifted
    assert numpy.all(offsets == 0.)
    # But the queries should be different
    assert not efd_client.query_history[-2] == efd_client.query_history[-1]
@pytest.mark.asyncio
@pytest.mark.vcr
async def test_top_n(efd_client, start_stop):
    """`select_top_n` returns the requested row count and expected values."""
    fields = ['foo', 'bar']
    df = await efd_client.select_top_n('lsst.sal.fooSubSys.test', fields, 10)
    assert len(df) == 10
    for field in fields:
        assert field in df.columns
    df_legacy = await efd_client.select_top_n(
        'lsst.sal.fooSubSys.test', fields, 10, convert_influx_index=True)
    # Test that df_legacy is in UTC assuming df was in TAI
    offsets = Time(df.index).unix - Time(df_legacy.index).unix
    assert numpy.all(offsets == 37.)
    df = await efd_client.select_top_n(
        'lsst.sal.fooSubSys.test', fields, 10, time_cut=start_stop[0])
    assert len(df) == 10
    for field in fields:
        assert field in df.columns
    # Spot-check the first and last rows of the time-cut query.
    assert df['foo'].values[0] == 144.11835565266966
    assert df['bar'].values[0] == 631.1982694645203
    assert df['foo'].values[-1] == 180.95267940509046
    assert df['bar'].values[-1] == 314.7001662962593
@pytest.mark.asyncio
@pytest.mark.vcr
async def test_packed_time_series(efd_client, start_stop, test_query_res):
    """Packed time series unpacks correctly and comes back with a UTC index."""
    expected = test_query_res
    df = await efd_client.select_packed_time_series(
        'lsst.sal.fooSubSys.test', ['ham', 'egg', 'hamegg'],
        start_stop[0], start_stop[1])
    # The column 'times' holds the input to the packed time index.
    # It's typically in TAI, but the returned index should be in UTC
    assert numpy.all((numpy.array(df['times']) - Time(df.index).unix) == 37.)
    # Index must be strictly increasing.
    assert numpy.all((df.index[1:] - df.index[:-1]).total_seconds() > 0)
    assert numpy.all(df == expected)
    for column in ['ham', 'egg']:
        assert column in df.columns
def test_resample(test_query_res):
    """Resampling two copies of a frame offset by 50 ms doubles the row count."""
    original = test_query_res
    shifted = original.copy()
    shifted.set_index(shifted.index + pd.Timedelta(0.05, unit='s'), inplace=True)
    combined = resample(original, shifted)
    assert len(combined) == 2 * len(original)
def test_rendezvous(test_df):
    """`rendezvous_dataframes` matches each row with the nearest earlier `sub` row."""
    sub = test_df.iloc[[25, 75], :]
    # this makes sure the index is not the same, which is the
    # point of this helper method
    sub.set_index(sub.index + pd.Timedelta(0.5, unit='s'), inplace=True)
    merged = rendezvous_dataframes(test_df, sub)
    for row_number, (_, row) in enumerate(merged.iterrows()):
        if row_number < 26:
            # No earlier sub row exists yet.
            assert numpy.isnan(row['ham0_y'])
            assert numpy.isnan(row['egg0_y'])
        elif 25 < row_number < 76:
            assert row['ham0_y'] == sub['ham0'][0]
            assert row['egg0_y'] == sub['egg0'][0]
        elif row_number > 75:
            assert row['ham0_y'] == sub['ham0'][1]
            assert row['egg0_y'] == sub['egg0'][1]
| [
"lsst_efd_client.rendezvous_dataframes",
"lsst_efd_client.EfdClient",
"pandas.read_hdf",
"aioinflux.InfluxDBClient",
"lsst_efd_client.EfdClient.list_efd_names",
"kafkit.registry.sansio.MockRegistryApi",
"astropy.time.Time",
"astropy.time.TimeDelta",
"json.dumps",
"lsst_efd_client.resample",
"num... | [((786, 800), 'lsst_efd_client.NotebookAuth', 'NotebookAuth', ([], {}), '()\n', (798, 800), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((869, 903), 'pandas.read_hdf', 'pd.read_hdf', (["(PATH / 'efd_test.hdf')"], {}), "(PATH / 'efd_test.hdf')\n", (880, 903), True, 'import pandas as pd\n'), ((1706, 1740), 'pandas.read_hdf', 'pd.read_hdf', (["(PATH / 'efd_test.hdf')"], {}), "(PATH / 'efd_test.hdf')\n", (1717, 1740), True, 'import pandas as pd\n'), ((1789, 1843), 'pandas.read_hdf', 'pd.read_hdf', (["(PATH / 'packed_data.hdf')"], {'key': '"""test_data"""'}), "(PATH / 'packed_data.hdf', key='test_data')\n", (1800, 1843), True, 'import pandas as pd\n'), ((1888, 1946), 'astropy.time.Time', 'Time', (['"""2020-01-28T23:07:19.00"""'], {'format': '"""isot"""', 'scale': '"""utc"""'}), "('2020-01-28T23:07:19.00', format='isot', scale='utc')\n", (1892, 1946), False, 'from astropy.time import Time, TimeDelta\n'), ((2983, 3009), 'lsst_efd_client.EfdClient.list_efd_names', 'EfdClient.list_efd_names', ([], {}), '()\n', (3007, 3009), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((3726, 3754), 'astropy.time.TimeDelta', 'TimeDelta', (['(250)'], {'format': '"""sec"""'}), "(250, format='sec')\n", (3735, 3754), False, 'from astropy.time import Time, TimeDelta\n'), ((5216, 5242), 'kafkit.registry.sansio.MockRegistryApi', 'MockRegistryApi', ([], {'body': 'body'}), '(body=body)\n', (5231, 5242), False, 'from kafkit.registry.sansio import MockRegistryApi\n'), ((6828, 6847), 'numpy.all', 'numpy.all', (['(t == 0.0)'], {}), '(t == 0.0)\n', (6837, 6847), False, 'import numpy\n'), ((7591, 7611), 'numpy.all', 'numpy.all', (['(t == 37.0)'], {}), '(t == 37.0)\n', (7600, 7611), False, 'import numpy\n'), ((8646, 8669), 'numpy.all', 'numpy.all', (['(df == df_exp)'], {}), '(df == df_exp)\n', (8655, 8669), False, 'import numpy\n'), ((8910, 8931), 'lsst_efd_client.resample', 'resample', 
(['df', 'df_copy'], {}), '(df, df_copy)\n', (8918, 8931), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((9218, 9253), 'lsst_efd_client.rendezvous_dataframes', 'rendezvous_dataframes', (['test_df', 'sub'], {}), '(test_df, sub)\n', (9239, 9253), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((917, 983), 'aioinflux.InfluxDBClient', 'InfluxDBClient', ([], {'db': '"""client_test"""', 'mode': '"""async"""', 'output': '"""dataframe"""'}), "(db='client_test', mode='async', output='dataframe')\n", (931, 983), False, 'from aioinflux import InfluxDBClient\n'), ((1125, 1184), 'lsst_efd_client.EfdClient', 'EfdClient', (['"""test_efd"""'], {'db_name': '"""client_test"""', 'client': 'client'}), "('test_efd', db_name='client_test', client=client)\n", (1134, 1184), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((2038, 2060), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (2051, 2060), False, 'import pytest\n'), ((2070, 2131), 'lsst_efd_client.NotebookAuth', 'NotebookAuth', ([], {'service_endpoint': '"""https://no.path.here.net.gov"""'}), "(service_endpoint='https://no.path.here.net.gov')\n", (2082, 2131), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((4488, 4897), 'json.dumps', 'json.dumps', (["{'name': 'schema1', 'type': 'record', 'fields': [{'name': 'a', 'type':\n 'int', 'description': 'Description 1', 'units': 'second'}, {'name': 'b',\n 'type': 'double', 'description': 'Description 2', 'units': 'meter'}, {\n 'name': 'c', 'type': 'float', 'description': 'Description 3', 'units':\n 'gram'}, {'name': 'd', 'type': 'string', 'description': 'Description 4',\n 'units': 'torr'}]}"], {}), "({'name': 'schema1', 'type': 'record', 'fields': [{'name': 'a',\n 'type': 'int', 'description': 'Description 1', 'units': 'second'}, {\n 'name': 'b', 'type': 'double', 
'description': 'Description 2', 'units':\n 'meter'}, {'name': 'c', 'type': 'float', 'description': 'Description 3',\n 'units': 'gram'}, {'name': 'd', 'type': 'string', 'description':\n 'Description 4', 'units': 'torr'}]})\n", (4498, 4897), False, 'import json\n'), ((390, 412), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (402, 412), False, 'import pathlib\n'), ((620, 634), 'lsst_efd_client.NotebookAuth', 'NotebookAuth', ([], {}), '()\n', (632, 634), False, 'from lsst_efd_client import NotebookAuth, EfdClient, resample, rendezvous_dataframes\n'), ((1972, 2000), 'astropy.time.TimeDelta', 'TimeDelta', (['(600)'], {'format': '"""sec"""'}), "(600, format='sec')\n", (1981, 2000), False, 'from astropy.time import Time, TimeDelta\n'), ((5161, 5186), 'json.dumps', 'json.dumps', (['expected_body'], {}), '(expected_body)\n', (5171, 5186), False, 'import json\n'), ((6768, 6782), 'astropy.time.Time', 'Time', (['df.index'], {}), '(df.index)\n', (6772, 6782), False, 'from astropy.time import Time, TimeDelta\n'), ((6790, 6811), 'astropy.time.Time', 'Time', (['df_legacy.index'], {}), '(df_legacy.index)\n', (6794, 6811), False, 'from astropy.time import Time, TimeDelta\n'), ((7531, 7545), 'astropy.time.Time', 'Time', (['df.index'], {}), '(df.index)\n', (7535, 7545), False, 'from astropy.time import Time, TimeDelta\n'), ((7553, 7574), 'astropy.time.Time', 'Time', (['df_legacy.index'], {}), '(df_legacy.index)\n', (7557, 7574), False, 'from astropy.time import Time, TimeDelta\n'), ((8853, 8881), 'pandas.Timedelta', 'pd.Timedelta', (['(0.05)'], {'unit': '"""s"""'}), "(0.05, unit='s')\n", (8865, 8881), True, 'import pandas as pd\n'), ((9162, 9189), 'pandas.Timedelta', 'pd.Timedelta', (['(0.5)'], {'unit': '"""s"""'}), "(0.5, unit='s')\n", (9174, 9189), True, 'import pandas as pd\n'), ((9340, 9369), 'numpy.isnan', 'numpy.isnan', (["rec[1]['ham0_y']"], {}), "(rec[1]['ham0_y'])\n", (9351, 9369), False, 'import numpy\n'), ((9389, 9418), 'numpy.isnan', 'numpy.isnan', 
(["rec[1]['egg0_y']"], {}), "(rec[1]['egg0_y'])\n", (9400, 9418), False, 'import numpy\n'), ((8506, 8530), 'numpy.array', 'numpy.array', (["df['times']"], {}), "(df['times'])\n", (8517, 8530), False, 'import numpy\n'), ((8533, 8547), 'astropy.time.Time', 'Time', (['df.index'], {}), '(df.index)\n', (8537, 8547), False, 'from astropy.time import Time, TimeDelta\n')] |
from auto_pilot.localization.filter import Filter
from auto_pilot.data.world_map import WorldMap
from auto_pilot.data.senseRet import SensorRet
from auto_pilot.data.motion import Motion
from auto_pilot.data.coordinates import Coordinates
from auto_pilot.common.param import Param
from auto_pilot.common.util import region_similarity
from overrides import overrides
import numpy as np
from typing import List
@Filter.register("histogram")
class Histogram(Filter):
    """Discrete, 2D histogram (grid-based) localization filter."""

    def __init__(self, match_prob: float, non_match_prob: List[float],
                 hit_prob: float, miss_prob: List[float]):
        self.__match_prob = None
        self.__non_match_prob = None
        self.__hit_prob = None
        self.__miss_prob = None
        self.set_noise(match_prob, non_match_prob, hit_prob, miss_prob)

    @overrides
    def set_noise(self, match_prob: float, non_match_prob: List[float],
                  hit_prob: float, miss_prob: List[float]):
        """Set sensing and motion noise parameters.

        `miss_prob` is a 4-element list, following the order of up, right,
        down, left.  Fix: `non_match_prob` was missing from the signature
        even though `__init__` and `from_params` pass it, so construction
        raised TypeError; it was also never stored.
        """
        assert sum([hit_prob] + miss_prob) == 1, "Histogram Filter motion params should add up to 1"
        # sensing params
        self.__match_prob = match_prob
        self.__non_match_prob = non_match_prob
        # motion params
        self.__hit_prob = hit_prob
        self.__miss_prob = miss_prob

    @overrides
    def sensing_update_prob(self, info: SensorRet, prob_mat: np.matrix, world: WorldMap) -> np.matrix:
        """
        Update probability matrix by the similarity of new observation and world map.
        :return: normalized probability matrix
        """
        # Use .shape for bounds: len(prob_mat[0]) on an np.matrix is always 1
        # (a (1, n) row matrix), which previously skipped all but column 0.
        n_rows, n_cols = prob_mat.shape
        q = np.matrix(np.zeros(prob_mat.shape))
        for i in range(n_rows):
            for j in range(n_cols):
                sim = region_similarity(info, world, Coordinates(i, j))
                # Fix: `self.match_prob` did not exist (the attribute is the
                # name-mangled `__match_prob` set in `set_noise`).
                q[i, j] = prob_mat[i, j] * sim * self.__match_prob
        return q / q.sum()

    @overrides
    def motion_update_prob(self, motion: Motion, prob_mat: np.matrix) -> np.matrix:
        """
        Update probability matrix by landing place prediction after motion.
        :return: non-normalized probability matrix
        """
        n_rows, n_cols = prob_mat.shape
        q = np.matrix(np.zeros(prob_mat.shape))
        for i in range(n_rows):
            for j in range(n_cols):
                # Mass arriving at (i, j): exact hit plus the four one-cell
                # misses (up, right, down, left).  Fix: `self.hit_prob` did
                # not exist — use the private `__hit_prob` attribute.
                s = self.__hit_prob * prob_mat[(Coordinates(i, j) - motion) % prob_mat.shape]
                for index, bias in enumerate([(-1, 0), (0, 1), (1, 0), (0, -1)]):
                    s += self.__miss_prob[index] * \
                        prob_mat[(Coordinates(i, j) - motion - Coordinates(bias[0], bias[1])) % prob_mat.shape]
                q[i, j] = s
        return q

    @classmethod
    def from_params(cls, param: Param) -> 'Histogram':
        """Build a `Histogram` filter from a `Param` bundle."""
        match_prob: float = param.pop("match_prob")
        non_match_prob: List[float] = param.pop("non_match_prob")
        hit_prob: float = param.pop("hit_prob")
        miss_prob: List[float] = param.pop("miss_prob")
        return cls(
            match_prob=match_prob, non_match_prob=non_match_prob, hit_prob=hit_prob, miss_prob=miss_prob
        )
| [
"numpy.zeros",
"auto_pilot.localization.filter.Filter.register",
"auto_pilot.data.coordinates.Coordinates"
] | [((411, 439), 'auto_pilot.localization.filter.Filter.register', 'Filter.register', (['"""histogram"""'], {}), "('histogram')\n", (426, 439), False, 'from auto_pilot.localization.filter import Filter\n'), ((1603, 1627), 'numpy.zeros', 'np.zeros', (['prob_mat.shape'], {}), '(prob_mat.shape)\n', (1611, 1627), True, 'import numpy as np\n'), ((2150, 2174), 'numpy.zeros', 'np.zeros', (['prob_mat.shape'], {}), '(prob_mat.shape)\n', (2158, 2174), True, 'import numpy as np\n'), ((1767, 1784), 'auto_pilot.data.coordinates.Coordinates', 'Coordinates', (['i', 'j'], {}), '(i, j)\n', (1778, 1784), False, 'from auto_pilot.data.coordinates import Coordinates\n'), ((2307, 2324), 'auto_pilot.data.coordinates.Coordinates', 'Coordinates', (['i', 'j'], {}), '(i, j)\n', (2318, 2324), False, 'from auto_pilot.data.coordinates import Coordinates\n'), ((2552, 2581), 'auto_pilot.data.coordinates.Coordinates', 'Coordinates', (['bias[0]', 'bias[1]'], {}), '(bias[0], bias[1])\n', (2563, 2581), False, 'from auto_pilot.data.coordinates import Coordinates\n'), ((2523, 2540), 'auto_pilot.data.coordinates.Coordinates', 'Coordinates', (['i', 'j'], {}), '(i, j)\n', (2534, 2540), False, 'from auto_pilot.data.coordinates import Coordinates\n')] |
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
import pyro
import torch
from sbibm.utils.io import get_tensor_from_csv, save_tensor_to_csv
from sbibm.utils.pyro import get_log_prob_fn, get_log_prob_grad_fn
class Task:
def __init__(
self,
dim_data: int,
dim_parameters: int,
name: str,
num_observations: int,
num_posterior_samples: List[int],
num_simulations: List[int],
path: Path,
name_display: Optional[str] = None,
num_reference_posterior_samples: int = None,
observation_seeds: Optional[List[int]] = None,
):
"""Base class for tasks.
Args:
dim_data: Dimensionality of data.
dim_parameters: Dimensionality of parameters.
name: Name of task. Should be the name of the folder in which
the task is stored. Used with `sbibm.get_task(name)`.
num_observations: Number of different observations for this task.
num_posterior_samples: Number of posterior samples to generate.
num_simulations: List containing number of different simulations to
run this task for.
path: Path to folder of task.
name_display: Display name of task, with correct upper/lower-case
spelling and spaces. Defaults to `name`.
num_reference_posterior_samples: Number of reference posterior samples
to generate for this task. Defaults to `num_posterior_samples`.
observation_seeds: List of observation seeds to use. Defaults to
a sequence of length `num_observations`. Override to use specific
seeds.
"""
self.dim_data = dim_data
self.dim_parameters = dim_parameters
self.name = name
self.num_observations = num_observations
self.num_posterior_samples = num_posterior_samples
self.num_simulations = num_simulations
self.path = path
self.name_display = name_display if name_display is not None else name
self.num_reference_posterior_samples = (
num_reference_posterior_samples
if num_reference_posterior_samples is not None
else num_posterior_samples
)
self.observation_seeds = (
observation_seeds
if observation_seeds is not None
else [i + 1000000 for i in range(self.num_observations)]
)
    @abstractmethod
    def get_prior(self) -> Callable:
        """Get function returning parameters from prior.

        Must be implemented by each concrete task.
        """
        raise NotImplementedError
    def get_prior_dist(self) -> torch.distributions.Distribution:
        """Get prior distribution.

        NOTE(review): assumes the subclass sets `self.prior_dist` — confirm.
        """
        return self.prior_dist
    def get_prior_params(self) -> Dict[str, torch.Tensor]:
        """Get parameters of prior distribution.

        NOTE(review): assumes the subclass sets `self.prior_params` — confirm.
        """
        return self.prior_params
def get_labels_data(self) -> List[str]:
"""Get list containing parameter labels"""
return [f"data_{i+1}" for i in range(self.dim_data)]
def get_labels_parameters(self) -> List[str]:
"""Get list containing parameter labels"""
return [f"parameter_{i+1}" for i in range(self.dim_parameters)]
def get_observation(self, num_observation: int) -> torch.Tensor:
"""Get observed data for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "observation.csv"
)
return get_tensor_from_csv(path)
def get_reference_posterior_samples(self, num_observation: int) -> torch.Tensor:
"""Get reference posterior samples for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "reference_posterior_samples.csv.bz2"
)
return get_tensor_from_csv(path)
    @abstractmethod
    def get_simulator(self) -> Callable:
        """Get function returning simulated data given parameters.

        Must be implemented by each concrete task. (Original docstring was
        copy-pasted from `get_prior`.)
        """
        raise NotImplementedError
def get_true_parameters(self, num_observation: int) -> torch.Tensor:
"""Get true parameters (parameters that generated the data) for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "true_parameters.csv"
)
return get_tensor_from_csv(path)
    def save_data(self, path: Union[str, Path], data: torch.Tensor):
        """Save data to a given path as CSV, labelled with `get_labels_data`."""
        save_tensor_to_csv(path, data, self.get_labels_data())
    def save_parameters(self, path: Union[str, Path], parameters: torch.Tensor):
        """Save parameters to a given path as CSV, labelled with `get_labels_parameters`."""
        save_tensor_to_csv(path, parameters, self.get_labels_parameters())
def flatten_data(self, data: torch.Tensor) -> torch.Tensor:
"""Flattens data
Data returned by the simulator is always flattened into 2D Tensors
"""
return data.reshape(-1, self.dim_data)
def unflatten_data(self, data: torch.Tensor) -> torch.Tensor:
"""Unflattens data
Tasks that require more than 2 dimensions for output of the simulator (e.g.
returning images) may override this method.
"""
return data.reshape(-1, self.dim_data)
    def _get_log_prob_fn(
        self,
        num_observation: Optional[int] = None,
        observation: Optional[torch.Tensor] = None,
        posterior: bool = True,
        implementation: str = "pyro",
        **kwargs: Any,
    ) -> Callable:
        """Gets function returning the unnormalized log probability of the posterior or
        likelihood
        Args:
            num_observation: Observation number
            observation: Instead of passing an observation number, an observation may be
                passed directly
            posterior: If False, will get likelihood instead of posterior
            implementation: Implementation to use, `pyro` or `experimental`
            kwargs: Additional keywords passed to `sbibm.utils.pyro.get_log_prob_fn`
        Returns:
            `log_prob_fn` that returns log probabilities as `batch_size`
        """
        # Exactly one of `num_observation` / `observation` must be supplied.
        assert not (num_observation is None and observation is None)
        assert not (num_observation is not None and observation is not None)
        assert type(posterior) is bool
        # Condition the Pyro model on the observation; with posterior=False
        # the prior is masked so only the likelihood contributes.
        conditioned_model = self._get_pyro_model(
            num_observation=num_observation,
            observation=observation,
            posterior=posterior,
        )
        log_prob_fn, _ = get_log_prob_fn(
            conditioned_model,
            implementation=implementation,
            **kwargs,
        )
        def log_prob_pyro(parameters):
            # Expects a 2D batch of parameter rows; for batches larger than
            # one, the underlying fn is evaluated one row at a time.
            assert parameters.ndim == 2
            num_parameters = parameters.shape[0]
            if num_parameters == 1:
                return log_prob_fn({"parameters": parameters})
            else:
                log_probs = []
                for i in range(num_parameters):
                    log_probs.append(
                        log_prob_fn({"parameters": parameters[i, :].reshape(1, -1)})
                    )
                return torch.cat(log_probs)
        def log_prob_experimental(parameters):
            # The experimental implementation handles batching itself.
            return log_prob_fn({"parameters": parameters})
        if implementation == "pyro":
            return log_prob_pyro
        elif implementation == "experimental":
            return log_prob_experimental
        else:
            raise NotImplementedError
    def _get_log_prob_grad_fn(
        self,
        num_observation: Optional[int] = None,
        observation: Optional[torch.Tensor] = None,
        posterior: bool = True,
        implementation: str = "pyro",
        **kwargs: Any,
    ) -> Callable:
        """Gets function returning the gradient of the unnormalized log probability
        of the posterior
        Args:
            num_observation: Observation number
            observation: Instead of passing an observation number, an observation may be
                passed directly
            posterior: If False, will get likelihood instead of posterior
            implementation: Implementation to use; only `pyro` is supported
            kwargs: Passed to `sbibm.utils.pyro.get_log_prob_grad_fn`
        Returns:
            `log_prob_grad_fn` that returns gradients as `batch_size` x
            `dim_parameter`
        """
        # Exactly one of `num_observation` / `observation` must be supplied.
        assert not (num_observation is None and observation is None)
        assert not (num_observation is not None and observation is not None)
        assert type(posterior) is bool
        assert implementation == "pyro"
        conditioned_model = self._get_pyro_model(
            num_observation=num_observation,
            observation=observation,
            posterior=posterior,
        )
        log_prob_grad_fn, _ = get_log_prob_grad_fn(
            conditioned_model,
            implementation=implementation,
            **kwargs,
        )
        def log_prob_grad_pyro(parameters):
            # Expects a 2D batch; gradients are computed one row at a time
            # for batches larger than one, then stacked back together.
            assert parameters.ndim == 2
            num_parameters = parameters.shape[0]
            if num_parameters == 1:
                grads, _ = log_prob_grad_fn({"parameters": parameters})
                return grads["parameters"].reshape(
                    parameters.shape[0], parameters.shape[1]
                )
            else:
                grads = []
                for i in range(num_parameters):
                    grad, _ = log_prob_grad_fn(
                        {"parameters": parameters[i, :].reshape(1, -1)}
                    )
                    grads.append(grad["parameters"].squeeze())
                return torch.stack(grads).reshape(
                    parameters.shape[0], parameters.shape[1]
                )
        if implementation == "pyro":
            return log_prob_grad_pyro
        else:
            raise NotImplementedError
    def _get_transforms(
        self,
        automatic_transforms_enabled: bool = True,
        num_observation: Optional[int] = 1,
        observation: Optional[torch.Tensor] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """Gets transforms
        Args:
            num_observation: Observation number (defaults to 1)
            observation: Instead of passing an observation number, an observation may be
                passed directly
            automatic_transforms_enabled: If True, will automatically construct
                transforms to unconstrained space
        Returns:
            Dict containing transforms
        """
        # The conditioned model is only built so that transforms to
        # unconstrained space can be derived from it.
        conditioned_model = self._get_pyro_model(
            num_observation=num_observation, observation=observation
        )
        _, transforms = get_log_prob_fn(
            conditioned_model,
            automatic_transform_enabled=automatic_transforms_enabled,
        )
        return transforms
def _get_observation_seed(self, num_observation: int) -> int:
"""Get observation seed for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "observation_seed.csv"
)
return int(pd.read_csv(path)["observation_seed"][0])
def _get_pyro_model(
self,
posterior: bool = True,
num_observation: Optional[int] = None,
observation: Optional[torch.Tensor] = None,
) -> Callable:
"""Get model function for use with Pyro
If `num_observation` or `observation` is passed, the model is conditioned.
Args:
num_observation: Observation number
observation: Instead of passing an observation number, an observation may be
passed directly
posterior: If False, will mask prior which will result in model useful
for calculating log likelihoods instead of log posterior probabilities
"""
assert not (num_observation is not None and observation is not None)
if num_observation is not None:
observation = self.get_observation(num_observation=num_observation)
prior = self.get_prior()
simulator = self.get_simulator()
def model_fn():
prior_ = pyro.poutine.mask(prior, torch.tensor(posterior))
return simulator(prior_())
if observation is not None:
observation = self.unflatten_data(observation)
return pyro.condition(model_fn, {"data": observation})
else:
return model_fn
    @abstractmethod
    def _sample_reference_posterior(
        self,
        num_samples: int,
        num_observation: Optional[int] = None,
        observation: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Sample reference posterior for given observation.

        Abstract: must be implemented by each concrete task.

        Args:
            num_samples: Number of samples
            num_observation: Observation number
            observation: Instead of passing an observation number, an observation may be
                passed directly

        Returns:
            Samples from reference posterior
        """
        raise NotImplementedError
def _save_observation_seed(self, num_observation: int, observation_seed: int):
"""Save observation seed for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "observation_seed.csv"
)
path.parent.mkdir(parents=True, exist_ok=True)
pd.DataFrame(
[[int(observation_seed), int(num_observation)]],
columns=["observation_seed", "num_observation"],
).to_csv(path, index=False)
def _save_observation(self, num_observation: int, observation: torch.Tensor):
"""Save observed data for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "observation.csv"
)
path.parent.mkdir(parents=True, exist_ok=True)
self.save_data(path, observation)
def _save_reference_posterior_samples(
self, num_observation: int, reference_posterior_samples: torch.Tensor
):
"""Save reference posterior samples for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "reference_posterior_samples.csv.bz2"
)
path.parent.mkdir(parents=True, exist_ok=True)
self.save_parameters(path, reference_posterior_samples)
def _save_true_parameters(
self, num_observation: int, true_parameters: torch.Tensor
):
"""Save true parameters (parameters that generated the data) for a given observation number"""
path = (
self.path
/ "files"
/ f"num_observation_{num_observation}"
/ "true_parameters.csv"
)
path.parent.mkdir(parents=True, exist_ok=True)
self.save_parameters(path, true_parameters)
    def _setup(self, n_jobs: int = -1, create_reference: bool = True, **kwargs: Any):
        """Setup the task: generate observations and reference posterior samples.

        In most cases, you don't need to execute this method, since its results are stored to disk.
        Re-executing will overwrite existing files.

        Args:
            n_jobs: Number of jobs to use for Joblib
            create_reference: If False, skips reference creation
        """
        from joblib import Parallel, delayed
        def run(num_observation, observation_seed, **kwargs):
            # Seed NumPy and torch so each observation is reproducible
            np.random.seed(observation_seed)
            torch.manual_seed(observation_seed)
            self._save_observation_seed(num_observation, observation_seed)
            # Draw true parameters from the prior, simulate, and persist both
            prior = self.get_prior()
            true_parameters = prior(num_samples=1)
            self._save_true_parameters(num_observation, true_parameters)
            simulator = self.get_simulator()
            observation = simulator(true_parameters)
            self._save_observation(num_observation, observation)
            if create_reference:
                reference_posterior_samples = self._sample_reference_posterior(
                    num_observation=num_observation,
                    num_samples=self.num_reference_posterior_samples,
                    **kwargs,
                )
                # Guard against degenerate samplers: every sample must be unique
                num_unique = torch.unique(reference_posterior_samples, dim=0).shape[0]
                assert num_unique == self.num_reference_posterior_samples
                self._save_reference_posterior_samples(
                    num_observation,
                    reference_posterior_samples,
                )
        # One parallel job per observation; observation numbers are 1-based
        Parallel(n_jobs=n_jobs, verbose=50, backend="loky")(
            delayed(run)(num_observation, observation_seed, **kwargs)
            for num_observation, observation_seed in enumerate(
                self.observation_seeds, start=1
            )
        )
| [
"pyro.condition",
"numpy.random.seed",
"torch.stack",
"torch.unique",
"sbibm.utils.pyro.get_log_prob_grad_fn",
"pandas.read_csv",
"sbibm.utils.io.get_tensor_from_csv",
"torch.manual_seed",
"torch.cat",
"sbibm.utils.pyro.get_log_prob_fn",
"joblib.Parallel",
"joblib.delayed",
"torch.tensor"
] | [((3638, 3663), 'sbibm.utils.io.get_tensor_from_csv', 'get_tensor_from_csv', (['path'], {}), '(path)\n', (3657, 3663), False, 'from sbibm.utils.io import get_tensor_from_csv, save_tensor_to_csv\n'), ((4016, 4041), 'sbibm.utils.io.get_tensor_from_csv', 'get_tensor_from_csv', (['path'], {}), '(path)\n', (4035, 4041), False, 'from sbibm.utils.io import get_tensor_from_csv, save_tensor_to_csv\n'), ((4546, 4571), 'sbibm.utils.io.get_tensor_from_csv', 'get_tensor_from_csv', (['path'], {}), '(path)\n', (4565, 4571), False, 'from sbibm.utils.io import get_tensor_from_csv, save_tensor_to_csv\n'), ((6730, 6805), 'sbibm.utils.pyro.get_log_prob_fn', 'get_log_prob_fn', (['conditioned_model'], {'implementation': 'implementation'}), '(conditioned_model, implementation=implementation, **kwargs)\n', (6745, 6805), False, 'from sbibm.utils.pyro import get_log_prob_fn, get_log_prob_grad_fn\n'), ((8993, 9078), 'sbibm.utils.pyro.get_log_prob_grad_fn', 'get_log_prob_grad_fn', (['conditioned_model'], {'implementation': 'implementation'}), '(conditioned_model, implementation=implementation, **kwargs\n )\n', (9013, 9078), False, 'from sbibm.utils.pyro import get_log_prob_fn, get_log_prob_grad_fn\n'), ((10850, 10947), 'sbibm.utils.pyro.get_log_prob_fn', 'get_log_prob_fn', (['conditioned_model'], {'automatic_transform_enabled': 'automatic_transforms_enabled'}), '(conditioned_model, automatic_transform_enabled=\n automatic_transforms_enabled)\n', (10865, 10947), False, 'from sbibm.utils.pyro import get_log_prob_fn, get_log_prob_grad_fn\n'), ((12569, 12616), 'pyro.condition', 'pyro.condition', (['model_fn', "{'data': observation}"], {}), "(model_fn, {'data': observation})\n", (12583, 12616), False, 'import pyro\n'), ((15773, 15805), 'numpy.random.seed', 'np.random.seed', (['observation_seed'], {}), '(observation_seed)\n', (15787, 15805), True, 'import numpy as np\n'), ((15818, 15853), 'torch.manual_seed', 'torch.manual_seed', (['observation_seed'], {}), '(observation_seed)\n', (15835, 
15853), False, 'import torch\n'), ((16870, 16921), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': '(50)', 'backend': '"""loky"""'}), "(n_jobs=n_jobs, verbose=50, backend='loky')\n", (16878, 16921), False, 'from joblib import Parallel, delayed\n'), ((7347, 7367), 'torch.cat', 'torch.cat', (['log_probs'], {}), '(log_probs)\n', (7356, 7367), False, 'import torch\n'), ((12390, 12413), 'torch.tensor', 'torch.tensor', (['posterior'], {}), '(posterior)\n', (12402, 12413), False, 'import torch\n'), ((11316, 11333), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (11327, 11333), True, 'import pandas as pd\n'), ((16935, 16947), 'joblib.delayed', 'delayed', (['run'], {}), '(run)\n', (16942, 16947), False, 'from joblib import Parallel, delayed\n'), ((9816, 9834), 'torch.stack', 'torch.stack', (['grads'], {}), '(grads)\n', (9827, 9834), False, 'import torch\n'), ((16569, 16617), 'torch.unique', 'torch.unique', (['reference_posterior_samples'], {'dim': '(0)'}), '(reference_posterior_samples, dim=0)\n', (16581, 16617), False, 'import torch\n')] |
"""
.. module::
:synopsis: Module doing stuff...
.. moduleauthor:: <NAME>
"""
from .model import BioNeuron
from odynn import utils
from pylab import plt
import random
import numpy as np
import tensorflow as tf
import collections
MIN_TAU = 1.
MAX_TAU = 1000.
MIN_SCALE = 1.
MAX_SCALE = 200.
# Class for our new model
class HodgHuxSimple(BioNeuron):
    """Simplified Hodgkin-Huxley-style neuron with a leak and one K+ current.

    The state vector is [V, a, b]: membrane potential plus two gating
    variables (a, b) of the potassium current.
    """
    # Default biophysical parameters: capacitance, conductances, reversal
    # potentials, and gate kinetics (midpoint / scale / time constant)
    default_params = {'C_m': 1., 'g_L': 0.1, 'E_L': -60.,
                      'g_K': 0.5,
                      'E_K': 30.,
                      'a__mdp': -30.,
                      'a__scale': 20.,
                      'a__tau': 500.,
                      'b__mdp': -5.,
                      'b__scale': -3.,
                      'b__tau': 30.,
                      }
    # Sort parameters by name so their order is deterministic
    default_params = collections.OrderedDict(sorted(default_params.items(), key=lambda t: t[0]))
    # Initial state [V, a, b]
    default_init_state = np.array([-60., 0., 1.])
    # Box constraints [min, max] applied to parameters during optimization
    _constraints_dic = {'C_m': [0.5, 40.],
                        'g_L': [1e-9, 10.],
                        'g_K': [1e-9, 10.],
                        'a__scale': [MIN_SCALE, MAX_SCALE],
                        'a__tau': [MIN_TAU, MAX_TAU],
                        'b__scale': [-MAX_SCALE, -MIN_SCALE],
                        'b__tau': [MIN_TAU, MAX_TAU]
                        }
    def __init__(self, init_p, tensors=False, dt=0.1):
        """Initialize the neuron.

        Args:
            init_p: initial parameter dict (see `default_params`)
            tensors: if True, computations use TensorFlow ops instead of NumPy
            dt: integration time step
        """
        BioNeuron.__init__(self, init_p=init_p, tensors=tensors, dt=dt)
    def _i_K(self, a, b, V):
        # Potassium current: g_K * a^3 * b * (E_K - V)
        return self._param['g_K'] * a**3 * b * (self._param['E_K'] - V)
    def _i_L(self, V):
        # Leak current: g_L * (E_L - V)
        return self._param['g_L'] * (self._param['E_L'] - V)
    def step(self, X, i_inj):
        """Advance the state one time step with a forward-Euler update.

        Args:
            X: current state [V, a, b]
            i_inj: injected current at this step

        Returns:
            Updated state, as a tf tensor or NumPy array depending on
            ``self._tensors``.
        """
        # Update the voltage
        V = X[0]
        a = X[1]
        b = X[2]
        # Voltage update uses the gate values from the previous step
        V = V + self.dt * (i_inj + self._i_L(V) + self._i_K(a, b, V)) / self._param['C_m']
        # Gate dynamics are delegated to BioNeuron._update_gate — presumably a
        # first-order relaxation using the mdp/scale/tau params; confirm in base class
        a = self._update_gate(a, 'a', V)
        b = self._update_gate(b, 'b', V)
        if self._tensors:
            return tf.stack([V, a, b], 0)
        else:
            return np.array([V, a, b])
    @staticmethod
    def get_random():
        """Return a random parameter set drawn uniformly within sane bounds."""
        # Useful later
        return {'C_m': random.uniform(0.5, 40.),
                'g_L': random.uniform(1e-5, 10.),
                'g_K': random.uniform(1e-5, 10.),
                'E_L': random.uniform(-70., -45.),
                'E_K': random.uniform(-40., 30.),
                'a__tau': random.uniform(MIN_TAU, MAX_TAU),
                'a__scale': random.uniform(MIN_SCALE, MAX_SCALE),
                'a__mdp': random.uniform(-50., 0.),
                'b__tau': random.uniform(MIN_TAU, MAX_TAU),
                'b__scale': random.uniform(-MAX_SCALE, -MIN_SCALE),
                'b__mdp': random.uniform(-30., 20.),
                }
    def plot_results(self, ts, i_inj_values, results, ca_true=None, suffix="", show=True, save=False):
        """Plot voltage, currents, gate values, and injected current over time.

        Args:
            ts: time points
            i_inj_values: injected current trace
            results: state trajectory, columns [V, a, b]
            ca_true: unused here (kept for interface compatibility)
            suffix: appended to the saved figure name
            show/save: forwarded to utils.save_show
        """
        V = results[:, 0]
        a = results[:, 1]
        b = results[:, 2]
        il = self._i_L(V)
        ik = self._i_K(a, b, V)
        plt.figure()
        plt.subplot(4, 1, 1)
        plt.plot(ts, V, 'k')
        # NOTE(review): title says 'Leaky Integrator Neuron' but this model also
        # has a K+ gate — confirm the title is intended
        plt.title('Leaky Integrator Neuron')
        plt.ylabel('V (mV)')
        plt.subplot(4, 1, 2)
        plt.plot(ts, il, 'g', label='$I_{L}$')
        plt.plot(ts, ik, 'c', label='$I_{K}$')
        plt.ylabel('Current')
        plt.legend()
        plt.subplot(4, 1, 3)
        plt.plot(ts, a, 'c', label='a')
        plt.plot(ts, b, 'b', label='b')
        plt.ylabel('Gating Value')
        plt.legend()
        plt.subplot(4, 1, 4)
        plt.plot(ts, i_inj_values, 'b')
        plt.xlabel('t (ms)')
        plt.ylabel('$I_{inj}$ ($\\mu{A}/cm^2$)')
        # plt.ylim(-1, 40)
        utils.save_show(show, save, name='Results_{}'.format(suffix), dpi=300)
"pylab.plt.ylabel",
"pylab.plt.legend",
"pylab.plt.plot",
"random.uniform",
"pylab.plt.title",
"tensorflow.stack",
"numpy.array",
"pylab.plt.subplot",
"pylab.plt.figure",
"pylab.plt.xlabel"
] | [((956, 983), 'numpy.array', 'np.array', (['[-60.0, 0.0, 1.0]'], {}), '([-60.0, 0.0, 1.0])\n', (964, 983), True, 'import numpy as np\n'), ((3029, 3041), 'pylab.plt.figure', 'plt.figure', ([], {}), '()\n', (3039, 3041), False, 'from pylab import plt\n'), ((3051, 3071), 'pylab.plt.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (3062, 3071), False, 'from pylab import plt\n'), ((3080, 3100), 'pylab.plt.plot', 'plt.plot', (['ts', 'V', '"""k"""'], {}), "(ts, V, 'k')\n", (3088, 3100), False, 'from pylab import plt\n'), ((3109, 3145), 'pylab.plt.title', 'plt.title', (['"""Leaky Integrator Neuron"""'], {}), "('Leaky Integrator Neuron')\n", (3118, 3145), False, 'from pylab import plt\n'), ((3154, 3174), 'pylab.plt.ylabel', 'plt.ylabel', (['"""V (mV)"""'], {}), "('V (mV)')\n", (3164, 3174), False, 'from pylab import plt\n'), ((3184, 3204), 'pylab.plt.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (3195, 3204), False, 'from pylab import plt\n'), ((3213, 3251), 'pylab.plt.plot', 'plt.plot', (['ts', 'il', '"""g"""'], {'label': '"""$I_{L}$"""'}), "(ts, il, 'g', label='$I_{L}$')\n", (3221, 3251), False, 'from pylab import plt\n'), ((3260, 3298), 'pylab.plt.plot', 'plt.plot', (['ts', 'ik', '"""c"""'], {'label': '"""$I_{K}$"""'}), "(ts, ik, 'c', label='$I_{K}$')\n", (3268, 3298), False, 'from pylab import plt\n'), ((3307, 3328), 'pylab.plt.ylabel', 'plt.ylabel', (['"""Current"""'], {}), "('Current')\n", (3317, 3328), False, 'from pylab import plt\n'), ((3337, 3349), 'pylab.plt.legend', 'plt.legend', ([], {}), '()\n', (3347, 3349), False, 'from pylab import plt\n'), ((3359, 3379), 'pylab.plt.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (3370, 3379), False, 'from pylab import plt\n'), ((3388, 3419), 'pylab.plt.plot', 'plt.plot', (['ts', 'a', '"""c"""'], {'label': '"""a"""'}), "(ts, a, 'c', label='a')\n", (3396, 3419), False, 'from pylab import plt\n'), ((3428, 3459), 'pylab.plt.plot', 'plt.plot', (['ts', 'b', 
'"""b"""'], {'label': '"""b"""'}), "(ts, b, 'b', label='b')\n", (3436, 3459), False, 'from pylab import plt\n'), ((3468, 3494), 'pylab.plt.ylabel', 'plt.ylabel', (['"""Gating Value"""'], {}), "('Gating Value')\n", (3478, 3494), False, 'from pylab import plt\n'), ((3503, 3515), 'pylab.plt.legend', 'plt.legend', ([], {}), '()\n', (3513, 3515), False, 'from pylab import plt\n'), ((3525, 3545), 'pylab.plt.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (3536, 3545), False, 'from pylab import plt\n'), ((3554, 3585), 'pylab.plt.plot', 'plt.plot', (['ts', 'i_inj_values', '"""b"""'], {}), "(ts, i_inj_values, 'b')\n", (3562, 3585), False, 'from pylab import plt\n'), ((3594, 3614), 'pylab.plt.xlabel', 'plt.xlabel', (['"""t (ms)"""'], {}), "('t (ms)')\n", (3604, 3614), False, 'from pylab import plt\n'), ((3623, 3663), 'pylab.plt.ylabel', 'plt.ylabel', (['"""$I_{inj}$ ($\\\\mu{A}/cm^2$)"""'], {}), "('$I_{inj}$ ($\\\\mu{A}/cm^2$)')\n", (3633, 3663), False, 'from pylab import plt\n'), ((2011, 2033), 'tensorflow.stack', 'tf.stack', (['[V, a, b]', '(0)'], {}), '([V, a, b], 0)\n', (2019, 2033), True, 'import tensorflow as tf\n'), ((2067, 2086), 'numpy.array', 'np.array', (['[V, a, b]'], {}), '([V, a, b])\n', (2075, 2086), True, 'import numpy as np\n'), ((2174, 2199), 'random.uniform', 'random.uniform', (['(0.5)', '(40.0)'], {}), '(0.5, 40.0)\n', (2188, 2199), False, 'import random\n'), ((2223, 2250), 'random.uniform', 'random.uniform', (['(1e-05)', '(10.0)'], {}), '(1e-05, 10.0)\n', (2237, 2250), False, 'import random\n'), ((2273, 2300), 'random.uniform', 'random.uniform', (['(1e-05)', '(10.0)'], {}), '(1e-05, 10.0)\n', (2287, 2300), False, 'import random\n'), ((2323, 2351), 'random.uniform', 'random.uniform', (['(-70.0)', '(-45.0)'], {}), '(-70.0, -45.0)\n', (2337, 2351), False, 'import random\n'), ((2374, 2401), 'random.uniform', 'random.uniform', (['(-40.0)', '(30.0)'], {}), '(-40.0, 30.0)\n', (2388, 2401), False, 'import random\n'), ((2427, 2459), 
'random.uniform', 'random.uniform', (['MIN_TAU', 'MAX_TAU'], {}), '(MIN_TAU, MAX_TAU)\n', (2441, 2459), False, 'import random\n'), ((2489, 2525), 'random.uniform', 'random.uniform', (['MIN_SCALE', 'MAX_SCALE'], {}), '(MIN_SCALE, MAX_SCALE)\n', (2503, 2525), False, 'import random\n'), ((2553, 2579), 'random.uniform', 'random.uniform', (['(-50.0)', '(0.0)'], {}), '(-50.0, 0.0)\n', (2567, 2579), False, 'import random\n'), ((2605, 2637), 'random.uniform', 'random.uniform', (['MIN_TAU', 'MAX_TAU'], {}), '(MIN_TAU, MAX_TAU)\n', (2619, 2637), False, 'import random\n'), ((2667, 2705), 'random.uniform', 'random.uniform', (['(-MAX_SCALE)', '(-MIN_SCALE)'], {}), '(-MAX_SCALE, -MIN_SCALE)\n', (2681, 2705), False, 'import random\n'), ((2733, 2760), 'random.uniform', 'random.uniform', (['(-30.0)', '(20.0)'], {}), '(-30.0, 20.0)\n', (2747, 2760), False, 'import random\n')] |
from itertools import groupby
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
n = np.array([1, 2, 3, 4, 5, 6])

# Strong scaling
s = np.array([
    # distr, run, collection, total
    [0.5918669700622559, 529.6803040504456, 9.309041976928711, 8.99303198258082],
    [1.8108410835266113, 270.1959412097931, 15.396013259887695, 4.790057881673177],
    [2.0383732318878174, 188.6463587284088, 15.88695502281189, 3.442866106828054],
    [2.777017116546631, 158.66964769363403, 9.326416969299316, 2.846228917439779],
    [3.4988534450531006, 116.17787265777588, 16.39770245552063, 2.267911982536316],
    [6.474611043930054, 104.0037693977356, 13.058543682098389, 2.0589545647303265],
])

# Weak scaling
w = np.array([
    # distr, run, collection, total
    [0.5480248928070068, 219.1293888092041, 5.413635015487671, 3.7515245000521342],
    [1.6725523471832275, 217.75201082229614, 7.34630274772644, 3.779518397649129],
    [2.059065103530884, 209.39180660247803, 11.491988182067871, 3.7157195170720416],
    [2.629925012588501, 256.28443479537964, 16.01390767097473, 4.582146282990774],
    [3.8133208751678467, 249.47303009033203, 18.254082679748535, 4.525681483745575],
    [4.895831108093262, 434.50403213500977, 22.518120765686035, 7.698641033967336],
])


def _plot_scaling(machines, timings, title, xlabel, filename):
    """Plot per-phase and total timings against machine count and save a PNG.

    Args:
        machines: x-axis values (number of machines)
        timings: array with columns [distribution, run, collection, total];
            the total column is multiplied by 60 (presumably recorded in
            minutes) so all curves share the seconds axis
        title: figure title
        xlabel: x-axis label
        filename: output PNG path
    """
    plt.clf()
    plt.figure(figsize=(8, 8))
    plt.plot(machines, timings[:, 0], 'o', linestyle=None, label='1. Distribution')
    plt.plot(machines, timings[:, 1], 'o', linestyle=None, label='2. Run')
    plt.plot(machines, timings[:, 2], 'o', linestyle=None, label='3. Collection')
    plt.plot(machines, timings[:, 3] * 60, 'o', linestyle=None, label='Total')
    plt.legend()
    plt.xlabel(xlabel)
    plt.ylabel('Computation Time (s)')
    plt.title(title)
    plt.savefig(filename, bbox_inches='tight', dpi=300)


# The strong- and weak-scaling figures are identical except for data and labels
_plot_scaling(n, s, 'Strong Scaling (4 cores per machine)',
              'Machines', 'strong_scaling.png')
_plot_scaling(n, w, 'Weak Scaling (4 cores per machine)',
              'Machines (x1), Frames (x10)', 'weak_scaling.png')
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((122, 150), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (130, 150), True, 'import numpy as np\n'), ((172, 693), 'numpy.array', 'np.array', (['[[0.5918669700622559, 529.6803040504456, 9.309041976928711, \n 8.99303198258082], [1.8108410835266113, 270.1959412097931, \n 15.396013259887695, 4.790057881673177], [2.0383732318878174, \n 188.6463587284088, 15.88695502281189, 3.442866106828054], [\n 2.777017116546631, 158.66964769363403, 9.326416969299316, \n 2.846228917439779], [3.4988534450531006, 116.17787265777588, \n 16.39770245552063, 2.267911982536316], [6.474611043930054, \n 104.0037693977356, 13.058543682098389, 2.0589545647303265]]'], {}), '([[0.5918669700622559, 529.6803040504456, 9.309041976928711, \n 8.99303198258082], [1.8108410835266113, 270.1959412097931, \n 15.396013259887695, 4.790057881673177], [2.0383732318878174, \n 188.6463587284088, 15.88695502281189, 3.442866106828054], [\n 2.777017116546631, 158.66964769363403, 9.326416969299316, \n 2.846228917439779], [3.4988534450531006, 116.17787265777588, \n 16.39770245552063, 2.267911982536316], [6.474611043930054, \n 104.0037693977356, 13.058543682098389, 2.0589545647303265]])\n', (180, 693), True, 'import numpy as np\n'), ((741, 1266), 'numpy.array', 'np.array', (['[[0.5480248928070068, 219.1293888092041, 5.413635015487671, \n 3.7515245000521342], [1.6725523471832275, 217.75201082229614, \n 7.34630274772644, 3.779518397649129], [2.059065103530884, \n 209.39180660247803, 11.491988182067871, 3.7157195170720416], [\n 2.629925012588501, 256.28443479537964, 16.01390767097473, \n 4.582146282990774], [3.8133208751678467, 249.47303009033203, \n 18.254082679748535, 4.525681483745575], [4.895831108093262, \n 434.50403213500977, 22.518120765686035, 7.698641033967336]]'], {}), '([[0.5480248928070068, 219.1293888092041, 5.413635015487671, \n 3.7515245000521342], [1.6725523471832275, 217.75201082229614, \n 7.34630274772644, 3.779518397649129], [2.059065103530884, \n 
209.39180660247803, 11.491988182067871, 3.7157195170720416], [\n 2.629925012588501, 256.28443479537964, 16.01390767097473, \n 4.582146282990774], [3.8133208751678467, 249.47303009033203, \n 18.254082679748535, 4.525681483745575], [4.895831108093262, \n 434.50403213500977, 22.518120765686035, 7.698641033967336]])\n', (749, 1266), True, 'import numpy as np\n'), ((1296, 1305), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1303, 1305), True, 'import matplotlib.pyplot as plt\n'), ((1306, 1332), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1316, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1333, 1399), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 's[:, 0]', '"""o"""'], {'linestyle': 'None', 'label': '"""1. Distribution"""'}), "(n, s[:, 0], 'o', linestyle=None, label='1. Distribution')\n", (1341, 1399), True, 'import matplotlib.pyplot as plt\n'), ((1400, 1457), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 's[:, 1]', '"""o"""'], {'linestyle': 'None', 'label': '"""2. Run"""'}), "(n, s[:, 1], 'o', linestyle=None, label='2. Run')\n", (1408, 1457), True, 'import matplotlib.pyplot as plt\n'), ((1458, 1522), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 's[:, 2]', '"""o"""'], {'linestyle': 'None', 'label': '"""3. Collection"""'}), "(n, s[:, 2], 'o', linestyle=None, label='3. 
Collection')\n", (1466, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1584), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(s[:, 3] * 60)', '"""o"""'], {'linestyle': 'None', 'label': '"""Total"""'}), "(n, s[:, 3] * 60, 'o', linestyle=None, label='Total')\n", (1531, 1584), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1597), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1595, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1620), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Machines"""'], {}), "('Machines')\n", (1608, 1620), True, 'import matplotlib.pyplot as plt\n'), ((1621, 1655), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Computation Time (s)"""'], {}), "('Computation Time (s)')\n", (1631, 1655), True, 'import matplotlib.pyplot as plt\n'), ((1706, 1755), 'matplotlib.pyplot.title', 'plt.title', (['"""Strong Scaling (4 cores per machine)"""'], {}), "('Strong Scaling (4 cores per machine)')\n", (1715, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1840), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""strong_scaling.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('strong_scaling.png', bbox_inches='tight', dpi=300)\n", (1788, 1840), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1851), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1849, 1851), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1878), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1862, 1878), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1945), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'w[:, 0]', '"""o"""'], {'linestyle': 'None', 'label': '"""1. Distribution"""'}), "(n, w[:, 0], 'o', linestyle=None, label='1. Distribution')\n", (1887, 1945), True, 'import matplotlib.pyplot as plt\n'), ((1946, 2003), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'w[:, 1]', '"""o"""'], {'linestyle': 'None', 'label': '"""2. 
Run"""'}), "(n, w[:, 1], 'o', linestyle=None, label='2. Run')\n", (1954, 2003), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2068), 'matplotlib.pyplot.plot', 'plt.plot', (['n', 'w[:, 2]', '"""o"""'], {'linestyle': 'None', 'label': '"""3. Collection"""'}), "(n, w[:, 2], 'o', linestyle=None, label='3. Collection')\n", (2012, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2069, 2130), 'matplotlib.pyplot.plot', 'plt.plot', (['n', '(w[:, 3] * 60)', '"""o"""'], {'linestyle': 'None', 'label': '"""Total"""'}), "(n, w[:, 3] * 60, 'o', linestyle=None, label='Total')\n", (2077, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2131, 2143), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2141, 2143), True, 'import matplotlib.pyplot as plt\n'), ((2144, 2185), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Machines (x1), Frames (x10)"""'], {}), "('Machines (x1), Frames (x10)')\n", (2154, 2185), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Computation Time (s)"""'], {}), "('Computation Time (s)')\n", (2196, 2220), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2318), 'matplotlib.pyplot.title', 'plt.title', (['"""Weak Scaling (4 cores per machine)"""'], {}), "('Weak Scaling (4 cores per machine)')\n", (2280, 2318), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""weak_scaling.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('weak_scaling.png', bbox_inches='tight', dpi=300)\n", (2351, 2401), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 13:40:36 2018
@author: uqhmin
"""
import numpy as np
import cv2
from skimage.measure import label, regionprops
class Preprocess:
    """Container for a preprocessed mammogram.

    Holds the raw (possibly cropped) image, its intensity-rescaled version,
    the breast mask, and the lesion mask.
    """

    def __init__(self, rawim, im, breast_mask, lesion_mask):
        """Store the raw image, rescaled image, and the two masks."""
        self.lesion_mask = lesion_mask
        self.mask = breast_mask
        self.image = im
        self.raw = rawim
def extract_breast_profile(image, lesion_mask, if_crop):
    """Segment the breast region from a mammogram.

    Keeps only the largest connected non-zero component (assumed to be the
    breast), optionally crops image and masks to its bounding box, and
    rescales intensities to uint16.

    Args:
        image: raw mammogram array; non-breast background is assumed to be 0
        lesion_mask: lesion annotation mask aligned with `image`
        if_crop: when 1, crop image and masks to the breast bounding box

    Returns:
        Preprocess object holding the raw image, rescaled image, breast
        mask, and lesion mask.
    """
    breast_mask = np.zeros(np.shape(image))
    breast_mask[image > 0] = 1
    labelim = label(breast_mask)
    props = regionprops(labelim)
    # Keep only the largest connected component (the breast). `max` returns
    # the first maximum, matching the original strict-comparison loop.
    largest = max(props, key=lambda p: p.filled_area)
    breast_mask = np.zeros(np.shape(image))
    breast_mask[labelim == largest.label] = 1
    # Re-label the cleaned single-component mask to get its bounding box
    labelim = label(breast_mask)
    props = regionprops(labelim)
    boundingbox = props[0].bbox
    if if_crop == 1:
        # Crop mask, image, and lesion mask to the breast bounding box
        breast_mask = breast_mask[boundingbox[0]:boundingbox[2], boundingbox[1]:boundingbox[3]]
        breast_raw_image = image[boundingbox[0]:boundingbox[2], boundingbox[1]:boundingbox[3]]
        lesion_mask = lesion_mask[boundingbox[0]:boundingbox[2], boundingbox[1]:boundingbox[3]]
    else:
        breast_raw_image = image
    breast_image = rescale2uint16(breast_raw_image, breast_mask)
    return Preprocess(breast_raw_image, breast_image, breast_mask, lesion_mask)
def rescale2uint8(image, breast_mask):
    """Contrast-stretch `image` inside the breast mask to the uint8 range.

    Intensities between the 0.2nd and 99.8th percentiles of the in-mask
    pixels are mapped linearly onto [0, 255]; values outside that band are
    clipped, and background (mask == 0) is set to 0.
    """
    inside = image[breast_mask > 0]
    # Robust bounds: ignore the extreme 0.2% tails when stretching
    hi = np.percentile(inside, 99.8)
    lo = np.percentile(inside, 0.2)
    stretched = 255 * (image - lo) / (hi - lo)
    stretched[breast_mask == 0] = 0
    stretched[stretched < 0] = 0
    stretched[stretched > 255] = 255
    return np.uint8(stretched)
def rescale2uint16(image, breast_mask):
    """Contrast-stretch `image` inside the breast mask to the uint16 range.

    Intensities between the 0.2nd and 99.8th percentiles of the in-mask
    pixels are mapped linearly onto [0, 65535]; values outside that band are
    clipped, and background (mask == 0) is set to 0.
    """
    inside = image[breast_mask > 0]
    # Robust bounds: ignore the extreme 0.2% tails when stretching
    hi = np.percentile(inside, 99.8)
    lo = np.percentile(inside, 0.2)
    stretched = 65535 * (image - lo) / (hi - lo)
    stretched[breast_mask == 0] = 0
    stretched[stretched < 0] = 0
    stretched[stretched > 65535] = 65535
    return np.uint16(stretched)
| [
"numpy.uint8",
"numpy.percentile",
"numpy.shape",
"skimage.measure.label",
"numpy.uint16",
"skimage.measure.regionprops"
] | [((1861, 1899), 'numpy.percentile', 'np.percentile', (['intensity_in_mask', '(99.8)'], {}), '(intensity_in_mask, 99.8)\n', (1874, 1899), True, 'import numpy as np\n'), ((1937, 1974), 'numpy.percentile', 'np.percentile', (['intensity_in_mask', '(0.2)'], {}), '(intensity_in_mask, 0.2)\n', (1950, 1974), True, 'import numpy as np\n'), ((2183, 2198), 'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (2191, 2198), True, 'import numpy as np\n'), ((2375, 2413), 'numpy.percentile', 'np.percentile', (['intensity_in_mask', '(99.8)'], {}), '(intensity_in_mask, 99.8)\n', (2388, 2413), True, 'import numpy as np\n'), ((2451, 2488), 'numpy.percentile', 'np.percentile', (['intensity_in_mask', '(0.2)'], {}), '(intensity_in_mask, 0.2)\n', (2464, 2488), True, 'import numpy as np\n'), ((2703, 2719), 'numpy.uint16', 'np.uint16', (['image'], {}), '(image)\n', (2712, 2719), True, 'import numpy as np\n'), ((565, 583), 'skimage.measure.label', 'label', (['breast_mask'], {}), '(breast_mask)\n', (570, 583), False, 'from skimage.measure import label, regionprops\n'), ((602, 622), 'skimage.measure.regionprops', 'regionprops', (['labelim'], {}), '(labelim)\n', (613, 622), False, 'from skimage.measure import label, regionprops\n'), ((967, 985), 'skimage.measure.label', 'label', (['breast_mask'], {}), '(breast_mask)\n', (972, 985), False, 'from skimage.measure import label, regionprops\n'), ((1011, 1031), 'skimage.measure.regionprops', 'regionprops', (['labelim'], {}), '(labelim)\n', (1022, 1031), False, 'from skimage.measure import label, regionprops\n'), ((487, 502), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (495, 502), True, 'import numpy as np\n'), ((892, 907), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (900, 907), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from repoze.lru import LRUCache
from opensfm import features as ft
logger = logging.getLogger(__name__)
class FeatureLoader(object):
    """LRU-cached access to per-image feature points, descriptors, colors,
    masks, bag-of-words vectors, and FLANN indexes."""

    def __init__(self):
        self.points_cache = LRUCache(1000)
        self.colors_cache = LRUCache(1000)
        self.features_cache = LRUCache(200)
        self.words_cache = LRUCache(200)
        self.masks_cache = LRUCache(1000)
        self.index_cache = LRUCache(200)

    def clear_cache(self):
        """Empty every cache."""
        self.points_cache.clear()
        self.colors_cache.clear()
        self.features_cache.clear()
        self.words_cache.clear()
        self.masks_cache.clear()
        # Bug fix: the index cache was previously never cleared, keeping
        # stale FLANN indexes alive after a cache reset
        self.index_cache.clear()

    def load_points_colors(self, data, image):
        """Return (points, colors) for `image`, loading and caching on miss."""
        points = self.points_cache.get(image)
        colors = self.colors_cache.get(image)
        if points is None or colors is None:
            points, _, colors = self._load_features_nocache(data, image)
            self.points_cache.put(image, points)
            self.colors_cache.put(image, colors)
        return points, colors

    def load_masks(self, data, image):
        """Return the feature mask for `image`, caching on miss."""
        points, _ = self.load_points_colors(data, image)
        masks = self.masks_cache.get(image)
        if masks is None:
            masks = data.load_features_mask(image, points[:, :2])
            self.masks_cache.put(image, masks)
        return masks

    def load_features_index(self, data, image, features):
        """Return a FLANN index for `features`.

        Loads a stored index when `features` matches the image's own feature
        count and no cached index exists; rebuilds the index when the counts
        differ; otherwise reuses the cached index.
        """
        index = self.index_cache.get(image)
        # Bug fix: compare against the image's feature array, not the
        # 3-tuple (points, features, colors), whose len() is always 3
        _, current_features, _ = self.load_points_features_colors(data, image)
        use_load = len(current_features) == len(features) and index is None
        use_rebuild = len(current_features) != len(features)
        if use_load:
            index = data.load_feature_index(image, features)
        if use_rebuild:
            index = ft.build_flann_index(features, data.config)
        if use_load or use_rebuild:
            self.index_cache.put(image, index)
        return index

    def load_points_features_colors(self, data, image):
        """Return (points, features, colors) for `image`, caching on miss."""
        points = self.points_cache.get(image)
        features = self.features_cache.get(image)
        colors = self.colors_cache.get(image)
        if points is None or features is None or colors is None:
            points, features, colors = self._load_features_nocache(data, image)
            self.points_cache.put(image, points)
            self.features_cache.put(image, features)
            self.colors_cache.put(image, colors)
        return points, features, colors

    def load_words(self, data, image):
        """Return the bag-of-words vector for `image`, caching on miss."""
        words = self.words_cache.get(image)
        if words is None:
            words = data.load_words(image)
            self.words_cache.put(image, words)
        return words

    def _load_features_nocache(self, data, image):
        """Load features straight from `data`, normalizing point coordinates."""
        points, features, colors = data.load_features(image)
        if points is None:
            logger.error('Could not load features for image {}'.format(image))
        else:
            points = np.array(points[:, :3], dtype=float)
        return points, features, colors
"opensfm.features.build_flann_index",
"repoze.lru.LRUCache",
"numpy.array",
"logging.getLogger"
] | [((264, 291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'import logging\n'), ((375, 389), 'repoze.lru.LRUCache', 'LRUCache', (['(1000)'], {}), '(1000)\n', (383, 389), False, 'from repoze.lru import LRUCache\n'), ((418, 432), 'repoze.lru.LRUCache', 'LRUCache', (['(1000)'], {}), '(1000)\n', (426, 432), False, 'from repoze.lru import LRUCache\n'), ((463, 476), 'repoze.lru.LRUCache', 'LRUCache', (['(200)'], {}), '(200)\n', (471, 476), False, 'from repoze.lru import LRUCache\n'), ((504, 517), 'repoze.lru.LRUCache', 'LRUCache', (['(200)'], {}), '(200)\n', (512, 517), False, 'from repoze.lru import LRUCache\n'), ((545, 559), 'repoze.lru.LRUCache', 'LRUCache', (['(1000)'], {}), '(1000)\n', (553, 559), False, 'from repoze.lru import LRUCache\n'), ((587, 600), 'repoze.lru.LRUCache', 'LRUCache', (['(200)'], {}), '(200)\n', (595, 600), False, 'from repoze.lru import LRUCache\n'), ((1925, 1968), 'opensfm.features.build_flann_index', 'ft.build_flann_index', (['features', 'data.config'], {}), '(features, data.config)\n', (1945, 1968), True, 'from opensfm import features as ft\n'), ((3083, 3119), 'numpy.array', 'np.array', (['points[:, :3]'], {'dtype': 'float'}), '(points[:, :3], dtype=float)\n', (3091, 3119), True, 'import numpy as np\n')] |
import os
import os.path as osp
import pickle
import sys
from collections import defaultdict
import numpy as np
from matplotlib import pyplot as plt
import experiments
import logs as loglib
from source.env.lib.enums import Neon
from source.env.lib.log import InkWell
import seaborn as sns
sns.set()
def plot(x, idxs, label, idx, path):
colors = Neon.color12()
c = colors[idx % 12]
loglib.plot(x, inds=idxs, label=str(idx), c=c.norm)
loglib.godsword()
loglib.save(path + label + '.png')
plt.close()
def plots(ticks, x, label, path, split):
colors = Neon.color12()
for idx, item in enumerate(x.items()):
annID, val = item
c = colors[idx % 12]
idxs, val = compress(val, split)
_, tcks = compress(ticks[idx], split)
loglib.plot(val, inds=idxs, label=str(annID), c=c.norm)
loglib.godsword()
loglib.save(path + label + '.png')
plt.close()
def compress(x, split):
rets, idxs = [], []
if split == 'train':
n = 1 + len(x) // 25
else:
n = 1 + len(x) // 25
for idx in range(0, len(x) - n, n):
rets.append(np.mean(x[idx:(idx + n)]))
idxs.append(idx)
return 10 * np.array(idxs), rets
def popPlots(popLogs, path, split):
idx = 0
print(path)
ticks = popLogs.pop('tick')
for key, val in popLogs.items():
print(key)
plots(ticks, val, str(key), path, split)
idx += 1
def flip(popLogs):
ret = defaultdict(dict)
for annID, logs in popLogs.items():
for key, log in logs.items():
if annID not in ret[key]:
ret[key][annID] = []
if type(log) != list:
ret[key][annID].append(log)
else:
ret[key][annID] += log
return ret
def group(blobs, idmaps):
rets = defaultdict(list)
for blob in blobs:
groupID = idmaps[blob.annID]
rets[groupID].append(blob)
return rets
def mergePops(blobs, idMap):
blobs = group(blobs, idMap)
pops = defaultdict(list)
for groupID, blobList in blobs.items():
pops[groupID] += list(blobList)
return pops
def individual(blobs, logDir, name, accum, split):
savedir = logDir + name + '/' + split + '/'
if not osp.exists(savedir):
os.makedirs(savedir)
blobs = mergePops(blobs, accum)
minLength = min([len(v) for v in blobs.values()])
blobs = {k: v[:minLength] for k, v in blobs.items()}
popLogs = {}
for annID, blobList in blobs.items():
logs, blobList = {}, list(blobList)
logs = {**logs, **InkWell.lifetime(blobList)}
logs = {**logs, **InkWell.reward(blobList)}
logs = {**logs, **InkWell.value(blobList)}
logs = {**logs, **InkWell.tick(blobList)}
logs = {**logs, **InkWell.contact(blobList)}
logs = {**logs, **InkWell.attack(blobList)}
popLogs[annID] = logs
popLogs = flip(popLogs)
popLogs = prepare_avg(popLogs, 'reward')
popPlots(popLogs, savedir, split)
def prepare_avg(dct, key):
dct[key + '_avg'] = {}
lst = list(dct[key].values())
length = min([len(lst[i]) for i in range(len(lst))])
for i in range(len(lst)):
lst[i] = lst[i][:length]
dct[key + '_avg'][0] = np.mean(lst, axis=0)
return dct
def makeAccum(config, form='single'):
assert form in 'pops single split'.split()
if form == 'pops':
return dict((idx, idx) for idx in range(config.NPOP))
elif form == 'single':
return dict((idx, 0) for idx in range(config.NPOP))
elif form == 'split':
pop1 = dict((idx, 0) for idx in range(config.NPOP1))
pop2 = dict((idx, 0) for idx in range(config.NPOP2))
return {**pop1, **pop2}
if __name__ == '__main__':
logDir = 'resource/exps/'
logName = '/model/logs.p'
for name, config in experiments.exps.items():
try:
with open(logDir + name + logName, 'rb') as f:
dat = []
idx = 0
while True:
idx += 1
try:
dat += pickle.load(f)
except EOFError as e:
break
print('Blob length: ', idx)
split = 'test' if config.TEST else 'train'
accum = makeAccum(config, 'pops')
individual(dat, logDir, name, accum, split)
print('Log success: ', name)
except Exception as err:
print(str(err))
| [
"source.env.lib.log.InkWell.attack",
"os.makedirs",
"source.env.lib.log.InkWell.lifetime",
"source.env.lib.log.InkWell.tick",
"matplotlib.pyplot.close",
"logs.godsword",
"source.env.lib.enums.Neon.color12",
"logs.save",
"os.path.exists",
"source.env.lib.log.InkWell.reward",
"source.env.lib.log.I... | [((291, 300), 'seaborn.set', 'sns.set', ([], {}), '()\n', (298, 300), True, 'import seaborn as sns\n'), ((353, 367), 'source.env.lib.enums.Neon.color12', 'Neon.color12', ([], {}), '()\n', (365, 367), False, 'from source.env.lib.enums import Neon\n'), ((453, 470), 'logs.godsword', 'loglib.godsword', ([], {}), '()\n', (468, 470), True, 'import logs as loglib\n'), ((475, 509), 'logs.save', 'loglib.save', (["(path + label + '.png')"], {}), "(path + label + '.png')\n", (486, 509), True, 'import logs as loglib\n'), ((514, 525), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (523, 525), True, 'from matplotlib import pyplot as plt\n'), ((582, 596), 'source.env.lib.enums.Neon.color12', 'Neon.color12', ([], {}), '()\n', (594, 596), False, 'from source.env.lib.enums import Neon\n'), ((850, 867), 'logs.godsword', 'loglib.godsword', ([], {}), '()\n', (865, 867), True, 'import logs as loglib\n'), ((872, 906), 'logs.save', 'loglib.save', (["(path + label + '.png')"], {}), "(path + label + '.png')\n", (883, 906), True, 'import logs as loglib\n'), ((911, 922), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (920, 922), True, 'from matplotlib import pyplot as plt\n'), ((1467, 1484), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1478, 1484), False, 'from collections import defaultdict\n'), ((1827, 1844), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1838, 1844), False, 'from collections import defaultdict\n'), ((2030, 2047), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2041, 2047), False, 'from collections import defaultdict\n'), ((3251, 3271), 'numpy.mean', 'np.mean', (['lst'], {'axis': '(0)'}), '(lst, axis=0)\n', (3258, 3271), True, 'import numpy as np\n'), ((3839, 3863), 'experiments.exps.items', 'experiments.exps.items', ([], {}), '()\n', (3861, 3863), False, 'import experiments\n'), ((2260, 2279), 'os.path.exists', 'osp.exists', (['savedir'], {}), 
'(savedir)\n', (2270, 2279), True, 'import os.path as osp\n'), ((2289, 2309), 'os.makedirs', 'os.makedirs', (['savedir'], {}), '(savedir)\n', (2300, 2309), False, 'import os\n'), ((1126, 1149), 'numpy.mean', 'np.mean', (['x[idx:idx + n]'], {}), '(x[idx:idx + n])\n', (1133, 1149), True, 'import numpy as np\n'), ((1194, 1208), 'numpy.array', 'np.array', (['idxs'], {}), '(idxs)\n', (1202, 1208), True, 'import numpy as np\n'), ((2587, 2613), 'source.env.lib.log.InkWell.lifetime', 'InkWell.lifetime', (['blobList'], {}), '(blobList)\n', (2603, 2613), False, 'from source.env.lib.log import InkWell\n'), ((2641, 2665), 'source.env.lib.log.InkWell.reward', 'InkWell.reward', (['blobList'], {}), '(blobList)\n', (2655, 2665), False, 'from source.env.lib.log import InkWell\n'), ((2693, 2716), 'source.env.lib.log.InkWell.value', 'InkWell.value', (['blobList'], {}), '(blobList)\n', (2706, 2716), False, 'from source.env.lib.log import InkWell\n'), ((2744, 2766), 'source.env.lib.log.InkWell.tick', 'InkWell.tick', (['blobList'], {}), '(blobList)\n', (2756, 2766), False, 'from source.env.lib.log import InkWell\n'), ((2794, 2819), 'source.env.lib.log.InkWell.contact', 'InkWell.contact', (['blobList'], {}), '(blobList)\n', (2809, 2819), False, 'from source.env.lib.log import InkWell\n'), ((2847, 2871), 'source.env.lib.log.InkWell.attack', 'InkWell.attack', (['blobList'], {}), '(blobList)\n', (2861, 2871), False, 'from source.env.lib.log import InkWell\n'), ((4099, 4113), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4110, 4113), False, 'import pickle\n')] |
# coding: utf-8
# # CareerCon 2019 - Help Navigate Robots
# ## Robots are smart… by design !!
#
# 
#
# ---
#
# Robots are smart… by design. To fully understand and properly navigate a task, however, they need input about their environment.
#
# In this competition, you’ll help robots recognize the floor surface they’re standing on using data collected from Inertial Measurement Units (IMU sensors).
#
# We’ve collected IMU sensor data while driving a small mobile robot over different floor surfaces on the university premises. The task is to predict which one of the nine floor types (carpet, tiles, concrete) the robot is on using sensor data such as acceleration and velocity. Succeed and you'll help improve the navigation of robots without assistance across many different surfaces, so they won’t fall down on the job.
#
# ### Its a golden chance to help humanity, by helping Robots !
#
# <br>
# <img src="https://media2.giphy.com/media/EizPK3InQbrNK/giphy.gif" border="1" width="400" height="300">
# <br>
# # DATA
# **X_[train/test].csv** - the input data, covering 10 sensor channels and 128 measurements per time series plus three ID columns:
#
# - ```row_id```: The ID for this row.
#
# - ```series_id: ID``` number for the measurement series. Foreign key to y_train/sample_submission.
#
# - ```measurement_number```: Measurement number within the series.
#
# The orientation channels encode the current angles how the robot is oriented as a quaternion (see Wikipedia). Angular velocity describes the angle and speed of motion, and linear acceleration components describe how the speed is changing at different times. The 10 sensor channels are:
#
# ```
# orientation_X
#
# orientation_Y
#
# orientation_Z
#
# orientation_W
#
# angular_velocity_X
#
# angular_velocity_Y
#
# angular_velocity_Z
#
# linear_acceleration_X
#
# linear_acceleration_Y
#
# linear_acceleration_Z
# ```
#
# **y_train.csv** - the surfaces for training set.
#
# - ```series_id```: ID number for the measurement series.
#
# - ```group_id```: ID number for all of the measurements taken in a recording session. Provided for the training set only, to enable more cross validation strategies.
#
# - ```surface```: the target for this competition.
#
# **sample_submission.csv** - a sample submission file in the correct format.
# ### Load packages
# In[1]:
# --- Library imports and global notebook setup ---
import numpy as np
import pandas as pd
import os
from time import time
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from matplotlib import rcParams
get_ipython().run_line_magic('matplotlib', 'inline')
# Shared label encoder used later to encode the surface target.
le = preprocessing.LabelEncoder()
# NOTE(review): several imports below are duplicates of earlier lines
# (jit, preprocessing, KFold, accuracy_score, confusion_matrix); harmless,
# but they could be deduplicated.
from numba import jit
import itertools
from seaborn import countplot,lineplot, barplot
from numba import jit
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn import preprocessing
from scipy.stats import randint as sp_randint
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import matplotlib.style as style
style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
import gc
gc.enable()
# Shell listings of the Kaggle input directories (notebook-only side effects).
get_ipython().system('ls ../input/')
get_ipython().system('ls ../input/robots-best-submission')
print ("Ready !")
# ### Load data
# In[2]:
# Load the competition CSVs.
# NOTE(review): `data` and `tr` are two copies of the same X_train.csv —
# `tr` is used later for the per-series example plots and correlation heatmap.
data = pd.read_csv('../input/career-con-2019/X_train.csv')
tr = pd.read_csv('../input/career-con-2019/X_train.csv')
sub = pd.read_csv('../input/career-con-2019/sample_submission.csv')
test = pd.read_csv('../input/career-con-2019/X_test.csv')
target = pd.read_csv('../input/career-con-2019/y_train.csv')
print ("Data is ready !!")
# # Data exploration
# In[3]:
data.head()
# In[4]:
test.head()
# In[5]:
target.head()
# In[6]:
# Number of distinct measurement indices per series (128 per series).
len(data.measurement_number.value_counts())
# Each series has 128 measurements.
#
# **1 serie = 128 measurements**.
#
# For example, serie with series_id=0 has a surface = *fin_concrete* and 128 measurements.
# ### describe (basic stats)
# In[7]:
data.describe()
# In[8]:
test.describe()
# In[9]:
target.describe()
# ### There is missing data in test and train data
# In[10]:
# Per-column missing-value count and share for the TRAINING frame.
totalt = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([totalt, percent], axis=1, keys=['Total', 'Percent'])
print ("Missing Data at Training")
missing_data.tail()
# In[11]:
# Per-column missing-value count and share for the TEST frame
# (mirrors the training-set cell above).
totalt = test.isnull().sum().sort_values(ascending=False)
# BUG FIX: the percentage denominator previously used `data.isnull().count()`
# (the TRAINING row count). The null share of `test` must be divided by the
# number of rows in `test` itself.
percent = (test.isnull().sum()/test.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([totalt, percent], axis=1, keys=['Total', 'Percent'])
print ("Missing Data at Test")
missing_data.tail()
# In[12]:
# The test set holds 768 more rows than train, i.e. 6 extra series of 128.
print ("Test has ", (test.shape[0]-data.shape[0])/128, "series more than Train (later I will prove it) = 768 registers")
dif = test.shape[0]-data.shape[0]
print ("Let's check this extra 6 series")
test.tail(768).describe()
# If we look at the features: orientation, angular velocity and linear acceleration, we can see big differences between **max** and **min** from entire test vs 6 extra test's series (see **linear_acceleration_Z**).
#
# Obviously we are comparing 3810 series vs 6 series so this is not a big deal.
# ### goup_id will be important !!
# In[13]:
# Each recording session (group_id) maps to exactly one surface (max == 1).
target.groupby('group_id').surface.nunique().max()
# In[14]:
target['group_id'].nunique()
# **73 groups**
# **Each group_id is a unique recording session and has only one surface type **
# In[15]:
# Class balance of the target surface labels.
sns.set(style='darkgrid')
sns.countplot(y = 'surface',
              data = target,
              order = target['surface'].value_counts().index)
plt.show()
# ### Target feature - surface and group_id distribution
# Let's show now the distribution of target feature - surface and group_id.
# by @gpreda.
# In[16]:
# Heatmap: how many series each (surface, group_id) pair contains.
fig, ax = plt.subplots(1,1,figsize=(26,8))
tmp = pd.DataFrame(target.groupby(['group_id', 'surface'])['series_id'].count().reset_index())
m = tmp.pivot(index='surface', columns='group_id', values='series_id')
s = sns.heatmap(m, linewidths=.1, linecolor='black', annot=True, cmap="YlGnBu")
s.set_title('Number of surface category per group_id', size=16)
plt.show()
# We need to classify on which surface our robot is standing.
#
# Multi-class Multi-output
#
# 9 classes (suface)
# In[17]:
# Series count per recording session.
plt.figure(figsize=(23,5))
sns.set(style="darkgrid")
countplot(x="group_id", data=target, order = target['group_id'].value_counts().index)
plt.show()
# **So, we have 3810 train series, and 3816 test series.
# Let's engineer some features!**
#
# ## Example: Series 1
#
# Let's have a look at the values of features in a single time-series, for example series 1 ```series_id=0```
#
# Click to see all measurements of the **first series**
# In[18]:
# First 128 rows of tr == all measurements of series_id 0.
serie1 = tr.head(128)
serie1.head()
# In[19]:
serie1.describe()
# In[20]:
# One subplot per sensor channel (columns 3+ are the 10 sensor columns).
plt.figure(figsize=(26, 16))
for i, col in enumerate(serie1.columns[3:]):
    plt.subplot(3, 4, i + 1)
    plt.plot(serie1[col])
    plt.title(col)
# In this example, we can see a quite interesting performance:
# 1. Orientation X increases
# 2. Orientation Y decreases
# 3. We don't see any kind of pattern except for linear_acceleration_Y
#
# And we know that in this series, the robot moved throuh "fine_concrete".
# In[21]:
target.head(1)
# In[22]:
# Free the example frame.
del serie1
gc.collect()
# ## Visualizing Series
#
# Before, I showed you as an example the series 1.
#
# **This code allows you to visualize any series.**
#
# From: *Code Snippet For Visualizing Series Id by @shaz13*
# In[23]:
# Index the training frame by series_id for fast per-series lookup in plotSeries.
series_dict = {}
for series in (data['series_id'].unique()):
    series_dict[series] = data[data['series_id'] == series]
# In[24]:
def plotSeries(series_id):
    """Plot every sensor channel of one series on a 3x4 grid.

    Prints the series' surface label, then draws one subplot per sensor
    column, color-coded by sensor family: orientation -> red,
    angular velocity -> green, linear acceleration -> blue.
    Relies on the module-level ``target`` and ``series_dict`` globals.
    """
    style.use('ggplot')
    plt.figure(figsize=(28, 16))
    print(target[target['series_id'] == series_id]['surface'].values[0].title())
    family_colors = {'o': 'red', 'a': 'green'}
    for idx, col in enumerate(series_dict[series_id].columns[3:]):
        color = family_colors.get(col[0], 'blue')
        # Skip grid cell 8 so the acceleration row starts on a fresh line.
        slot = idx + 1 if idx < 7 else idx + 2
        plt.subplot(3, 4, slot)
        plt.plot(series_dict[series_id][col], color=color, linewidth=3)
        plt.title(col)
# **Now, Let's see code for series 15 ( is an example, try what you want)**
# In[25]:
# Example: visualize series 15.
id_series = 15
plotSeries(id_series)
# In[26]:
del series_dict
gc.collect()
# <br>
# ### Correlations (Part I)
# In[27]:
# Pearson correlation heatmap of the 10 sensor channels (train).
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(tr.iloc[:,3:].corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
# **Correlations test (click "code")**
# In[28]:
# Same heatmap for the test set, to compare distributions.
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(test.iloc[:,3:].corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
# Well, this is immportant, there is a **strong correlation** between:
# - angular_velocity_Z and angular_velocity_Y
# - orientation_X and orientation_Y
# - orientation_Y and orientation_Z
#
# Moreover, test has different correlations than training, for example:
#
# - angular_velocity_Z and orientation_X: -0.1(training) and 0.1(test). Anyway, is too small in both cases, it should not be a problem.
# ## Fourier Analysis
#
# My hope was, that different surface types yield (visible) differences in the frequency spectrum of the sensor measurements.
#
# Machine learning techniques might learn frequency filters on their own, but why don't give the machine a little head start? So I computed the the cyclic FFT for the angular velocity and linear acceleration sensors and plotted mean and standard deviation of the absolute values of the frequency components per training surface category (leaving out the frequency 0 (i.e. constants like sensor bias, earth gravity, ...).
#
# The sensors show some different frequency characterists (see plots below), but unfortunately the surface categories have all similar (to the human eye) shapes, varying mostly in total power, and the standard deviations are high (compared to differences in the means). So there are no nice strong characteristic peaks for surface types. But that does not mean, that there is nothing detectable by more sophisticated statistical methods.
#
# This article http://www.kaggle.com/christoffer/establishing-sampling-frequency makes a convincing case, that the sampling frequency is around 400Hz, so according to that you would see the frequency range to 3-200 Hz in the diagrams (and aliased higher frequencies).
#
# by [@trohwer64](https://www.kaggle.com/trohwer64)
# In[29]:
get_ipython().system('ls ../input')
# In[30]:
# Fresh copies of the raw training data for the Fourier analysis below.
train_x = pd.read_csv('../input/career-con-2019/X_train.csv')
train_y = pd.read_csv('../input/career-con-2019/y_train.csv')
# In[31]:
import math
def prepare_data(t):
    """Collapse each series into array-valued columns plus FFT magnitudes.

    Groups the raw measurement frame by ``series_id``, stores each sensor's
    128 samples as one array per series (columns lx/ly/lz = linear
    acceleration, ax/ay/az = angular velocity), then adds ``*_f`` columns
    holding the amplitude spectrum: |FFT|/sqrt(128), frequency bins 1..64
    (bin 0 — the DC component / sensor bias — is dropped).
    """
    sensor_map = {
        'lx': 'linear_acceleration_X',
        'ly': 'linear_acceleration_Y',
        'lz': 'linear_acceleration_Z',
        'ax': 'angular_velocity_X',
        'ay': 'angular_velocity_Y',
        'az': 'angular_velocity_Z',
    }

    def collect(group):
        # One row per series; each cell is the full ordered sample array.
        ordered = group.sort_values(by=['measurement_number'])
        return pd.DataFrame({short: [ordered[full].values]
                             for short, full in sensor_map.items()})

    t = t.groupby('series_id').apply(collect)

    def mfft(signal):
        # Normalized amplitude spectrum without the DC bin.
        spectrum = np.absolute(np.fft.fft(signal)) / math.sqrt(128.0)
        return list(spectrum)[1:65]

    for short in sensor_map:
        t[short + '_f'] = [mfft(arr) for arr in t[short].values]
    return t
# In[32]:
# Build per-series arrays + spectra, attach labels, rename surface -> y.
t=prepare_data(train_x)
t=pd.merge(t,train_y[['series_id','surface','group_id']],on='series_id')
t=t.rename(columns={"surface": "y"})
# In[33]:
def aggf(d, feature):
    """Element-wise mean and standard deviation of an array-valued column.

    ``d[feature]`` holds one array per row (e.g. a 64-bin spectrum); the
    result is a one-row DataFrame whose 'mean' cell is the per-bin average
    and whose 'dev' cell is the per-bin population standard deviation.
    """
    stacked = np.array(d[feature].tolist())
    rows = stacked.shape[0]
    avg = sum(stacked) / rows
    variance = sum((stacked[i, :] - avg) ** 2 for i in range(rows)) / rows
    deviation = [math.sqrt(v) for v in variance]
    return pd.DataFrame({
        'mean': [avg],
        'dev': [deviation],
    })
# Matplotlib format string (color + linestyle) per surface class for the
# Fourier plots below. NOTE(review): the name `display` shadows IPython's
# built-in display() in a notebook session.
display={
'hard_tiles_large_space':'r-.',
'concrete':'g-.',
'tiled':'b-.',
'fine_concrete':'r-',
'wood':'g-',
'carpet':'b-',
'soft_pvc':'y-',
'hard_tiles':'r--',
'soft_tiles':'g--',
}
# In[34]:
import matplotlib.pyplot as plt
# One stacked subplot per sensor: mean amplitude spectrum per surface class.
plt.figure(figsize=(14, 8*7))
#plt.margins(x=0.0, y=0.0)
#plt.tight_layout()
# plt.figure()
features=['lx_f','ly_f','lz_f','ax_f','ay_f','az_f']
count=0
for feature in features:
    # Per-surface mean/dev of this sensor's spectrum.
    stat= t.groupby('y').apply(aggf,feature)
    stat.index= stat.index.droplevel(-1)
    b=[*range(len(stat.at['carpet','mean']))]
    count+=1
    plt.subplot(len(features)+1,1,count)
    for i,(k,v) in enumerate(display.items()):
        plt.plot(b, stat.at[k,'mean'], v, label=k)
# plt.errorbar(b, stat.at[k,'mean'], yerr=stat.at[k,'dev'], fmt=v)
    leg = plt.legend(loc='best', ncol=3, mode="expand", shadow=True, fancybox=True)
    plt.title("sensor: " + feature)
    plt.xlabel("frequency component")
    plt.ylabel("amplitude")
# Final panel: one class with error bars to show the spread.
count+=1
plt.subplot(len(features)+1,1,count)
k='concrete'
v=display[k]
feature='lz_f'
stat= t.groupby('y').apply(aggf,feature)
stat.index= stat.index.droplevel(-1)
b=[*range(len(stat.at['carpet','mean']))]
plt.errorbar(b, stat.at[k,'mean'], yerr=stat.at[k,'dev'], fmt=v)
plt.title("sample for error bars (lz_f, surface concrete)")
plt.xlabel("frequency component")
plt.ylabel("amplitude")
plt.show()
# In[35]:
del train_x, train_y
gc.collect()
# ## Is it an Humanoid Robot instead of a car?
#
# 
#
# **Acceleration**
# - X (mean at 0)
# - Y axis is centered at a value wich shows us the movement (straight ).
# - Z axis is centered at 10 (+- 9.8) wich is the gravity !! , you can see how the robot bounds.
#
# Angular velocity (X,Y,Z) has mean (0,0,0) so there is no lineal movement on those axis (measured with an encoder or potentiometer)
#
# **Fourier**
#
# We can see: with a frequency 3 Hz we can see an acceleration, I think that acceleration represents one step.
# Maybe ee can suppose that every step is caused by many different movements, that's why there are different accelerations at different frequencies.
#
# Angular velocity represents spins.
# Every time the engine/servo spins, the robot does an step - relation between acc y vel.
# ---
#
# # Feature Engineering
# In[36]:
def plot_feature_distribution(df1, df2, label1, label2, features, a=2, b=5):
    """Overlay KDE plots of each feature for two dataframes on an a-by-b grid.

    Typically used to compare train vs. test distributions column by column.
    """
    sns.set_style('whitegrid')
    plt.figure()
    fig, ax = plt.subplots(a, b, figsize=(17, 9))
    for slot, feature in enumerate(features, start=1):
        plt.subplot(a, b, slot)
        sns.kdeplot(df1[feature], bw=0.5, label=label1)
        sns.kdeplot(df2[feature], bw=0.5, label=label2)
        plt.xlabel(feature, fontsize=9)
        locs, labels = plt.xticks()
        plt.tick_params(axis='x', which='major', labelsize=8)
        plt.tick_params(axis='y', which='major', labelsize=8)
    plt.show()
# In[37]:
# Compare train vs. test distribution for every sensor column.
features = data.columns.values[3:]
plot_feature_distribution(data, test, 'train', 'test', features)
# Godd news, our basic features have the **same distribution (Normal) on test and training**. There are some differences between *orientation_X* , *orientation_Y* and *linear_acceleration_Y*.
#
# I willl try **StandardScaler** to fix this, and remember: orientation , angular velocity and linear acceleration are measured with different units, scaling might be a good choice.
# In[38]:
def plot_feature_class_distribution(classes, tt, features, a=5, b=2):
    """Plot each feature's KDE split by surface class on an a-by-b grid.

    ``tt`` must contain a 'surface' column; one curve is drawn per class
    in ``classes`` for every feature.
    """
    sns.set_style('whitegrid')
    plt.figure()
    fig, ax = plt.subplots(a, b, figsize=(16, 24))
    for slot, feature in enumerate(features, start=1):
        plt.subplot(a, b, slot)
        for surface_class in classes:
            subset = tt[tt['surface'] == surface_class]
            sns.kdeplot(subset[feature], bw=0.5, label=surface_class)
        plt.xlabel(feature, fontsize=9)
        locs, labels = plt.xticks()
        plt.tick_params(axis='x', which='major', labelsize=8)
        plt.tick_params(axis='y', which='major', labelsize=8)
    plt.show()
# In[39]:
# Per-class feature distributions on the labelled training data.
classes = (target['surface'].value_counts()).index
aux = data.merge(target, on='series_id', how='inner')
plot_feature_class_distribution(classes, aux, features)
# **Normal distribution**
#
# There are obviously differences between *surfaces* and that's good, we will focus on that in order to classify them better.
#
# Knowing this differences and that variables follow a normal distribution (in most of the cases) we need to add new features like: ```mean, std, median, range ...``` (for each variable).
#
# However, I will try to fix *orientation_X* and *orientation_Y* as I explained before, scaling and normalizing data.
#
# ---
#
# ### Now with a new scale (more more precision)
# In[40]:
# Same per-class KDEs, one figure with larger axes for finer inspection.
plt.figure(figsize=(26, 16))
for i,col in enumerate(aux.columns[3:13]):
    ax = plt.subplot(3,4,i+1)
    ax = plt.title(col)
    for surface in classes:
        surface_feature = aux[aux['surface'] == surface]
        sns.kdeplot(surface_feature[col], label = surface)
# ### Histogram for main features
# In[41]:
# Train vs. test histograms for every sensor column.
plt.figure(figsize=(26, 16))
for i, col in enumerate(data.columns[3:]):
    ax = plt.subplot(3, 4, i + 1)
    sns.distplot(data[col], bins=100, label='train')
    sns.distplot(test[col], bins=100, label='test')
    ax.legend()
# ## Step 0 : quaternions
# Orientation - quaternion coordinates
# You could notice that there are 4 coordinates: X, Y, Z, W.
#
# Usually we have X, Y, Z - Euler Angles. But Euler Angles are limited by a phenomenon called "gimbal lock," which prevents them from measuring orientation when the pitch angle approaches +/- 90 degrees. Quaternions provide an alternative measurement technique that does not suffer from gimbal lock. Quaternions are less intuitive than Euler Angles and the math can be a little more complicated.
#
# Here are some articles about it:
#
# http://www.chrobotics.com/library/understanding-quaternions
#
# http://www.tobynorris.com/work/prog/csharp/quatview/help/orientations_and_quaternions.htm
#
# Basically 3D coordinates are converted to 4D vectors.
# In[42]:
# https://stackoverflow.com/questions/53033620/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr?rq=1
def quaternion_to_euler(x, y, z, w):
    """Convert a unit quaternion (x, y, z, w) to Euler angles.

    Returns the (roll, pitch, yaw) tuple in radians, following the
    standard aerospace conversion.
    """
    import math
    # Roll: rotation about the X axis.
    sinr_cosp = +2.0 * (w * x + y * z)
    cosr_cosp = +1.0 - 2.0 * (x * x + y * y)
    X = math.atan2(sinr_cosp, cosr_cosp)
    # Pitch: rotation about the Y axis; clamp against float drift so
    # asin never receives a value outside [-1, 1].
    sinp = +2.0 * (w * y - z * x)
    sinp = max(-1.0, min(+1.0, sinp))
    Y = math.asin(sinp)
    # Yaw: rotation about the Z axis.
    siny_cosp = +2.0 * (w * z + x * y)
    cosy_cosp = +1.0 - 2.0 * (y * y + z * z)
    Z = math.atan2(siny_cosp, cosy_cosp)
    return X, Y, Z
# In[43]:
def fe_step0 (actual):
    """Add the quaternion norm and normalized orientation components.

    Appends 'norm_quat' (squared norm), 'mod_quat' (modulus) and
    'norm_X/Y/Z/W' (each orientation component divided by the modulus)
    to ``actual`` in place and returns it.

    References:
    https://www.mathworks.com/help/aeroblks/quaternionnorm.html
    https://www.mathworks.com/help/aeroblks/quaternionmodulus.html
    https://www.mathworks.com/help/aeroblks/quaternionnormalize.html
    """
    quat_cols = ['orientation_X', 'orientation_Y', 'orientation_Z', 'orientation_W']
    squared_norm = sum(actual[c] ** 2 for c in quat_cols)
    actual['norm_quat'] = squared_norm
    actual['mod_quat'] = squared_norm ** 0.5
    for axis in ['X', 'Y', 'Z', 'W']:
        actual['norm_' + axis] = actual['orientation_' + axis] / actual['mod_quat']
    return actual
#
# > *Are there any reasons to not automatically normalize a quaternion? And if there are, what quaternion operations do result in non-normalized quaternions?*
#
# Any operation that produces a quaternion will need to be normalized because floating-point precession errors will cause it to not be unit length.
# I would advise against standard routines performing normalization automatically for performance reasons.
# Any competent programmer should be aware of the precision issues and be able to normalize the quantities when necessary - and it is not always necessary to have a unit length quaternion.
# The same is true for vector operations.
#
# source: https://stackoverflow.com/questions/11667783/quaternion-and-normalization
# In[44]:
# Normalize the quaternion columns on both frames.
data = fe_step0(data)
test = fe_step0(test)
print(data.shape)
data.head()
# In[45]:
# Train vs. test KDEs of the four normalized quaternion components.
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(18, 5))
ax1.set_title('quaternion X')
sns.kdeplot(data['norm_X'], ax=ax1, label="train")
sns.kdeplot(test['norm_X'], ax=ax1, label="test")
ax2.set_title('quaternion Y')
sns.kdeplot(data['norm_Y'], ax=ax2, label="train")
sns.kdeplot(test['norm_Y'], ax=ax2, label="test")
ax3.set_title('quaternion Z')
sns.kdeplot(data['norm_Z'], ax=ax3, label="train")
sns.kdeplot(test['norm_Z'], ax=ax3, label="test")
ax4.set_title('quaternion W')
sns.kdeplot(data['norm_W'], ax=ax4, label="train")
sns.kdeplot(test['norm_W'], ax=ax4, label="test")
plt.show()
# ## Step 1: (x, y, z, w) -> (x,y,z) quaternions to euler angles
# In[46]:
def fe_step1 (actual):
    """Convert the normalized quaternion columns to Euler angles.

    Reads 'norm_X/Y/Z/W' row by row, converts each quaternion with
    quaternion_to_euler(), and appends 'euler_x', 'euler_y', 'euler_z'
    (roll, pitch, yaw) to ``actual`` in place.
    """
    quats = zip(actual['norm_X'].tolist(), actual['norm_Y'].tolist(),
                actual['norm_Z'].tolist(), actual['norm_W'].tolist())
    rolls, pitches, yaws = [], [], []
    for qx, qy, qz, qw in quats:
        roll, pitch, yaw = quaternion_to_euler(qx, qy, qz, qw)
        rolls.append(roll)
        pitches.append(pitch)
        yaws.append(yaw)
    actual['euler_x'] = rolls
    actual['euler_y'] = pitches
    actual['euler_z'] = yaws
    return actual
# In[47]:
# Add the Euler-angle columns to both frames.
data = fe_step1(data)
test = fe_step1(test)
print (data.shape)
data.head()
# 
# In[48]:
# Train vs. test KDEs of the three Euler angles.
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(15, 5))
ax1.set_title('Roll')
sns.kdeplot(data['euler_x'], ax=ax1, label="train")
sns.kdeplot(test['euler_x'], ax=ax1, label="test")
ax2.set_title('Pitch')
sns.kdeplot(data['euler_y'], ax=ax2, label="train")
sns.kdeplot(test['euler_y'], ax=ax2, label="test")
ax3.set_title('Yaw')
sns.kdeplot(data['euler_z'], ax=ax3, label="train")
sns.kdeplot(test['euler_z'], ax=ax3, label="test")
plt.show()
# **Euler angles** are really important, and we have a problem with Z.
#
# ### Why Orientation_Z (euler angle Z) is so important?
#
# We have a robot moving around, imagine a robot moving straight through different surfaces (each with different features), for example concrete and hard tile floor. Our robot can can **bounce** or **balance** itself a little bit on if the surface is not flat and smooth, that's why we need to work with quaternions and take care of orientation_Z.
#
# ![](https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTuOAZerF-Aq__W-cOBoZq8ckzGUSnAhmzz4cxAG9Vzjdwc5GY5)
# In[49]:
data.head()
# ## Step 2: + Basic features
# In[50]:
def feat_eng(data):
    """Collapse each measurement series into one row of aggregate features.

    First adds four derived magnitude channels to ``data`` in place
    (total angular velocity, total linear acceleration, orientation XYZ
    norm, and their acceleration/velocity ratio), then computes per-series
    statistics for every sensor column. Returns a new DataFrame indexed
    by series_id.
    """
    feats = pd.DataFrame()
    # Derived magnitude channels (mutates the input frame, as callers expect).
    data['totl_anglr_vel'] = (data['angular_velocity_X'] ** 2 +
                              data['angular_velocity_Y'] ** 2 +
                              data['angular_velocity_Z'] ** 2) ** 0.5
    data['totl_linr_acc'] = (data['linear_acceleration_X'] ** 2 +
                             data['linear_acceleration_Y'] ** 2 +
                             data['linear_acceleration_Z'] ** 2) ** 0.5
    data['totl_xyz'] = (data['orientation_X'] ** 2 +
                        data['orientation_Y'] ** 2 +
                        data['orientation_Z'] ** 2) ** 0.5
    data['acc_vs_vel'] = data['totl_linr_acc'] / data['totl_anglr_vel']

    def second_order_change(values):
        # Mean change of the absolute first difference.
        return np.mean(np.diff(np.abs(np.diff(values))))

    excluded = ('row_id', 'series_id', 'measurement_number')
    for col in data.columns:
        if col in excluded:
            continue
        grouped = data.groupby(['series_id'])[col]
        feats[col + '_mean'] = grouped.mean()
        feats[col + '_median'] = grouped.median()
        feats[col + '_max'] = grouped.max()
        feats[col + '_min'] = grouped.min()
        feats[col + '_std'] = grouped.std()
        feats[col + '_range'] = feats[col + '_max'] - feats[col + '_min']
        # NOTE: this ratio is inf/NaN when the per-series minimum is 0.
        feats[col + '_maxtoMin'] = feats[col + '_max'] / feats[col + '_min']
        feats[col + '_mean_abs_chg'] = grouped.apply(lambda v: np.mean(np.abs(np.diff(v))))
        feats[col + '_mean_change_of_abs_change'] = grouped.apply(second_order_change)
        feats[col + '_abs_max'] = grouped.apply(lambda v: np.max(np.abs(v)))
        feats[col + '_abs_min'] = grouped.apply(lambda v: np.min(np.abs(v)))
        feats[col + '_abs_avg'] = (feats[col + '_abs_min'] + feats[col + '_abs_max']) / 2
    return feats
# In[51]:
# Jupyter cell magic: time the feature-engineering pass over train and test.
get_ipython().run_cell_magic('time', '', 'data = feat_eng(data)\ntest = feat_eng(test)\nprint ("New features: ",data.shape)')
# In[52]:
data.head()
# ## New advanced features
# **Useful functions**
# In[53]:
from scipy.stats import kurtosis
from scipy.stats import skew
def _kurtosis(x):
return kurtosis(x)
def CPT5(x):
    """Exponential-mean feature: sum(exp(x)) / (len(x) * exp(std(x)))."""
    scale = len(x) * np.exp(np.std(x))
    return sum(np.exp(x)) / scale
def skewness(x):
    """Sample skewness of *x* (thin wrapper over scipy.stats.skew)."""
    result = skew(x)
    return result
def SSC(x):
    """Slope Sign Change count of the edge-padded signal *x*."""
    arr = np.array(x)
    arr = np.append(arr[-1], arr)   # prepend the last sample
    arr = np.append(arr, arr[1])    # append the original first sample
    mid = arr[1:len(arr) - 1]
    nxt = arr[2:len(arr)]           # x[n+1]
    prv = arr[0:len(arr) - 2]       # x[n-1]
    # 1 where the middle sample is a local extremum relative to both neighbours
    flips = np.heaviside((mid - prv) * (mid - nxt), 0)
    return sum(flips[1:])
def wave_length(x):
    """Waveform length (total absolute step size) of the padded signal *x*."""
    arr = np.array(x)
    arr = np.append(arr[-1], arr)   # prepend the last sample
    arr = np.append(arr, arr[1])    # append the original first sample
    mid = arr[1:len(arr) - 1]
    nxt = arr[2:len(arr)]           # x[n+1]
    return sum(abs(nxt - mid))
def norm_entropy(x):
    """Sum of |x|**3 ('norm entropy' with a fixed exponent of 3)."""
    exponent = 3
    return sum(np.power(abs(x), exponent))
def SRAV(x):
    """Squared mean of the square roots of |x| (Square Root Amplitude Value)."""
    sra = sum(np.sqrt(abs(x)))
    return np.power(sra / len(x), 2)
def mean_abs(x):
    """Mean of the absolute values of *x*."""
    total = sum(abs(x))
    return total / len(x)
def zero_crossing(x):
    """Count sign changes between consecutive samples of the padded signal."""
    arr = np.array(x)
    arr = np.append(arr[-1], arr)   # prepend the last sample
    arr = np.append(arr, arr[1])    # append the original first sample
    mid = arr[1:len(arr) - 1]
    nxt = arr[2:len(arr)]           # x[n+1]
    # heaviside(-a*b, 0) is 1 exactly when a and b have opposite signs
    return sum(np.heaviside(-mid * nxt, 0))
# This advanced features based on robust statistics.
# In[54]:
def fe_advanced_stats(data):
    """Per-series robust statistics for every sensor column.

    Skips id/bookkeeping columns and every orientation column (orientation
    leaks across series, see the notebook discussion), then computes skew,
    MAD, quantiles, IQR and the custom signal features (CPT5, SSC, ...)
    grouped by series_id.  Returns one row per series.
    """
    stats = pd.DataFrame()
    for col in data.columns:
        if col in ['row_id', 'series_id', 'measurement_number']:
            continue
        if 'orientation' in col:
            continue
        print ("FE on column ", col, "...")
        grp = data.groupby(['series_id'])[col]
        stats[col + '_skew'] = grp.skew()
        stats[col + '_mad'] = grp.mad()
        stats[col + '_q25'] = grp.quantile(0.25)
        stats[col + '_q75'] = grp.quantile(0.75)
        stats[col + '_q95'] = grp.quantile(0.95)
        stats[col + '_iqr'] = stats[col + '_q75'] - stats[col + '_q25']
        stats[col + '_CPT5'] = grp.apply(CPT5)
        stats[col + '_SSC'] = grp.apply(SSC)
        stats[col + '_skewness'] = grp.apply(skewness)
        # NOTE: the '_wave_lenght' spelling is kept -- downstream code may
        # reference the column by this exact name.
        stats[col + '_wave_lenght'] = grp.apply(wave_length)
        stats[col + '_norm_entropy'] = grp.apply(norm_entropy)
        stats[col + '_SRAV'] = grp.apply(SRAV)
        stats[col + '_kurtosis'] = grp.apply(_kurtosis)
        stats[col + '_zero_crossing'] = grp.apply(zero_crossing)
    return stats
# - Frequency of the max value
# - Frequency of the min value
# - Count Positive values
# - Count Negative values
# - Count zeros
# In[55]:
# Sensor columns that fe_plus derives frequency/count features from
# (orientation columns are deliberately excluded because of the
# train-set leakage discussed in this notebook).
basic_fe = ['linear_acceleration_X','linear_acceleration_Y','linear_acceleration_Z',
            'angular_velocity_X','angular_velocity_Y','angular_velocity_Z']
# In[56]:
def fe_plus (data):
    """Add per-series frequency/count features for the basic sensor columns.

    Mutates *data* in place via .loc (returns None).

    NOTE(review): this function reads the module-level globals `X_train`
    and `basic_fe`.  It always selects the raw rows from X_train, even if
    *data* came from the test set -- verify before calling it on test data.
    """
    aux = pd.DataFrame()
    for serie in data.index:
        #if serie%500 == 0: print ("> Serie = ",serie)
        # raw measurement rows for this series (taken from the X_train global)
        aux = X_train[X_train['series_id']==serie]
        for col in basic_fe:
            # number of distinct values after rounding to 3 decimals, and the
            # same count scaled by 18 (presumably a reference sample count --
            # TODO confirm; each series actually has 128 measurements)
            data.loc[serie,col + '_unq'] = aux[col].round(3).nunique()
            data.loc[serie,col + 'ratio_unq'] = aux[col].round(3).nunique()/18
            # most frequent raw value; bare except keeps best-effort behaviour
            # when idxmax fails (e.g. empty selection)
            try:
                data.loc[serie,col + '_freq'] = aux[col].value_counts().idxmax()
            except:
                data.loc[serie,col + '_freq'] = 0
            # how often the extreme values occur, and sign/zero counts
            data.loc[serie,col + '_max_freq'] = aux[aux[col] == aux[col].max()].shape[0]
            data.loc[serie,col + '_min_freq'] = aux[aux[col] == aux[col].min()].shape[0]
            data.loc[serie,col + '_pos_freq'] = aux[aux[col] >= 0].shape[0]
            data.loc[serie,col + '_neg_freq'] = aux[aux[col] < 0].shape[0]
            data.loc[serie,col + '_nzeros'] = (aux[col]==0).sum(axis=0)
# ### Important !
# As you can see in this kernel https://www.kaggle.com/anjum48/leakage-within-the-train-dataset
#
# As discussed in the discussion forums (https://www.kaggle.com/c/career-con-2019/discussion/87239#latest-508136) it looks as if each series is part of longer acquisition periods that have been cut up into chunks with 128 samples.
#
# This means that each series is not truly independent and there is leakage between them via the orientation data. Therefore if you have any features that use orientation, you will get a very high CV score due to this leakage in the train set.
#
# [This kernel](https://www.kaggle.com/anjum48/leakage-within-the-train-dataset) will show you how it is possible to get a CV score of 0.992 using only the **orientation data**.
#
# ---
#
# **So I recommend not to use orientation information**
# ## Correlations (Part II)
# In[57]:
#https://stackoverflow.com/questions/17778394/list-highest-correlation-pairs-from-a-large-correlation-matrix-in-pandas
corr_matrix = data.corr().abs()
raw_corr = data.corr()
# Keep only the strictly-upper-triangular entries so each pair appears once,
# then stack into (var1, var2, |corr|) rows sorted by absolute correlation.
# FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# bool is the documented replacement and behaves identically here.
sol = (corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
                  .stack()
                  .sort_values(ascending=False))
top_corr = pd.DataFrame(sol).reset_index()
top_corr.columns = ["var1", "var2", "abs corr"]
# with .abs() we lost the sign, and it's very important.
# Recover the signed correlation for each pair from the raw matrix.
for x in range(len(top_corr)):
    var1 = top_corr.iloc[x]["var1"]
    var2 = top_corr.iloc[x]["var2"]
    corr = raw_corr[var1][var2]
    top_corr.at[x, "raw corr"] = corr
# In[58]:
top_corr.head(15)
# ### Filling missing NAs and infinite data ∞ by zeroes 0
# In[59]:
# Feature engineering can produce NaN (e.g. std of 1 sample) and +/-inf
# (e.g. maxtoMin with a zero minimum); zero them out before modelling.
data.fillna(0,inplace=True)
test.fillna(0,inplace=True)
data.replace(-np.inf,0,inplace=True)
data.replace(np.inf,0,inplace=True)
test.replace(-np.inf,0,inplace=True)
test.replace(np.inf,0,inplace=True)
# ## Label encoding
# In[60]:
target.head()
# In[61]:
# Encode the surface names as integer class labels (le is a LabelEncoder).
target['surface'] = le.fit_transform(target['surface'])
# In[62]:
target['surface'].value_counts()
# In[63]:
target.head()
# # Run Model
# **use random_state at Random Forest**
#
# if you don't use random_state you will get a different solution everytime, sometimes you will be lucky, but other times you will lose your time comparing.
# **Validation Strategy: Stratified KFold**
# In[64]:
# Fixed random_state keeps folds and results reproducible between runs.
folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=59)
# In[65]:
# predicted: summed test-set class probabilities (9 surface classes);
# measured: out-of-fold class predictions for every training series.
predicted = np.zeros((test.shape[0],9))
measured= np.zeros((data.shape[0]))
score = 0
# In[66]:
for times, (trn_idx, val_idx) in enumerate(folds.split(data.values,target['surface'].values)):
    model = RandomForestClassifier(n_estimators=500, n_jobs = -1)
    #model = RandomForestClassifier(n_estimators=500, max_depth=10, min_samples_split=5, n_jobs=-1)
    model.fit(data.iloc[trn_idx],target['surface'][trn_idx])
    measured[val_idx] = model.predict(data.iloc[val_idx])
    # average the fold's test probabilities into the ensemble prediction
    predicted += model.predict_proba(test)/folds.n_splits
    score += model.score(data.iloc[val_idx],target['surface'][val_idx])
    print("Fold: {} score: {}".format(times,model.score(data.iloc[val_idx],target['surface'][val_idx])))
    importances = model.feature_importances_
    indices = np.argsort(importances)
    features = data.columns
    # For unusually good folds, plot the top feature importances.
    if model.score(data.iloc[val_idx],target['surface'][val_idx]) > 0.92000:
        hm = 30
        plt.figure(figsize=(7, 10))
        plt.title('Feature Importances')
        plt.barh(range(len(indices[:hm])), importances[indices][:hm], color='b', align='center')
        plt.yticks(range(len(indices[:hm])), [features[i] for i in indices])
        plt.xlabel('Relative Importance')
        plt.show()
    gc.collect()
# In[67]:
print('Avg Accuracy RF', score / folds.n_splits)
# In[68]:
# Out-of-fold confusion matrix over the encoded surface labels.
confusion_matrix(measured,target['surface'])
# ### Confusion Matrix Plot
# In[69]:
# https://www.kaggle.com/artgor/where-do-the-robots-drive
def plot_confusion_matrix(truth, pred, classes, normalize=False, title=''):
    """Render a confusion matrix of *truth* vs *pred* with matplotlib.

    classes   : tick labels for both axes.
    normalize : if True, show per-row fractions instead of raw counts.
    title     : accepted for API compatibility; the plot title is fixed.
    """
    cm = confusion_matrix(truth, pred)
    if normalize:
        # row-normalise so every true class sums to 1
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fmt = '.2f' if normalize else 'd'
    plt.figure(figsize=(10, 10))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Confusion matrix', size=15)
    plt.colorbar(fraction=0.046, pad=0.04)
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    # annotate each cell; flip the text colour on dark cells for contrast
    cutoff = cm.max() / 2.
    for row, column in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        shade = "white" if cm[row, column] > cutoff else "black"
        plt.text(column, row, format(cm[row, column], fmt),
                 horizontalalignment="center",
                 color=shade)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.grid(False)
    plt.tight_layout()
# In[70]:
plot_confusion_matrix(target['surface'], measured, le.classes_)
# ### Submission (Part I)
# In[71]:
# Pick the highest-probability class per test row and decode it back to
# the surface name before writing the submission file.
sub['surface'] = le.inverse_transform(predicted.argmax(axis=1))
sub.to_csv('submission.csv', index=False)
sub.head()
# ### Best Submission
# In[72]:
# Re-export a previously saved best submission alongside the new one.
best_sub = pd.read_csv('../input/robots-best-submission/final_submission.csv')
best_sub.to_csv('best_submission.csv', index=False)
best_sub.head(10)
# ## References
#
# [1] https://www.kaggle.com/vanshjatana/help-humanity-by-helping-robots-4e306b
#
# [2] https://www.kaggle.com/artgor/where-do-the-robots-drive
#
# [3] https://www.kaggle.com/gpreda/robots-need-help
#
# [4] https://www.kaggle.com/vanshjatana/help-humanity-by-helping-robots-4e306b by [@vanshjatana](https://www.kaggle.com/vanshjatana)
# # ABOUT Submissions & Leaderboard
# This kernel [distribution hack](https://www.kaggle.com/donkeys/distribution-hack) by [@donkeys](https://www.kaggle.com/donkeys) simply produces 9 output files, one for each target category.
# I submitted each of these to the competition to see how much of each target type exists in the test set distribution. Results:
#
# - carpet 0.06
# - concrete 0.16
# - fine concrete 0.09
# - hard tiles 0.06
# - hard tiles large space 0.10
# - soft pvc 0.17
# - soft tiles 0.23
# - tiled 0.03
# - wood 0.06
#
# Also posted a discussion [thread](https://www.kaggle.com/c/career-con-2019/discussion/85204)
#
#
# **by [@ninoko](https://www.kaggle.com/ninoko)**
#
# I've probed the public leaderboard and this is what I got
# There are much less surfaces like wood or tiled, and much more soft and hard tiles in public leaderboard. This can be issue, why CV and LB results differ strangely.
#
# 
# **I will analyze my best submissions in order to find something interesting.**
#
# Please, feel free to optimize this code.
# In[73]:
# Load several historical submissions (named after their public LB score)
# and rename each 'surface' column so they can live in one frame.
sub073 = pd.read_csv('../input/robots-best-submission/mybest0.73.csv')
sub072 = pd.read_csv('../input/robots-best-submission/sub_0.72.csv')
sub072_2 = pd.read_csv('../input/robots-best-submission/sub_0.72_2.csv')
sub071 = pd.read_csv('../input/robots-best-submission/sub_0.71.csv')
sub06 = pd.read_csv('../input/robots-best-submission/sub_0.6.csv')
sub073 = sub073.rename(columns = {'surface':'surface073'})
sub072 = sub072.rename(columns = {'surface':'surface072'})
sub072_2 = sub072_2.rename(columns = {'surface':'surface072_2'})
sub071 = sub071.rename(columns = {'surface':'surface071'})
sub06 = sub06.rename(columns = {'surface':'surface06'})
print ("Submission data is ready")
# In[74]:
sub073.head()
# In[75]:
# Side-by-side predictions from four submissions for each test series.
# NOTE(review): sub072_2 is loaded above but not included here -- confirm
# whether that is intentional.
subtest = pd.concat([sub073['series_id'], sub073['surface073'], sub072['surface072'], sub071['surface071'], sub06['surface06']], axis=1)
subtest.head()
# In[76]:
# Collect the series where the submissions disagree on the surface.
differents = []
for i in range (0,subtest.shape[0]):
    labels = list(subtest.iloc[i,1:])
    result = len(set(labels))>1
    if result:
        differents.append((i, str(labels)))
differents = pd.DataFrame(differents, columns=['idx','group'])
differents.head()
# For example the series with **series_id = 2** has the following prediction:
#
# ```
# ['tiled', 'tiled', 'tiled', 'fine_concrete']
# ```
#
# This means that my best submissions (*0.73, 0.72 and 0.71 LB* ) predicted the same: **tiled**, but a worst submission (*0.6 LB*) would have predicted **fine_concrete**.
#
# ---
#
# ### So... Why is this interesting?
#
# In order to improve our classification, the LB is indicating to us which kinds of surfaces are confused with others.
# In that example, ```tiled``` and ```fine_concrete``` are being **confused** (maybe because the two surfaces are **alike**)
#
# ---
#
# As you can see below, we have **177 cases of confusion**
# I'm going to plot the top 10% and see what happens.
# In[77]:
differents['group'].nunique()
# In[78]:
# Count how often each disagreement pattern occurs, then keep one row per
# pattern, sorted from most to least frequent.
differents['count'] = differents.groupby('group')['group'].transform('count')
differents = differents.sort_values(by=['count'], ascending=False)
differents = differents.drop(['idx'],axis=1)
differents = differents.drop_duplicates()
# In[79]:
differents.head(10)
# We can see that **wood** and **fine_concrete** are really hard to guess.
# ### Maybe this is the most interesting part, the difference between a 0.73 and 0.72 submission.
# In[80]:
differents.tail(10)
# Remember the order at the array is [0.73LB, 0.72LB, 0.71LB, 06LB].
# Series with ```series_id```= 575, 1024, 911, 723, 148, 338 are really interesting because they show important differences between surfaces that often are being confused.
# ## Next Step ??
#
# - I will create a test dataset with those special cases and then I will ad a new CV stage where I will try to classify those surfaces correctly.
# - I will look for surfaces distinctive features.
# ## Generate a new train and test: Fast Fourier Transform Denoising
# In[81]:
from numpy.fft import *
import matplotlib.pyplot as plt
import matplotlib.style as style
style.use('ggplot')
# In[82]:
# Reload the raw (un-engineered) competition data for the denoising pass.
X_train = pd.read_csv('../input/career-con-2019/X_train.csv')
X_test = pd.read_csv('../input/career-con-2019/X_test.csv')
target = pd.read_csv('../input/career-con-2019/y_train.csv')
# In[83]:
# Index the raw measurement rows by series_id for quick per-series access.
series_dict = {}
for series in (X_train['series_id'].unique()):
    series_dict[series] = X_train[X_train['series_id'] == series]
# In[84]:
# From: Code Snippet For Visualizing Series Id by @shaz13
def plotSeries(series_id):
    """Plot every sensor channel of one series on a 3x4 subplot grid.

    Orientation channels are drawn red, angular velocity green and linear
    acceleration blue; the series' surface name is printed first.  Reads
    the module-level `series_dict` and `target` globals.
    """
    style.use('ggplot')
    plt.figure(figsize=(28, 16))
    surface = target[target['series_id'] == series_id]['surface'].values[0]
    print(surface.title())
    frame = series_dict[series_id]
    for idx, col in enumerate(frame.columns[3:]):
        if col.startswith("o"):
            shade = 'red'
        elif col.startswith("a"):
            shade = 'green'
        else:
            shade = 'blue'
        # skip one grid cell after the first 7 plots, as the original did
        slot = idx + 1 if idx >= 7 else idx
        plt.subplot(3, 4, slot + 1)
        plt.plot(frame[col], color=shade, linewidth=3)
        plt.title(col)
# In[85]:
plotSeries(1)
# In[86]:
# from @theoviel at https://www.kaggle.com/theoviel/fast-fourier-transform-denoising
def filter_signal(signal, threshold=1e3):
    """Low-pass *signal* by zeroing FFT components above *threshold* Hz.

    The frequency axis assumes the whole signal spans 20 ms
    (sample spacing d = 20e-3 / signal.size).  Returns the
    inverse-transformed, denoised signal.
    """
    spectrum = rfft(signal)
    freqs = rfftfreq(signal.size, d=20e-3 / signal.size)
    spectrum[freqs > threshold] = 0
    return irfft(spectrum)
# Let's denoise train and test angular_velocity and linear_acceleration data
# In[87]:
X_train_denoised = X_train.copy()
X_test_denoised = X_test.copy()
# train
# Denoise every angular-velocity / linear-acceleration column per series.
for col in X_train.columns:
    if col[0:3] == 'ang' or col[0:3] == 'lin':
        # Apply filter_signal function to the data in each series
        denoised_data = X_train.groupby(['series_id'])[col].apply(lambda x: filter_signal(x))
        # Assign the denoised data back to X_train
        # (flatten the per-series arrays back into one column; relies on
        # groupby preserving the original series order)
        list_denoised_data = []
        for arr in denoised_data:
            for val in arr:
                list_denoised_data.append(val)
        X_train_denoised[col] = list_denoised_data
# test
# Same filtering for the test set.
for col in X_test.columns:
    if col[0:3] == 'ang' or col[0:3] == 'lin':
        # Apply filter_signal function to the data in each series
        denoised_data = X_test.groupby(['series_id'])[col].apply(lambda x: filter_signal(x))
        # Assign the denoised data back to X_train
        list_denoised_data = []
        for arr in denoised_data:
            for val in arr:
                list_denoised_data.append(val)
        X_test_denoised[col] = list_denoised_data
# In[88]:
# Rebuild the per-series index from the denoised training data so
# plotSeries now shows the filtered signals.
series_dict = {}
for series in (X_train_denoised['series_id'].unique()):
    series_dict[series] = X_train_denoised[X_train_denoised['series_id'] == series]
# In[89]:
plotSeries(1)
# In[90]:
# Overlay one series' raw vs denoised signal to show the filter's effect.
plt.figure(figsize=(24, 8))
# FIX: the title said 'linear_acceleration_X' but the plotted column is
# angular_velocity_Z -- label the plot with the column actually drawn.
plt.title('angular_velocity_Z')
plt.plot(X_train.angular_velocity_Z[128:256], label="original");
plt.plot(X_train_denoised.angular_velocity_Z[128:256], label="denoised");
plt.legend()
plt.show()
# **Generate new denoised train and test**
# In[91]:
X_train_denoised.head()
# In[92]:
X_test_denoised.head()
# In[93]:
# FIX: the output file names were swapped (train data was being written to
# 'test_denoised.csv' and test data to 'train_denoised.csv').
X_train_denoised.to_csv('train_denoised.csv', index=False)
X_test_denoised.to_csv('test_denoised.csv', index=False)
| [
"matplotlib.pyplot.title",
"numpy.heaviside",
"seaborn.heatmap",
"matplotlib.style.use",
"seaborn.kdeplot",
"math.atan2",
"pandas.read_csv",
"math.asin",
"numpy.abs",
"numpy.ones",
"numpy.argsort",
"gc.collect",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.tick_params",
... | [((2822, 2850), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (2848, 2850), False, 'from sklearn import preprocessing\n'), ((3678, 3697), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3687, 3697), True, 'import matplotlib.style as style\n'), ((3715, 3748), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3738, 3748), False, 'import warnings\n'), ((3759, 3770), 'gc.enable', 'gc.enable', ([], {}), '()\n', (3768, 3770), False, 'import gc\n'), ((3923, 3974), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/X_train.csv"""'], {}), "('../input/career-con-2019/X_train.csv')\n", (3934, 3974), True, 'import pandas as pd\n'), ((3980, 4031), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/X_train.csv"""'], {}), "('../input/career-con-2019/X_train.csv')\n", (3991, 4031), True, 'import pandas as pd\n'), ((4038, 4099), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/sample_submission.csv"""'], {}), "('../input/career-con-2019/sample_submission.csv')\n", (4049, 4099), True, 'import pandas as pd\n'), ((4107, 4157), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/X_test.csv"""'], {}), "('../input/career-con-2019/X_test.csv')\n", (4118, 4157), True, 'import pandas as pd\n'), ((4167, 4218), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/y_train.csv"""'], {}), "('../input/career-con-2019/y_train.csv')\n", (4178, 4218), True, 'import pandas as pd\n'), ((4913, 4976), 'pandas.concat', 'pd.concat', (['[totalt, percent]'], {'axis': '(1)', 'keys': "['Total', 'Percent']"}), "([totalt, percent], axis=1, keys=['Total', 'Percent'])\n", (4922, 4976), True, 'import pandas as pd\n'), ((5202, 5265), 'pandas.concat', 'pd.concat', (['[totalt, percent]'], {'axis': '(1)', 'keys': "['Total', 'Percent']"}), "([totalt, percent], axis=1, keys=['Total', 'Percent'])\n", (5211, 5265), True, 'import pandas 
as pd\n'), ((6109, 6134), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (6116, 6134), True, 'import seaborn as sns\n'), ((6255, 6265), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6263, 6265), True, 'import matplotlib.pyplot as plt\n'), ((6438, 6473), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(26, 8)'}), '(1, 1, figsize=(26, 8))\n', (6450, 6473), True, 'import matplotlib.pyplot as plt\n'), ((6641, 6717), 'seaborn.heatmap', 'sns.heatmap', (['m'], {'linewidths': '(0.1)', 'linecolor': '"""black"""', 'annot': '(True)', 'cmap': '"""YlGnBu"""'}), "(m, linewidths=0.1, linecolor='black', annot=True, cmap='YlGnBu')\n", (6652, 6717), True, 'import seaborn as sns\n'), ((6781, 6791), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6789, 6791), True, 'import matplotlib.pyplot as plt\n'), ((6923, 6950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(23, 5)'}), '(figsize=(23, 5))\n', (6933, 6950), True, 'import matplotlib.pyplot as plt\n'), ((6951, 6976), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (6958, 6976), True, 'import seaborn as sns\n'), ((7063, 7073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7071, 7073), True, 'import matplotlib.pyplot as plt\n'), ((7462, 7490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(26, 16)'}), '(figsize=(26, 16))\n', (7472, 7490), True, 'import matplotlib.pyplot as plt\n'), ((7935, 7947), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7945, 7947), False, 'import gc\n'), ((9009, 9021), 'gc.collect', 'gc.collect', ([], {}), '()\n', (9019, 9021), False, 'import gc\n'), ((9079, 9107), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (9091, 9107), True, 'import matplotlib.pyplot as plt\n'), ((9249, 9277), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (9261, 9277), True, 
'import matplotlib.pyplot as plt\n'), ((11180, 11231), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/X_train.csv"""'], {}), "('../input/career-con-2019/X_train.csv')\n", (11191, 11231), True, 'import pandas as pd\n'), ((11242, 11293), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/y_train.csv"""'], {}), "('../input/career-con-2019/y_train.csv')\n", (11253, 11293), True, 'import pandas as pd\n'), ((12249, 12323), 'pandas.merge', 'pd.merge', (['t', "train_y[['series_id', 'surface', 'group_id']]"], {'on': '"""series_id"""'}), "(t, train_y[['series_id', 'surface', 'group_id']], on='series_id')\n", (12257, 12323), True, 'import pandas as pd\n'), ((12893, 12924), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 8 * 7)'}), '(figsize=(14, 8 * 7))\n', (12903, 12924), True, 'import matplotlib.pyplot as plt\n'), ((13832, 13898), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['b', "stat.at[k, 'mean']"], {'yerr': "stat.at[k, 'dev']", 'fmt': 'v'}), "(b, stat.at[k, 'mean'], yerr=stat.at[k, 'dev'], fmt=v)\n", (13844, 13898), True, 'import matplotlib.pyplot as plt\n'), ((13897, 13956), 'matplotlib.pyplot.title', 'plt.title', (['"""sample for error bars (lz_f, surface concrete)"""'], {}), "('sample for error bars (lz_f, surface concrete)')\n", (13906, 13956), True, 'import matplotlib.pyplot as plt\n'), ((13957, 13990), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency component"""'], {}), "('frequency component')\n", (13967, 13990), True, 'import matplotlib.pyplot as plt\n'), ((13991, 14014), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""amplitude"""'], {}), "('amplitude')\n", (14001, 14014), True, 'import matplotlib.pyplot as plt\n'), ((14016, 14026), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14024, 14026), True, 'import matplotlib.pyplot as plt\n'), ((14062, 14074), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14072, 14074), False, 'import gc\n'), ((17385, 17413), 'matplotlib.pyplot.figure', 
'plt.figure', ([], {'figsize': '(26, 16)'}), '(figsize=(26, 16))\n', (17395, 17413), True, 'import matplotlib.pyplot as plt\n'), ((17704, 17732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(26, 16)'}), '(figsize=(26, 16))\n', (17714, 17732), True, 'import matplotlib.pyplot as plt\n'), ((20913, 20951), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(4)', 'figsize': '(18, 5)'}), '(ncols=4, figsize=(18, 5))\n', (20925, 20951), True, 'import matplotlib.pyplot as plt\n'), ((20983, 21033), 'seaborn.kdeplot', 'sns.kdeplot', (["data['norm_X']"], {'ax': 'ax1', 'label': '"""train"""'}), "(data['norm_X'], ax=ax1, label='train')\n", (20994, 21033), True, 'import seaborn as sns\n'), ((21034, 21083), 'seaborn.kdeplot', 'sns.kdeplot', (["test['norm_X']"], {'ax': 'ax1', 'label': '"""test"""'}), "(test['norm_X'], ax=ax1, label='test')\n", (21045, 21083), True, 'import seaborn as sns\n'), ((21115, 21165), 'seaborn.kdeplot', 'sns.kdeplot', (["data['norm_Y']"], {'ax': 'ax2', 'label': '"""train"""'}), "(data['norm_Y'], ax=ax2, label='train')\n", (21126, 21165), True, 'import seaborn as sns\n'), ((21166, 21215), 'seaborn.kdeplot', 'sns.kdeplot', (["test['norm_Y']"], {'ax': 'ax2', 'label': '"""test"""'}), "(test['norm_Y'], ax=ax2, label='test')\n", (21177, 21215), True, 'import seaborn as sns\n'), ((21247, 21297), 'seaborn.kdeplot', 'sns.kdeplot', (["data['norm_Z']"], {'ax': 'ax3', 'label': '"""train"""'}), "(data['norm_Z'], ax=ax3, label='train')\n", (21258, 21297), True, 'import seaborn as sns\n'), ((21298, 21347), 'seaborn.kdeplot', 'sns.kdeplot', (["test['norm_Z']"], {'ax': 'ax3', 'label': '"""test"""'}), "(test['norm_Z'], ax=ax3, label='test')\n", (21309, 21347), True, 'import seaborn as sns\n'), ((21379, 21429), 'seaborn.kdeplot', 'sns.kdeplot', (["data['norm_W']"], {'ax': 'ax4', 'label': '"""train"""'}), "(data['norm_W'], ax=ax4, label='train')\n", (21390, 21429), True, 'import seaborn as sns\n'), ((21430, 21479), 'seaborn.kdeplot', 'sns.kdeplot', 
(["test['norm_W']"], {'ax': 'ax4', 'label': '"""test"""'}), "(test['norm_W'], ax=ax4, label='test')\n", (21441, 21479), True, 'import seaborn as sns\n'), ((21481, 21491), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21489, 21491), True, 'import matplotlib.pyplot as plt\n'), ((22257, 22295), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'figsize': '(15, 5)'}), '(ncols=3, figsize=(15, 5))\n', (22269, 22295), True, 'import matplotlib.pyplot as plt\n'), ((22319, 22370), 'seaborn.kdeplot', 'sns.kdeplot', (["data['euler_x']"], {'ax': 'ax1', 'label': '"""train"""'}), "(data['euler_x'], ax=ax1, label='train')\n", (22330, 22370), True, 'import seaborn as sns\n'), ((22371, 22421), 'seaborn.kdeplot', 'sns.kdeplot', (["test['euler_x']"], {'ax': 'ax1', 'label': '"""test"""'}), "(test['euler_x'], ax=ax1, label='test')\n", (22382, 22421), True, 'import seaborn as sns\n'), ((22446, 22497), 'seaborn.kdeplot', 'sns.kdeplot', (["data['euler_y']"], {'ax': 'ax2', 'label': '"""train"""'}), "(data['euler_y'], ax=ax2, label='train')\n", (22457, 22497), True, 'import seaborn as sns\n'), ((22498, 22548), 'seaborn.kdeplot', 'sns.kdeplot', (["test['euler_y']"], {'ax': 'ax2', 'label': '"""test"""'}), "(test['euler_y'], ax=ax2, label='test')\n", (22509, 22548), True, 'import seaborn as sns\n'), ((22571, 22622), 'seaborn.kdeplot', 'sns.kdeplot', (["data['euler_z']"], {'ax': 'ax3', 'label': '"""train"""'}), "(data['euler_z'], ax=ax3, label='train')\n", (22582, 22622), True, 'import seaborn as sns\n'), ((22623, 22673), 'seaborn.kdeplot', 'sns.kdeplot', (["test['euler_z']"], {'ax': 'ax3', 'label': '"""test"""'}), "(test['euler_z'], ax=ax3, label='test')\n", (22634, 22673), True, 'import seaborn as sns\n'), ((22675, 22685), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22683, 22685), True, 'import matplotlib.pyplot as plt\n'), ((31394, 31453), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 
'random_state': '(59)'}), '(n_splits=10, shuffle=True, random_state=59)\n', (31409, 31453), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((31480, 31508), 'numpy.zeros', 'np.zeros', (['(test.shape[0], 9)'], {}), '((test.shape[0], 9))\n', (31488, 31508), True, 'import numpy as np\n'), ((31518, 31541), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {}), '(data.shape[0])\n', (31526, 31541), True, 'import numpy as np\n'), ((32800, 32845), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['measured', "target['surface']"], {}), "(measured, target['surface'])\n", (32816, 32845), False, 'from sklearn.metrics import confusion_matrix\n'), ((34148, 34215), 'pandas.read_csv', 'pd.read_csv', (['"""../input/robots-best-submission/final_submission.csv"""'], {}), "('../input/robots-best-submission/final_submission.csv')\n", (34159, 34215), True, 'import pandas as pd\n'), ((35766, 35827), 'pandas.read_csv', 'pd.read_csv', (['"""../input/robots-best-submission/mybest0.73.csv"""'], {}), "('../input/robots-best-submission/mybest0.73.csv')\n", (35777, 35827), True, 'import pandas as pd\n'), ((35837, 35896), 'pandas.read_csv', 'pd.read_csv', (['"""../input/robots-best-submission/sub_0.72.csv"""'], {}), "('../input/robots-best-submission/sub_0.72.csv')\n", (35848, 35896), True, 'import pandas as pd\n'), ((35908, 35969), 'pandas.read_csv', 'pd.read_csv', (['"""../input/robots-best-submission/sub_0.72_2.csv"""'], {}), "('../input/robots-best-submission/sub_0.72_2.csv')\n", (35919, 35969), True, 'import pandas as pd\n'), ((35979, 36038), 'pandas.read_csv', 'pd.read_csv', (['"""../input/robots-best-submission/sub_0.71.csv"""'], {}), "('../input/robots-best-submission/sub_0.71.csv')\n", (35990, 36038), True, 'import pandas as pd\n'), ((36047, 36105), 'pandas.read_csv', 'pd.read_csv', (['"""../input/robots-best-submission/sub_0.6.csv"""'], {}), "('../input/robots-best-submission/sub_0.6.csv')\n", (36058, 36105), True, 'import pandas as pd\n'), ((36492, 36622), 
'pandas.concat', 'pd.concat', (["[sub073['series_id'], sub073['surface073'], sub072['surface072'], sub071[\n 'surface071'], sub06['surface06']]"], {'axis': '(1)'}), "([sub073['series_id'], sub073['surface073'], sub072['surface072'],\n sub071['surface071'], sub06['surface06']], axis=1)\n", (36501, 36622), True, 'import pandas as pd\n'), ((36853, 36903), 'pandas.DataFrame', 'pd.DataFrame', (['differents'], {'columns': "['idx', 'group']"}), "(differents, columns=['idx', 'group'])\n", (36865, 36903), True, 'import pandas as pd\n'), ((38819, 38838), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (38828, 38838), True, 'import matplotlib.style as style\n'), ((38863, 38914), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/X_train.csv"""'], {}), "('../input/career-con-2019/X_train.csv')\n", (38874, 38914), True, 'import pandas as pd\n'), ((38924, 38974), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/X_test.csv"""'], {}), "('../input/career-con-2019/X_test.csv')\n", (38935, 38974), True, 'import pandas as pd\n'), ((38984, 39035), 'pandas.read_csv', 'pd.read_csv', (['"""../input/career-con-2019/y_train.csv"""'], {}), "('../input/career-con-2019/y_train.csv')\n", (38995, 39035), True, 'import pandas as pd\n'), ((41534, 41561), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 8)'}), '(figsize=(24, 8))\n', (41544, 41561), True, 'import matplotlib.pyplot as plt\n'), ((41562, 41596), 'matplotlib.pyplot.title', 'plt.title', (['"""linear_acceleration_X"""'], {}), "('linear_acceleration_X')\n", (41571, 41596), True, 'import matplotlib.pyplot as plt\n'), ((41597, 41660), 'matplotlib.pyplot.plot', 'plt.plot', (['X_train.angular_velocity_Z[128:256]'], {'label': '"""original"""'}), "(X_train.angular_velocity_Z[128:256], label='original')\n", (41605, 41660), True, 'import matplotlib.pyplot as plt\n'), ((41662, 41734), 'matplotlib.pyplot.plot', 'plt.plot', (['X_train_denoised.angular_velocity_Z[128:256]'], 
{'label': '"""denoised"""'}), "(X_train_denoised.angular_velocity_Z[128:256], label='denoised')\n", (41670, 41734), True, 'import matplotlib.pyplot as plt\n'), ((41736, 41748), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (41746, 41748), True, 'import matplotlib.pyplot as plt\n'), ((41749, 41759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (41757, 41759), True, 'import matplotlib.pyplot as plt\n'), ((7540, 7564), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (7551, 7564), True, 'import matplotlib.pyplot as plt\n'), ((7569, 7590), 'matplotlib.pyplot.plot', 'plt.plot', (['serie1[col]'], {}), '(serie1[col])\n', (7577, 7590), True, 'import matplotlib.pyplot as plt\n'), ((7595, 7609), 'matplotlib.pyplot.title', 'plt.title', (['col'], {}), '(col)\n', (7604, 7609), True, 'import matplotlib.pyplot as plt\n'), ((8327, 8346), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (8336, 8346), True, 'import matplotlib.style as style\n'), ((8351, 8379), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(28, 16)'}), '(figsize=(28, 16))\n', (8361, 8379), True, 'import matplotlib.pyplot as plt\n'), ((12587, 12631), 'pandas.DataFrame', 'pd.DataFrame', (["{'mean': [mean], 'dev': [dev]}"], {}), "({'mean': [mean], 'dev': [dev]})\n", (12599, 12631), True, 'import pandas as pd\n'), ((13447, 13520), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'ncol': '(3)', 'mode': '"""expand"""', 'shadow': '(True)', 'fancybox': '(True)'}), "(loc='best', ncol=3, mode='expand', shadow=True, fancybox=True)\n", (13457, 13520), True, 'import matplotlib.pyplot as plt\n'), ((13525, 13556), 'matplotlib.pyplot.title', 'plt.title', (["('sensor: ' + feature)"], {}), "('sensor: ' + feature)\n", (13534, 13556), True, 'import matplotlib.pyplot as plt\n'), ((13561, 13594), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""frequency component"""'], {}), "('frequency component')\n", 
(13571, 13594), True, 'import matplotlib.pyplot as plt\n'), ((13599, 13622), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""amplitude"""'], {}), "('amplitude')\n", (13609, 13622), True, 'import matplotlib.pyplot as plt\n'), ((15083, 15109), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (15096, 15109), True, 'import seaborn as sns\n'), ((15114, 15126), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15124, 15126), True, 'import matplotlib.pyplot as plt\n'), ((15141, 15176), 'matplotlib.pyplot.subplots', 'plt.subplots', (['a', 'b'], {'figsize': '(17, 9)'}), '(a, b, figsize=(17, 9))\n', (15153, 15176), True, 'import matplotlib.pyplot as plt\n'), ((15560, 15570), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15568, 15570), True, 'import matplotlib.pyplot as plt\n'), ((16159, 16185), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (16172, 16185), True, 'import seaborn as sns\n'), ((16190, 16202), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16200, 16202), True, 'import matplotlib.pyplot as plt\n'), ((16217, 16253), 'matplotlib.pyplot.subplots', 'plt.subplots', (['a', 'b'], {'figsize': '(16, 24)'}), '(a, b, figsize=(16, 24))\n', (16229, 16253), True, 'import matplotlib.pyplot as plt\n'), ((16655, 16665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16663, 16665), True, 'import matplotlib.pyplot as plt\n'), ((17466, 17490), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (17477, 17490), True, 'import matplotlib.pyplot as plt\n'), ((17496, 17510), 'matplotlib.pyplot.title', 'plt.title', (['col'], {}), '(col)\n', (17505, 17510), True, 'import matplotlib.pyplot as plt\n'), ((17785, 17809), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (17796, 17809), True, 'import matplotlib.pyplot as plt\n'), ((17814, 17862), 'seaborn.distplot', 'sns.distplot', 
(['data[col]'], {'bins': '(100)', 'label': '"""train"""'}), "(data[col], bins=100, label='train')\n", (17826, 17862), True, 'import seaborn as sns\n'), ((17867, 17914), 'seaborn.distplot', 'sns.distplot', (['test[col]'], {'bins': '(100)', 'label': '"""test"""'}), "(test[col], bins=100, label='test')\n", (17879, 17914), True, 'import seaborn as sns\n'), ((18996, 19014), 'math.atan2', 'math.atan2', (['t0', 't1'], {}), '(t0, t1)\n', (19006, 19014), False, 'import math\n'), ((19126, 19139), 'math.asin', 'math.asin', (['t2'], {}), '(t2)\n', (19135, 19139), False, 'import math\n'), ((19219, 19237), 'math.atan2', 'math.atan2', (['t3', 't4'], {}), '(t3, t4)\n', (19229, 19237), False, 'import math\n'), ((23355, 23369), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (23367, 23369), True, 'import pandas as pd\n'), ((25351, 25362), 'scipy.stats.kurtosis', 'kurtosis', (['x'], {}), '(x)\n', (25359, 25362), False, 'from scipy.stats import kurtosis\n'), ((25471, 25478), 'scipy.stats.skew', 'skew', (['x'], {}), '(x)\n', (25475, 25478), False, 'from scipy.stats import skew\n'), ((25500, 25511), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (25508, 25511), True, 'import numpy as np\n'), ((25520, 25539), 'numpy.append', 'np.append', (['x[-1]', 'x'], {}), '(x[-1], x)\n', (25529, 25539), True, 'import numpy as np\n'), ((25548, 25566), 'numpy.append', 'np.append', (['x', 'x[1]'], {}), '(x, x[1])\n', (25557, 25566), True, 'import numpy as np\n'), ((25668, 25712), 'numpy.heaviside', 'np.heaviside', (['((xn - xn_i1) * (xn - xn_i2))', '(0)'], {}), '((xn - xn_i1) * (xn - xn_i2), 0)\n', (25680, 25712), True, 'import numpy as np\n'), ((25760, 25771), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (25768, 25771), True, 'import numpy as np\n'), ((25780, 25799), 'numpy.append', 'np.append', (['x[-1]', 'x'], {}), '(x[-1], x)\n', (25789, 25799), True, 'import numpy as np\n'), ((25808, 25826), 'numpy.append', 'np.append', (['x', 'x[1]'], {}), '(x, x[1])\n', (25817, 25826), True, 'import 
numpy as np\n'), ((26155, 26166), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (26163, 26166), True, 'import numpy as np\n'), ((26175, 26194), 'numpy.append', 'np.append', (['x[-1]', 'x'], {}), '(x[-1], x)\n', (26184, 26194), True, 'import numpy as np\n'), ((26203, 26221), 'numpy.append', 'np.append', (['x', 'x[1]'], {}), '(x, x[1])\n', (26212, 26221), True, 'import numpy as np\n'), ((26431, 26445), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (26443, 26445), True, 'import pandas as pd\n'), ((28134, 28148), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28146, 28148), True, 'import pandas as pd\n'), ((31675, 31726), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(500)', 'n_jobs': '(-1)'}), '(n_estimators=500, n_jobs=-1)\n', (31697, 31726), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((32243, 32266), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (32253, 32266), True, 'import numpy as np\n'), ((32710, 32722), 'gc.collect', 'gc.collect', ([], {}), '()\n', (32720, 32722), False, 'import gc\n'), ((33032, 33061), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['truth', 'pred'], {}), '(truth, pred)\n', (33048, 33061), False, 'from sklearn.metrics import confusion_matrix\n'), ((33153, 33181), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (33163, 33181), True, 'import matplotlib.pyplot as plt\n'), ((33186, 33244), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'plt.cm.Blues'}), "(cm, interpolation='nearest', cmap=plt.cm.Blues)\n", (33196, 33244), True, 'import matplotlib.pyplot as plt\n'), ((33249, 33287), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion matrix"""'], {'size': '(15)'}), "('Confusion matrix', size=15)\n", (33258, 33287), True, 'import matplotlib.pyplot as plt\n'), ((33292, 33330), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], 
{'fraction': '(0.046)', 'pad': '(0.04)'}), '(fraction=0.046, pad=0.04)\n', (33304, 33330), True, 'import matplotlib.pyplot as plt\n'), ((33376, 33420), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (33386, 33420), True, 'import matplotlib.pyplot as plt\n'), ((33425, 33456), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (33435, 33456), True, 'import matplotlib.pyplot as plt\n'), ((33762, 33786), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (33772, 33786), True, 'import matplotlib.pyplot as plt\n'), ((33791, 33820), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (33801, 33820), True, 'import matplotlib.pyplot as plt\n'), ((33825, 33840), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (33833, 33840), True, 'import matplotlib.pyplot as plt\n'), ((33845, 33863), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (33861, 33863), True, 'import matplotlib.pyplot as plt\n'), ((39284, 39303), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (39293, 39303), True, 'import matplotlib.style as style\n'), ((39308, 39336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(28, 16)'}), '(figsize=(28, 16))\n', (39318, 39336), True, 'import matplotlib.pyplot as plt\n'), ((8731, 8755), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (8742, 8755), True, 'import matplotlib.pyplot as plt\n'), ((8764, 8827), 'matplotlib.pyplot.plot', 'plt.plot', (['series_dict[series_id][col]'], {'color': 'color', 'linewidth': '(3)'}), '(series_dict[series_id][col], color=color, linewidth=3)\n', (8772, 8827), True, 'import matplotlib.pyplot as plt\n'), ((8836, 8850), 'matplotlib.pyplot.title', 'plt.title', (['col'], {}), '(col)\n', (8845, 
8850), True, 'import matplotlib.pyplot as plt\n'), ((11422, 11700), 'pandas.DataFrame', 'pd.DataFrame', (["{'lx': [d['linear_acceleration_X'].values], 'ly': [d[\n 'linear_acceleration_Y'].values], 'lz': [d['linear_acceleration_Z'].\n values], 'ax': [d['angular_velocity_X'].values], 'ay': [d[\n 'angular_velocity_Y'].values], 'az': [d['angular_velocity_Z'].values]}"], {}), "({'lx': [d['linear_acceleration_X'].values], 'ly': [d[\n 'linear_acceleration_Y'].values], 'lz': [d['linear_acceleration_Z'].\n values], 'ax': [d['angular_velocity_X'].values], 'ay': [d[\n 'angular_velocity_Y'].values], 'az': [d['angular_velocity_Z'].values]})\n", (11434, 11700), True, 'import pandas as pd\n'), ((12548, 12560), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (12557, 12560), False, 'import math\n'), ((13315, 13358), 'matplotlib.pyplot.plot', 'plt.plot', (['b', "stat.at[k, 'mean']", 'v'], {'label': 'k'}), "(b, stat.at[k, 'mean'], v, label=k)\n", (13323, 13358), True, 'import matplotlib.pyplot as plt\n'), ((15227, 15247), 'matplotlib.pyplot.subplot', 'plt.subplot', (['a', 'b', 'i'], {}), '(a, b, i)\n', (15238, 15247), True, 'import matplotlib.pyplot as plt\n'), ((15254, 15301), 'seaborn.kdeplot', 'sns.kdeplot', (['df1[feature]'], {'bw': '(0.5)', 'label': 'label1'}), '(df1[feature], bw=0.5, label=label1)\n', (15265, 15301), True, 'import seaborn as sns\n'), ((15309, 15356), 'seaborn.kdeplot', 'sns.kdeplot', (['df2[feature]'], {'bw': '(0.5)', 'label': 'label2'}), '(df2[feature], bw=0.5, label=label2)\n', (15320, 15356), True, 'import seaborn as sns\n'), ((15364, 15395), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['feature'], {'fontsize': '(9)'}), '(feature, fontsize=9)\n', (15374, 15395), True, 'import matplotlib.pyplot as plt\n'), ((15419, 15431), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (15429, 15431), True, 'import matplotlib.pyplot as plt\n'), ((15440, 15493), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""major"""', 
'labelsize': '(8)'}), "(axis='x', which='major', labelsize=8)\n", (15455, 15493), True, 'import matplotlib.pyplot as plt\n'), ((15502, 15555), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""major"""', 'labelsize': '(8)'}), "(axis='y', which='major', labelsize=8)\n", (15517, 15555), True, 'import matplotlib.pyplot as plt\n'), ((16304, 16324), 'matplotlib.pyplot.subplot', 'plt.subplot', (['a', 'b', 'i'], {}), '(a, b, i)\n', (16315, 16324), True, 'import matplotlib.pyplot as plt\n'), ((16459, 16490), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['feature'], {'fontsize': '(9)'}), '(feature, fontsize=9)\n', (16469, 16490), True, 'import matplotlib.pyplot as plt\n'), ((16514, 16526), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (16524, 16526), True, 'import matplotlib.pyplot as plt\n'), ((16535, 16588), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""major"""', 'labelsize': '(8)'}), "(axis='x', which='major', labelsize=8)\n", (16550, 16588), True, 'import matplotlib.pyplot as plt\n'), ((16597, 16650), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""major"""', 'labelsize': '(8)'}), "(axis='y', which='major', labelsize=8)\n", (16612, 16650), True, 'import matplotlib.pyplot as plt\n'), ((17604, 17652), 'seaborn.kdeplot', 'sns.kdeplot', (['surface_feature[col]'], {'label': 'surface'}), '(surface_feature[col], label=surface)\n', (17615, 17652), True, 'import seaborn as sns\n'), ((26293, 26321), 'numpy.heaviside', 'np.heaviside', (['(-xn * xn_i2)', '(0)'], {}), '(-xn * xn_i2, 0)\n', (26305, 26321), True, 'import numpy as np\n'), ((30299, 30316), 'pandas.DataFrame', 'pd.DataFrame', (['sol'], {}), '(sol)\n', (30311, 30316), True, 'import pandas as pd\n'), ((32401, 32428), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 10)'}), '(figsize=(7, 10))\n', (32411, 32428), True, 'import matplotlib.pyplot as plt\n'), ((32437, 32469), 
'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importances"""'], {}), "('Feature Importances')\n", (32446, 32469), True, 'import matplotlib.pyplot as plt\n'), ((32652, 32685), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Relative Importance"""'], {}), "('Relative Importance')\n", (32662, 32685), True, 'import matplotlib.pyplot as plt\n'), ((32694, 32704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32702, 32704), True, 'import matplotlib.pyplot as plt\n'), ((39688, 39712), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(i + 1)'], {}), '(3, 4, i + 1)\n', (39699, 39712), True, 'import matplotlib.pyplot as plt\n'), ((39721, 39784), 'matplotlib.pyplot.plot', 'plt.plot', (['series_dict[series_id][col]'], {'color': 'color', 'linewidth': '(3)'}), '(series_dict[series_id][col], color=color, linewidth=3)\n', (39729, 39784), True, 'import matplotlib.pyplot as plt\n'), ((39793, 39807), 'matplotlib.pyplot.title', 'plt.title', (['col'], {}), '(col)\n', (39802, 39807), True, 'import matplotlib.pyplot as plt\n'), ((16406, 16451), 'seaborn.kdeplot', 'sns.kdeplot', (['ttc[feature]'], {'bw': '(0.5)', 'label': 'clas'}), '(ttc[feature], bw=0.5, label=clas)\n', (16417, 16451), True, 'import seaborn as sns\n'), ((25401, 25410), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (25407, 25410), True, 'import numpy as np\n'), ((25427, 25436), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (25433, 25436), True, 'import numpy as np\n'), ((11834, 11850), 'math.sqrt', 'math.sqrt', (['(128.0)'], {}), '(128.0)\n', (11843, 11850), False, 'import math\n'), ((23901, 23911), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (23908, 23911), True, 'import numpy as np\n'), ((24818, 24827), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (24824, 24827), True, 'import numpy as np\n'), ((24917, 24926), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (24923, 24926), True, 'import numpy as np\n'), ((11872, 11885), 'numpy.fft.fft', 'np.fft.fft', (['x'], {}), '(x)\n', (11882, 11885), 
True, 'import numpy as np\n'), ((24604, 24614), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (24611, 24614), True, 'import numpy as np\n'), ((30164, 30190), 'numpy.ones', 'np.ones', (['corr_matrix.shape'], {}), '(corr_matrix.shape)\n', (30171, 30190), True, 'import numpy as np\n')] |
from scipy.sparse import coo_matrix, csr_matrix, diags
from scheduler import PyScheduler
import numpy as np

# Smoke-test script for PyScheduler: builds a small row-normalized
# adjacency matrix for an 11-node tree and prints the feed_dict that the
# scheduler produces for a one-node minibatch.

# A = coo_matrix(np.array([[10, 1, 0, 0, 2],
#                [1, 10, 0, 0, 3],
#                [0, 0, 10, 0, 4],
#                [0, 0, 0, 10, 5],
#                [2, 3, 4, 5, 10]]).astype(np.float32))

# (src, dst, weight) edge list of an 11-node tree rooted at node 0.
edges = np.array([(0, 1, 1), (0, 2, 1), (0, 3, 1),
                  (1, 4, 1), (1, 5, 1), (1, 6, 1),
                  (2, 7, 1), (2, 8, 1), (2, 9, 1),
                  (3, 10, 1)])
# Sparse adjacency matrix built from the edge list.
adj = csr_matrix((edges[:,2], (edges[:,0], edges[:,1])),
                shape=(11, 11), dtype=np.float32)
# Symmetrize: the graph is undirected.
adj = adj + adj.transpose()

# Row normalize
deg = np.array(adj.sum(axis=0)).flatten()
deg = diags(1.0/deg, 0)
adj = deg.dot(adj)
print(adj)

# L: presumably the number of propagation layers the scheduler expects
# (placeholder names below are generated per layer) — TODO confirm
# against the scheduler module.
L = 2
placeholders = {
    'adj':     ['adj_{}'.format(i) for i in range(L)],
    'madj':    ['madj_{}'.format(i) for i in range(L)],
    'fadj':    ['fadj_{}'.format(i) for i in range(L)],
    'fields':  ['fields_{}'.format(i) for i in range(L+1)],
    'ffields': ['ffields_{}'.format(i) for i in range(L+1)],
    'scales':  ['scales_{}'.format(i) for i in range(L)],
    'labels':  'labels'
}

# Dummy all-zero label matrix (11 nodes, 2 classes).
labels = np.zeros((11, 2))
sch = PyScheduler(adj, labels, 2, [1, 2], placeholders, 0, cv=True)
# Request a minibatch containing only node 0 and dump everything it feeds.
feed_dict = sch.batch(np.array([0], dtype=np.int32))
for k in feed_dict:
    print(k)
    print(feed_dict[k])
| [
"scipy.sparse.diags",
"numpy.zeros",
"scipy.sparse.csr_matrix",
"numpy.array",
"scheduler.PyScheduler"
] | [((363, 488), 'numpy.array', 'np.array', (['[(0, 1, 1), (0, 2, 1), (0, 3, 1), (1, 4, 1), (1, 5, 1), (1, 6, 1), (2, 7, 1\n ), (2, 8, 1), (2, 9, 1), (3, 10, 1)]'], {}), '([(0, 1, 1), (0, 2, 1), (0, 3, 1), (1, 4, 1), (1, 5, 1), (1, 6, 1),\n (2, 7, 1), (2, 8, 1), (2, 9, 1), (3, 10, 1)])\n', (371, 488), True, 'import numpy as np\n'), ((542, 634), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(edges[:, 2], (edges[:, 0], edges[:, 1]))'], {'shape': '(11, 11)', 'dtype': 'np.float32'}), '((edges[:, 2], (edges[:, 0], edges[:, 1])), shape=(11, 11), dtype\n =np.float32)\n', (552, 634), False, 'from scipy.sparse import coo_matrix, csr_matrix, diags\n'), ((737, 756), 'scipy.sparse.diags', 'diags', (['(1.0 / deg)', '(0)'], {}), '(1.0 / deg, 0)\n', (742, 756), False, 'from scipy.sparse import coo_matrix, csr_matrix, diags\n'), ((1215, 1232), 'numpy.zeros', 'np.zeros', (['(11, 2)'], {}), '((11, 2))\n', (1223, 1232), True, 'import numpy as np\n'), ((1239, 1300), 'scheduler.PyScheduler', 'PyScheduler', (['adj', 'labels', '(2)', '[1, 2]', 'placeholders', '(0)'], {'cv': '(True)'}), '(adj, labels, 2, [1, 2], placeholders, 0, cv=True)\n', (1250, 1300), False, 'from scheduler import PyScheduler\n'), ((1323, 1352), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (1331, 1352), True, 'import numpy as np\n')] |
"""
<NAME>
2016-06-06
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from skimage.feature import canny
from skimage.segmentation import clear_border
from skimage.morphology import dilation, rectangle
from skimage.measure import regionprops
import cv2 as cv
from utils import (calculate_convexity,
calculate_circularity_reciprocal)
# Index of each color channel in the image array returned by cv.imread
# (OpenCV loads images in BGR order).
CHANNEL_CODE = {'blue': 0, 'green': 1, 'red': 2}

# Default per-bubble property filters used when selecting valid bubbles;
# each entry maps a property name to its allowed min/max bounds.
DEFAULT_FILTERS = {'circularity_reciprocal': {'min': 0.2, 'max': 1.6},
                   'convexity': {'min': 0.92}}
class NotAllowedChannel(Exception):
    """Raised when a color channel other than red/green/blue is requested.

    Dedicated exception type so channel errors are easy to spot and catch.
    """
    pass
class Logger(object):
    """Keep an ordered record of the processing steps applied so far."""

    def __init__(self):
        # chronological list of log statements
        self.log = []

    def add_log(self, message):
        """Append one log statement to the recorded sequence."""
        self.log.append(message)

    def get_last_log(self):
        """Return the most recently recorded log statement."""
        return self.log[-1]

    def print_log_sequence(self):
        """Print every recorded step, one per line."""
        steps = "\n".join(self.log)
        print("Steps undertaken since from raw image:")
        print(steps)
        print("\n")

    def clear_log(self):
        """Discard every recorded log statement."""
        self.log = []
def batchbubblekicker(data_path, channel, pipeline, *args):
    """Run a processing pipeline on every image file inside a folder.

    Every file found in ``data_path`` is handed to a fresh instance of the
    given ``pipeline`` class (as implemented in pipelines.py), which is
    then executed with the provided arguments.

    :param data_path: folder containing images to process
    :param channel: green | red | blue
    :param pipeline: class from pipelines.py to use as processing sequence
    :param args: arguments required by the pipeline
    :return: dictionary with for each file the output binary image
    """
    return {
        fname: pipeline(os.path.join(data_path, fname),
                        channel=channel).run(*args)
        for fname in os.listdir(data_path)
    }
class BubbleKicker(object):
    """Apply a sequence of image operations to derive a binary bubble image.

    Every operation works on ``current_image`` and records itself in
    ``self.logs`` so the processing history can be inspected.
    """

    def __init__(self, filename, channel='red'):
        """
        This class contains a set of functions that can be applied to a
        bubble image in order to derive a binary bubble-image and calculate
        the statistics/distribution.

        :param filename: image file name
        :param channel: green | red | blue
        """
        self.raw_file = self._read_image(filename)
        self.logs = Logger()

        self._channel_control(channel)
        self._channel = channel
        # keep only the requested color channel as the working image
        self.raw_image = self.raw_file[:, :, CHANNEL_CODE[self._channel]]
        self.current_image = self.raw_image.copy()

    @staticmethod
    def _read_image(filename):
        """Read the image from a file and store an RGB-image MxNx3."""
        image = cv.imread(filename)
        return image

    def reset_to_raw(self):
        """Make the current image again the raw image and clear the logs."""
        self.current_image = self.raw_image.copy()
        self.logs.clear_log()

    def switch_channel(self, channel):
        """Change the color channel and restart from the raw image."""
        self._channel_control(channel)
        self._channel = channel
        self.raw_image = self.raw_file[:, :, CHANNEL_CODE[self._channel]]
        self.current_image = self.raw_image.copy()
        self.logs.clear_log()
        print("Currently using channel {}".format(self._channel))

    def what_channel(self):
        """Print the current working channel (R, G or B?)."""
        print(self._channel)

    @staticmethod
    def _channel_control(channel):
        """Raise NotAllowedChannel if channel is not red, green or blue."""
        if channel not in ['red', 'green', 'blue']:
            raise NotAllowedChannel('Not a valid channel for '
                                     'RGB color scheme!')

    def edge_detect_canny_opencv(self, threshold=(0.01, 0.5)):
        """Perform the edge detection algorithm of Canny on the image
        using the openCV package.

        :param threshold: (min, max) hysteresis thresholds of the Canny
            edge detector. The default is a tuple rather than a list to
            avoid the mutable-default-argument pitfall.
        """
        image = cv.Canny(self.current_image,
                         threshold[0],
                         threshold[1])

        self.current_image = image
        self.logs.add_log('edge-detect with thresholds {} -> {} '
                          '- opencv'.format(threshold[0], threshold[1]))
        return image

    def edge_detect_canny_skimage(self, sigma=3, threshold=(0.01, 0.5)):
        """Perform the edge detection algorithm of Canny on the image
        using the scikit-image package.

        :param sigma: standard deviation of the Gaussian smoothing step
        :param threshold: (low, high) hysteresis thresholds
        """
        image = canny(self.current_image,
                      sigma=sigma,
                      low_threshold=threshold[0],
                      high_threshold=threshold[1])
        self.current_image = image

        # append function to logs
        self.logs.add_log('edge-detect with '
                          'thresholds {} -> {} and sigma {} '
                          '- skimage'.format(threshold[0],
                                             threshold[1],
                                             sigma))
        return image

    def adaptive_threshold_opencv(self, blocksize=91, cvalue=18):
        """Binarize the image with OpenCV's Gaussian adaptive threshold.

        The threshold is computed per pixel from a local window, which is
        robust against uneven illumination.

        :param blocksize: width of the pixel neighbourhood used to compute
            the local threshold (OpenCV requires an odd value)
        :param cvalue: constant subtracted from the Gaussian-weighted
            neighbourhood mean
        """
        image = cv.adaptiveThreshold(self.current_image, 1,
                                     cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                                     cv.THRESH_BINARY, blocksize, cvalue)
        self.current_image = image

        self.logs.add_log('adaptive threshold bubble detection '
                          'with blocksize {} and cvalue {} '
                          '- opencv'.format(blocksize, cvalue))
        return image

    def dilate_opencv(self, footprintsize=3):
        """Perform the dilation of the image.

        :param footprintsize: edge length of the square structuring element
        """
        # set up structuring element with footprintsize
        kernel = np.ones((footprintsize, footprintsize), np.uint8)
        # perform algorithm with given environment,
        # store in same memory location
        image = cv.dilate(self.current_image, kernel, iterations=1)
        # update current image
        self.current_image = image
        # append function to logs
        self.logs.add_log('dilate with footprintsize {} '
                          '- opencv'.format(footprintsize))
        return image

    def dilate_skimage(self):
        """Perform the dilation of the image with scikit-image."""
        # NOTE(review): a 1x1 rectangle is the smallest possible
        # structuring element — confirm this is the intended neighbourhood.
        struct_env = rectangle(1, 1)
        # perform algorithm with given environment,
        # store in same memory location
        image = dilation(self.current_image, selem=struct_env,
                         out=self.current_image)
        # update current image
        self.current_image = image
        # append function to logs
        self.logs.add_log('dilate - skimage')
        return image

    def fill_holes_opencv(self):
        """Fill the holes of the image by flood-filling with 0 starting
        from the top-left corner."""
        h, w = self.current_image.shape[:2]  # stores image sizes
        # floodFill requires a mask 2 pixels larger than the image
        mask = np.zeros((h + 2, w + 2), np.uint8)
        # floodfill operates on the saved image itself
        cv.floodFill(self.current_image, mask, (0, 0), 0)
        # append function to logs
        self.logs.add_log('fill holes - opencv')
        return self.current_image

    def clear_border_skimage(self, buffer_size=3, bgval=1):
        """Clear objects touching the image border.

        Parameters
        ----------
        buffer_size: int
            width (in pixels) of the belt around the image border that is
            considered to eliminate touching objects (default is 3)
        bgval: int
            all touching objects are set to this value (default is 1)
        """
        # invert so clear_border sees the bubbles as foreground objects
        image_inv = cv.bitwise_not(self.current_image)
        image = clear_border(image_inv, buffer_size=buffer_size, bgval=bgval)
        # update current image
        self.current_image = image
        # append function to logs
        self.logs.add_log('clear border with buffer size {} and bgval {} '
                          '- skimage'.format(buffer_size, bgval))
        return image

    def erode_opencv(self, footprintsize=1):
        """Erode detected edges with a given footprint.

        This function is meant to be used after dilation of the edges so
        as to restore the original edge width.

        :param footprintsize: edge length of the square structuring element
        """
        kernel = np.ones((footprintsize, footprintsize), np.uint8)
        image = cv.erode(self.current_image, kernel, iterations=1)
        # update current image
        self.current_image = image
        # append function to logs
        self.logs.add_log('erode with footprintsize {} '
                          '- opencv'.format(footprintsize))
        return image

    def what_have_i_done(self):
        """Print the current log statements as a sequence of
        performed steps."""
        self.logs.print_log_sequence()

    def plot(self):
        """Plot the current image; the title is the last performed step."""
        fig, ax = plt.subplots()
        ax.imshow(self.current_image, cmap=plt.cm.gray)
        if len(self.logs.log) > 0:
            ax.set_title(self.logs.log[-1])
        return fig, ax
def _bubble_properties_table(binary_image):
    """Label each bubble in a binary image and build a property table.

    :param binary_image: binary image where bubbles are the zero pixels
    :return: (number of labels, labelled marker image, DataFrame of
        per-bubble geometric properties indexed by bubble label)
    """
    # Invert so bubbles become foreground, then label connected components.
    nbubbles, marker_image = cv.connectedComponents(1 - binary_image)

    # Collect the geometric descriptors of every labelled region.
    records = []
    for bubble in regionprops(marker_image):
        records.append({"label": bubble.label,
                        "area": bubble.area,
                        "centroid": bubble.centroid,
                        "convex_area": bubble.convex_area,
                        "equivalent_diameter": bubble.equivalent_diameter,
                        "perimeter": bubble.perimeter})
    bubble_properties = pd.DataFrame(records)

    # Derived shape descriptors used by the downstream filters.
    perimeter = bubble_properties["perimeter"]
    area = bubble_properties["area"]
    bubble_properties["convexity"] = calculate_convexity(perimeter, area)
    bubble_properties["circularity_reciprocal"] = \
        calculate_circularity_reciprocal(perimeter, area)

    bubble_properties = bubble_properties.set_index("label")

    return nbubbles, marker_image, bubble_properties
def _bubble_properties_filter(property_table, id_image,
                              rules=DEFAULT_FILTERS):
    """Exclude bubbles whose properties violate a set of rules.

    Bubbles failing any rule are dropped from the property table and their
    pixels in ``id_image`` are set to 0 (background) in place.

    :param property_table: DataFrame of per-bubble properties indexed by
        bubble label
    :param id_image: labelled image where each bubble carries its label
    :param rules: mapping {property name: {'min': value and/or
        'max': value}}
    :return: (filtered id_image, filtered property table)
    """
    bubble_props = property_table.copy()
    all_ids = bubble_props.index.tolist()

    # Progressively narrow the table with every min/max rule.
    # (A stray debug print of each ruleset was removed here.)
    for prop_name, ruleset in rules.items():
        for rule, value in ruleset.items():
            if rule == 'min':
                bubble_props = \
                    bubble_props[bubble_props[prop_name] > value]
            elif rule == 'max':
                bubble_props = \
                    bubble_props[bubble_props[prop_name] < value]
            else:
                raise Exception("Rule not supported, "
                                "use min or max as filter")

    # Erase the rejected bubbles from the labelled image.
    removed_ids = [el for el in all_ids if el
                   not in bubble_props.index.tolist()]
    for idb in removed_ids:
        id_image[id_image == idb] = 0

    return id_image, bubble_props
def bubble_properties_calculate(binary_image,
                                rules=DEFAULT_FILTERS):
    """Label the bubbles in a binary image and compute filtered properties.

    :param binary_image: binary image where bubbles are the zero pixels
    :param rules: property filter rules, see _bubble_properties_filter
    :return: (labelled id image, filtered property table)
    """
    # Identify the bubbles and derive their geometric properties.
    _, id_image, prop_table = _bubble_properties_table(binary_image)

    # Drop the bubbles that violate the filtering rules.
    id_image, properties = _bubble_properties_filter(prop_table,
                                                    id_image, rules)

    return id_image, properties
def bubble_properties_plot(property_table,
                           which_property="equivalent_diameter",
                           bins=20):
    """Plot the distribution of a bubble property.

    Draws a frequency histogram (left axis) overlaid with the cumulative
    percentage curve (right axis).

    :param property_table: DataFrame of per-bubble properties
    :param which_property: column of property_table to plot
    :param bins: number of histogram bins
    :return: (figure, (left axis, right axis))
    """
    fontsize_labels = 14.
    # format the right axis as percentages
    formatter = FuncFormatter(
        lambda y, pos: "{:d}%".format(int(round(y * 100))))

    fig, ax1 = plt.subplots()
    # left axis - histogram of raw counts
    # NOTE: `normed` was removed from matplotlib's hist (3.1);
    # `density` is the supported replacement.
    ax1.hist(property_table[which_property], bins,
             density=False, cumulative=False, histtype='bar',
             color='gray', ec='white')
    ax1.get_xaxis().tick_bottom()
    ax1.set_ylabel(r'Frequency', color='gray',
                   fontsize=fontsize_labels)
    ax1.spines['top'].set_visible(False)

    # right axis - cumulative distribution (normalized, so it ends at 1)
    ax2 = ax1.twinx()
    ax2.hist(property_table[which_property],
             bins, density=True, cumulative=True,
             histtype='step', color='k', linewidth=3.)
    ax2.yaxis.set_major_formatter(formatter)
    ax2.set_ylabel(r'Cumulative percentage (%)', color='k',
                   fontsize=fontsize_labels)
    ax2.spines['top'].set_visible(False)
    ax2.set_ylim(0, 1.)

    # additional options
    ax1.set_xlim(0, property_table[which_property].max())
    ax1.tick_params(axis='x', which='both', pad=10)
    ax1.set_xlabel(which_property)

    return fig, (ax1, ax2)
| [
"utils.calculate_convexity",
"numpy.ones",
"cv2.adaptiveThreshold",
"cv2.floodFill",
"cv2.erode",
"os.path.join",
"skimage.morphology.dilation",
"skimage.measure.regionprops",
"pandas.DataFrame",
"cv2.dilate",
"cv2.connectedComponents",
"skimage.segmentation.clear_border",
"utils.calculate_c... | [((1904, 1925), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1914, 1925), False, 'import os\n'), ((10003, 10043), 'cv2.connectedComponents', 'cv.connectedComponents', (['(1 - binary_image)'], {}), '(1 - binary_image)\n', (10025, 10043), True, 'import cv2 as cv\n'), ((10057, 10082), 'skimage.measure.regionprops', 'regionprops', (['marker_image'], {}), '(marker_image)\n', (10068, 10082), False, 'from skimage.measure import regionprops\n'), ((10119, 10358), 'pandas.DataFrame', 'pd.DataFrame', (["[{'label': bubble.label, 'area': bubble.area, 'centroid': bubble.centroid,\n 'convex_area': bubble.convex_area, 'equivalent_diameter': bubble.\n equivalent_diameter, 'perimeter': bubble.perimeter} for bubble in props]"], {}), "([{'label': bubble.label, 'area': bubble.area, 'centroid':\n bubble.centroid, 'convex_area': bubble.convex_area,\n 'equivalent_diameter': bubble.equivalent_diameter, 'perimeter': bubble.\n perimeter} for bubble in props])\n", (10131, 10358), True, 'import pandas as pd\n'), ((10517, 10595), 'utils.calculate_convexity', 'calculate_convexity', (["bubble_properties['perimeter']", "bubble_properties['area']"], {}), "(bubble_properties['perimeter'], bubble_properties['area'])\n", (10536, 10595), False, 'from utils import calculate_convexity, calculate_circularity_reciprocal\n'), ((10687, 10782), 'utils.calculate_circularity_reciprocal', 'calculate_circularity_reciprocal', (["bubble_properties['perimeter']", "bubble_properties['area']"], {}), "(bubble_properties['perimeter'],\n bubble_properties['area'])\n", (10719, 10782), False, 'from utils import calculate_convexity, calculate_circularity_reciprocal\n'), ((12825, 12839), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12837, 12839), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2978), 'cv2.imread', 'cv.imread', (['filename'], {}), '(filename)\n', (2968, 2978), True, 'import cv2 as cv\n'), ((4242, 4298), 'cv2.Canny', 'cv.Canny', 
(['self.current_image', 'threshold[0]', 'threshold[1]'], {}), '(self.current_image, threshold[0], threshold[1])\n', (4250, 4298), True, 'import cv2 as cv\n'), ((4740, 4839), 'skimage.feature.canny', 'canny', (['self.current_image'], {'sigma': 'sigma', 'low_threshold': 'threshold[0]', 'high_threshold': 'threshold[1]'}), '(self.current_image, sigma=sigma, low_threshold=threshold[0],\n high_threshold=threshold[1])\n', (4745, 4839), False, 'from skimage.feature import canny\n'), ((5782, 5897), 'cv2.adaptiveThreshold', 'cv.adaptiveThreshold', (['self.current_image', '(1)', 'cv.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv.THRESH_BINARY', 'blocksize', 'cvalue'], {}), '(self.current_image, 1, cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv.THRESH_BINARY, blocksize, cvalue)\n', (5802, 5897), True, 'import cv2 as cv\n'), ((6398, 6447), 'numpy.ones', 'np.ones', (['(footprintsize, footprintsize)', 'np.uint8'], {}), '((footprintsize, footprintsize), np.uint8)\n', (6405, 6447), True, 'import numpy as np\n'), ((6561, 6612), 'cv2.dilate', 'cv.dilate', (['self.current_image', 'kernel'], {'iterations': '(1)'}), '(self.current_image, kernel, iterations=1)\n', (6570, 6612), True, 'import cv2 as cv\n'), ((7085, 7100), 'skimage.morphology.rectangle', 'rectangle', (['(1)', '(1)'], {}), '(1, 1)\n', (7094, 7100), False, 'from skimage.morphology import dilation, rectangle\n'), ((7214, 7284), 'skimage.morphology.dilation', 'dilation', (['self.current_image'], {'selem': 'struct_env', 'out': 'self.current_image'}), '(self.current_image, selem=struct_env, out=self.current_image)\n', (7222, 7284), False, 'from skimage.morphology import dilation, rectangle\n'), ((7680, 7714), 'numpy.zeros', 'np.zeros', (['(h + 2, w + 2)', 'np.uint8'], {}), '((h + 2, w + 2), np.uint8)\n', (7688, 7714), True, 'import numpy as np\n'), ((7780, 7829), 'cv2.floodFill', 'cv.floodFill', (['self.current_image', 'mask', '(0, 0)', '(0)'], {}), '(self.current_image, mask, (0, 0), 0)\n', (7792, 7829), True, 'import cv2 as cv\n'), ((8469, 8503), 
'cv2.bitwise_not', 'cv.bitwise_not', (['self.current_image'], {}), '(self.current_image)\n', (8483, 8503), True, 'import cv2 as cv\n'), ((8521, 8582), 'skimage.segmentation.clear_border', 'clear_border', (['image_inv'], {'buffer_size': 'buffer_size', 'bgval': 'bgval'}), '(image_inv, buffer_size=buffer_size, bgval=bgval)\n', (8533, 8582), False, 'from skimage.segmentation import clear_border\n'), ((9077, 9126), 'numpy.ones', 'np.ones', (['(footprintsize, footprintsize)', 'np.uint8'], {}), '((footprintsize, footprintsize), np.uint8)\n', (9084, 9126), True, 'import numpy as np\n'), ((9144, 9194), 'cv2.erode', 'cv.erode', (['self.current_image', 'kernel'], {'iterations': '(1)'}), '(self.current_image, kernel, iterations=1)\n', (9152, 9194), True, 'import cv2 as cv\n'), ((9689, 9703), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9701, 9703), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1995), 'os.path.join', 'os.path.join', (['data_path', 'imgfile'], {}), '(data_path, imgfile)\n', (1975, 1995), False, 'import os\n')] |
import cv2
import numpy as np
class imageTOoMatrix:
def __init__(self, images_name, img_width, img_height):
self.images_name = images_name
self.img_width = img_width
self.img_height = img_height
self.img_size = (img_width * img_height)
def getMatrix(self):
col = len(self.images_name)
img_mat = np.zeros((self.img_size, col))
i = 0
for name in self.images_name:
gray = cv2.imread(name, 0)
gray = cv2.resize(gray, (self.img_width, self.img_height))
mat_gray = np.asmatrix(gray)
img_mat[:, i] = mat_gray.ravel()
i += 1
return img_mat
| [
"cv2.imread",
"numpy.asmatrix",
"numpy.zeros",
"cv2.resize"
] | [((367, 397), 'numpy.zeros', 'np.zeros', (['(self.img_size, col)'], {}), '((self.img_size, col))\n', (375, 397), True, 'import numpy as np\n'), ((474, 493), 'cv2.imread', 'cv2.imread', (['name', '(0)'], {}), '(name, 0)\n', (484, 493), False, 'import cv2\n'), ((514, 565), 'cv2.resize', 'cv2.resize', (['gray', '(self.img_width, self.img_height)'], {}), '(gray, (self.img_width, self.img_height))\n', (524, 565), False, 'import cv2\n'), ((590, 607), 'numpy.asmatrix', 'np.asmatrix', (['gray'], {}), '(gray)\n', (601, 607), True, 'import numpy as np\n')] |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.nonsymmetric."""
import itertools
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from scipy.spatial.distance import cosine
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ate
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ped
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import qre
class ExploitabilityDescentTest(parameterized.TestCase):
@staticmethod
def numerical_gradient(fun, x, eps=np.sqrt(np.finfo(float).eps)):
fun_0 = fun(x)
num_grad = [np.zeros_like(xi) for xi in x]
x_plus_dx = [np.copy(xi) for xi in x]
for i in range(len(x)):
for j in range(len(x[i])):
x_plus_dx[i][j] = x[i][j] + eps
num_grad[i][j] = (fun(x_plus_dx) - fun_0) / eps
x_plus_dx[i][j] = x[i][j]
return num_grad
@staticmethod
def prep_params(dist, pt, num_params):
params = [dist]
if num_params > 1:
num_players = len(dist)
nabla = [misc.pt_reduce(pt[i], dist, [i]) for i in range(num_players)]
params += [nabla] # policy_gradient
return tuple(params)
@parameterized.named_parameters(
("PED", (ped, False)),
("ATE_p=1", (ate, 1., False)),
("ATE_p=0.5", (ate, 0.5, False)),
("ATE_p=0.1", (ate, 0.1, False)),
("ATE_p=0", (ate, 0., False)),
("QRE_t=0.0", (qre, 0.0, False)),
("QRE_t=0.1", (qre, 0.1, False))
)
def test_exploitability_gradient_on_nonsymmetric_three_player_matrix_games(
self, solver_tuple, trials=100, max_num_strats=3, atol=1e-1, rtol=1e-1,
seed=1234):
num_players = 3
solver = solver_tuple[0].Solver(*solver_tuple[1:])
random = np.random.RandomState(seed)
successes = []
for _ in range(trials):
num_strats = random.randint(low=2, high=max_num_strats + 1,
size=num_players)
num_strats = tuple([int(ns) for ns in num_strats])
payoff_tensor = random.rand(num_players, *num_strats)
num_params = len(solver.init_vars(num_strats, num_players))
dirichlet_alpha = [np.ones(num_strats_i) for num_strats_i in num_strats]
dist = [random.dirichlet(alpha_i) for alpha_i in dirichlet_alpha]
params = self.prep_params(dist, payoff_tensor, num_params)
payoff_matrices = {}
for pi, pj in itertools.combinations(range(num_players), 2):
key = (pi, pj)
pt_i = misc.pt_reduce(payoff_tensor[pi], dist, [pi, pj])
pt_j = misc.pt_reduce(payoff_tensor[pj], dist, [pi, pj])
payoff_matrices[key] = np.stack((pt_i, pt_j), axis=0)
grad = solver.compute_gradients(params, payoff_matrices)[0][0]
grad = np.concatenate(grad) / float(num_players)
exp = lambda x: solver.exploitability(x, payoff_tensor) # pylint: disable=cell-var-from-loop
num_grad = np.concatenate(self.numerical_gradient(exp, dist))
successes += [np.logical_and(np.allclose(grad, num_grad, rtol, atol),
cosine(grad, num_grad) <= atol)]
perc = 100 * np.mean(successes)
logging.info("gradient accuracy success rate out of %d is %f", trials, perc)
self.assertGreaterEqual(
perc, 95., "exploitability gradient accuracy is too poor")
if __name__ == "__main__":
absltest.main()
| [
"open_spiel.python.algorithms.adidas_utils.helpers.misc.pt_reduce",
"absl.testing.absltest.main",
"numpy.stack",
"numpy.zeros_like",
"scipy.spatial.distance.cosine",
"numpy.copy",
"numpy.allclose",
"numpy.ones",
"numpy.random.RandomState",
"absl.logging.info",
"numpy.finfo",
"numpy.mean",
"a... | [((1928, 2195), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('PED', (ped, False))", "('ATE_p=1', (ate, 1.0, False))", "('ATE_p=0.5', (ate, 0.5, False))", "('ATE_p=0.1', (ate, 0.1, False))", "('ATE_p=0', (ate, 0.0, False))", "('QRE_t=0.0', (qre, 0.0, False))", "('QRE_t=0.1', (qre, 0.1, False))"], {}), "(('PED', (ped, False)), ('ATE_p=1', (ate, 1.0,\n False)), ('ATE_p=0.5', (ate, 0.5, False)), ('ATE_p=0.1', (ate, 0.1, \n False)), ('ATE_p=0', (ate, 0.0, False)), ('QRE_t=0.0', (qre, 0.0, False\n )), ('QRE_t=0.1', (qre, 0.1, False)))\n", (1958, 2195), False, 'from absl.testing import parameterized\n'), ((4080, 4095), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4093, 4095), False, 'from absl.testing import absltest\n'), ((2493, 2520), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2514, 2520), True, 'import numpy as np\n'), ((3876, 3952), 'absl.logging.info', 'logging.info', (['"""gradient accuracy success rate out of %d is %f"""', 'trials', 'perc'], {}), "('gradient accuracy success rate out of %d is %f', trials, perc)\n", (3888, 3952), False, 'from absl import logging\n'), ((1364, 1381), 'numpy.zeros_like', 'np.zeros_like', (['xi'], {}), '(xi)\n', (1377, 1381), True, 'import numpy as np\n'), ((1412, 1423), 'numpy.copy', 'np.copy', (['xi'], {}), '(xi)\n', (1419, 1423), True, 'import numpy as np\n'), ((3853, 3871), 'numpy.mean', 'np.mean', (['successes'], {}), '(successes)\n', (3860, 3871), True, 'import numpy as np\n'), ((1306, 1321), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1314, 1321), True, 'import numpy as np\n'), ((1794, 1826), 'open_spiel.python.algorithms.adidas_utils.helpers.misc.pt_reduce', 'misc.pt_reduce', (['pt[i]', 'dist', '[i]'], {}), '(pt[i], dist, [i])\n', (1808, 1826), False, 'from open_spiel.python.algorithms.adidas_utils.helpers import misc\n'), ((2896, 2917), 'numpy.ones', 'np.ones', (['num_strats_i'], {}), '(num_strats_i)\n', 
(2903, 2917), True, 'import numpy as np\n'), ((3220, 3269), 'open_spiel.python.algorithms.adidas_utils.helpers.misc.pt_reduce', 'misc.pt_reduce', (['payoff_tensor[pi]', 'dist', '[pi, pj]'], {}), '(payoff_tensor[pi], dist, [pi, pj])\n', (3234, 3269), False, 'from open_spiel.python.algorithms.adidas_utils.helpers import misc\n'), ((3285, 3334), 'open_spiel.python.algorithms.adidas_utils.helpers.misc.pt_reduce', 'misc.pt_reduce', (['payoff_tensor[pj]', 'dist', '[pi, pj]'], {}), '(payoff_tensor[pj], dist, [pi, pj])\n', (3299, 3334), False, 'from open_spiel.python.algorithms.adidas_utils.helpers import misc\n'), ((3366, 3396), 'numpy.stack', 'np.stack', (['(pt_i, pt_j)'], {'axis': '(0)'}), '((pt_i, pt_j), axis=0)\n', (3374, 3396), True, 'import numpy as np\n'), ((3479, 3499), 'numpy.concatenate', 'np.concatenate', (['grad'], {}), '(grad)\n', (3493, 3499), True, 'import numpy as np\n'), ((3726, 3765), 'numpy.allclose', 'np.allclose', (['grad', 'num_grad', 'rtol', 'atol'], {}), '(grad, num_grad, rtol, atol)\n', (3737, 3765), True, 'import numpy as np\n'), ((3802, 3824), 'scipy.spatial.distance.cosine', 'cosine', (['grad', 'num_grad'], {}), '(grad, num_grad)\n', (3808, 3824), False, 'from scipy.spatial.distance import cosine\n')] |
# Copyright (C) 2016 <NAME>
# All rights reserved.
import sys
import json
import base64
import struct
import pdb
import numpy as np
RGB_FORMAT = 6407
RGB_ALPHA_FORMAT = 6408
SRGB_FORMAT = 0x8C40
SRGB_ALPHA_FORMAT = 0x8C42
TEXTURE_2D_TARGET = 3553
UNSIGNED_BYTE_TYPE = 5121
CLAMP_TO_EDGE = 33071
LINEAR_MIPMAP_LINEAR = 9987
LINEAR = 9729
OBJECTS_NAME = 'nodes'
MESHES_NAME = 'meshes'
ACCESSORS_NAME = 'accessors'
IMAGES_NAME = 'images'
TEXTURES_NAME = 'textures'
SAMPLERS_NAME = 'samplers'
MATERIALS_NAME = 'materials'
BUFFER_VIEWS_NAME = 'bufferViews'
BUFFERS_NAME = 'buffers'
LAMPS_NAME = 'lights'
BASE64_DATA_HEADER = 'data:text/plain;base64,'
def get_buffer_view(gltf, buffer_view_name):
# Get the buffer
buffer_view = gltf[BUFFER_VIEWS_NAME][buffer_view_name]
buf_offset = buffer_view['byteOffset']
buf_length = buffer_view['byteLength']
buffer = gltf[BUFFERS_NAME][buffer_view['buffer']]
# Handle embedded data
data = None
if BASE64_DATA_HEADER == buffer['uri'][:len(BASE64_DATA_HEADER)]:
data = base64.b64decode(buffer['uri'][23:])
return data[buf_offset:buf_offset + buf_length]
def set_fake_mesh(gltf, obj_name, vertices, indices):
# Build the transformation matrix of this mesh
node_mat = np.identity(4)
# For each node in our hiearchy, basically.
this_node = obj_name
while this_node != "":
# Build the total transformation matrix
obj = gltf[OBJECTS_NAME][this_node]
# Although the matrix is technically stored in column major order, numpy
# expects row-major so just role with it, inverting the order of all
# transformations.
local_mat = np.array(obj['matrix']).reshape((4,4))
node_mat = np.dot(node_mat, local_mat)
# Find this node's parent
parent_node = ""
for node_name, node_obj in gltf[OBJECTS_NAME].items():
if this_node in node_obj['children']:
# We have our parent
parent_node = node_name
this_node = parent_node
obj = gltf[OBJECTS_NAME][obj_name]
# Find the mesh that this node (sorta) contains.
mesh_name = obj['meshes'][0]
# Remove the mesh
old_mesh = gltf[MESHES_NAME].pop(mesh_name)
if len(old_mesh['primitives']) > 1:
raise RuntimeError(("Fake mesh {} must only have one primitive, does"
" the material have more than one"
" material?").format(mesh))
prim = old_mesh['primitives'][0]
old_vertices_name = prim['attributes']['POSITION']
old_indices_name = prim['indices']
# Remove normals
old_normals_name = prim['attributes'].get('NORMALS', '')
if old_normals_name != '':
gltf[ACCESSORS_NAME].pop(old_normals_name)
# Change accessor names
vertices_obj = gltf[ACCESSORS_NAME].pop(old_vertices_name)
indices_obj = gltf[ACCESSORS_NAME].pop(old_indices_name)
# Remove normals
gltf[ACCESSORS_NAME].update({vertices: vertices_obj})
gltf[ACCESSORS_NAME].update({indices: indices_obj})
offset = vertices_obj['byteOffset']
stride = vertices_obj['byteStride']
count = vertices_obj['count']
vertices_data_view = get_buffer_view(gltf, vertices_obj['bufferView'])
if vertices_data_view == None:
raise RuntimeError(('Failed to find vertices data for'
' mesh {}').format(mesh))
#pdb.set_trace()
vertices_data = vertices_data_view[offset:]
# TODO: We assume 3 four byte floats per position value
out_vertices_data = bytearray(count * 3 * 4)
for i in range(count):
# Get the vertex data
x, y, z = struct.unpack_from('<fff', vertices_data, i * stride)
# Transform
new_pt = np.dot(np.array([x, y, z, 1.0]), node_mat)
# Repack
struct.pack_into('<fff', out_vertices_data, i * 3 * 4, new_pt[0],
new_pt[1], new_pt[2])
# Make a new buffer for CPU data, make a new buffer view and finally have
# the new vertices accessor reference the buffer view.
buffer_name = vertices + '_buffer'
gltf[BUFFERS_NAME][buffer_name] = {
'byteLength': len(out_vertices_data),
'uri': BASE64_DATA_HEADER + base64.b64encode(out_vertices_data).decode('ascii')
}
buffer_view_name = vertices + '_buffer_view'
gltf[BUFFER_VIEWS_NAME][buffer_view_name] = {
'buffer': buffer_name,
'byteLength': len(out_vertices_data),
'byteOffset': 0
}
gltf[ACCESSORS_NAME][vertices]['bufferView'] = buffer_view_name
gltf[ACCESSORS_NAME][vertices]['byteOffset'] = 0
gltf[ACCESSORS_NAME][vertices]['byteStride'] = 3 * 4
# Remove the object
gltf[OBJECTS_NAME].pop(obj_name)
# And all references to it!
for node_name, node in gltf[OBJECTS_NAME].items():
if obj_name in node['children']:
node['children'].remove(obj_name)
def remove_unused_accessors(gltf, save_these = []):
used_accessors = []
for mesh_name, mesh in gltf[MESHES_NAME].items():
for prim in mesh['primitives']:
these_accessors = []
these_accessors.append(prim['indices'])
these_accessors.extend(prim['attributes'].values())
for access in these_accessors:
if access not in used_accessors:
used_accessors.append(access)
rm_accessors = []
for name, access in gltf[ACCESSORS_NAME].items():
if name not in used_accessors or name not in save_these:
rm_accessors.append(name)
for buf in rm_accessors:
del gltf[ACCESSORS_NAME][buf]
def remove_unused_buffer_views(gltf):
used_buffers = []
for name, accessor in gltf[ACCESSORS_NAME].items():
used_buffers.append(accessor['bufferView'])
rm_buffers = []
for buf_name, buf in gltf[BUFFER_VIEWS_NAME].items():
if buf_name not in used_buffers:
rm_buffers.append(buf_name)
for buf in rm_buffers:
del gltf[BUFFER_VIEWS_NAME][buf]
def remove_unused_buffers(gltf):
used_buffers = []
for name, buffer_view in gltf[BUFFER_VIEWS_NAME].items():
used_buffers.append(buffer_view['buffer'])
rm_buffers = []
for buf_name, buf in gltf[BUFFERS_NAME].items():
if buf_name not in used_buffers:
rm_buffers.append(buf_name)
for buf in rm_buffers:
del gltf[BUFFERS_NAME][buf]
def remove_unused_data(gltf, save_accessors = []):
remove_unused_accessors(gltf, save_accessors)
remove_unused_buffer_views(gltf)
remove_unused_buffers(gltf)
def add_textures(gltf, textures, sampler):
if TEXTURES_NAME not in gltf:
gltf[TEXTURES_NAME] = {}
for tex, image in textures.items():
tex_gltf = {}
tex_gltf['format'] = RGB_ALPHA_FORMAT
tex_gltf['internalFormat'] = SRGB_ALPHA_FORMAT
tex_gltf['sampler'] = sampler
tex_gltf['source'] = image
tex_gltf['target'] = TEXTURE_2D_TARGET
tex_gltf['type'] = UNSIGNED_BYTE_TYPE
gltf[TEXTURES_NAME].update({tex: tex_gltf})
def add_images(gltf, images, image_dir):
if IMAGES_NAME not in gltf:
gltf[IMAGES_NAME] = {}
for image, url in images.items():
image_gltf = {}
image_gltf['uri'] = image_dir + '/' + url
gltf[IMAGES_NAME].update({image: image_gltf})
def add_lightmap_sampler(gltf, name):
sampler = {}
sampler['magFilter'] = LINEAR
sampler['minFilter'] = LINEAR_MIPMAP_LINEAR
sampler['wrapS'] = CLAMP_TO_EDGE
sampler['wrapT'] = CLAMP_TO_EDGE
gltf[SAMPLERS_NAME][name] = sampler
def make_unique_materials(gltf, mesh):
for prim in mesh['primitives']:
# Copy the material
mat_name = prim['material']
material = gltf[MATERIALS_NAME][mat_name]
# Add a new material with a new name
new_name = ''
for i in range(1,999):
new_name = mat_name + '.' + str(i)
if new_name not in gltf[MATERIALS_NAME]:
break
# Replace this primitive with that material
gltf[MATERIALS_NAME][new_name] = material.copy()
prim['material'] = new_name
def get_mesh(gltf, obj, i = 0):
meshes = obj.get('meshes', [])
# Too few meshes
if i >= len(meshes):
raise RuntimeError("Object doesn't have a mesh at index {}".format(i))
mesh = meshes[i]
return gltf[MESHES_NAME][mesh]
def set_diffusemap(gltf, obj, lightmap):
mesh_name = obj['meshes'][0]
mesh = gltf[MESHES_NAME][mesh_name]
for primitive in mesh['primitives']:
mat_name = primitive['material']
mat = gltf[MATERIALS_NAME][mat_name]
# This has the effect of removing most values for the given material.
mat['values'] = {'lightmap': lightmap}
set_technique(gltf, obj, 'forward_diffusemap')
def get_material(gltf, prim):
mat_name = prim.get('material', '')
if mat_name == '': return None
return gltf[MATERIALS_NAME][mat_name]
def remove_material_values(gltf, mesh, rm_names):
for prim in mesh['primitives']:
mat = get_material(gltf, prim)
if mat == None: continue
values = mat['values']
for name in rm_names:
if name in values:
del values[name]
def adjust_shininess(gltf, mesh, name):
for prim in mesh['primitives']:
mat = get_material(gltf, prim)
if mat == None: continue
values = mat['values']
if name in values:
shiny = values[name] / 50.0 * 16.0
if shiny > 1.0:
values[name] = shiny
def set_technique(gltf, obj, technique):
mesh = get_mesh(gltf, obj)
for prim in mesh['primitives']:
mat = get_material(gltf, prim)
if mat == None:
continue
mat['technique'] = technique
def remove_texcoords(gltf, mesh):
for prim in mesh['primitives']:
rm_names = []
for attrib_semantic, attrib_value in prim['attributes'].items():
# String matching!
if "TEXCOORD" in attrib_semantic:
rm_names.append(attrib_semantic)
for name in rm_names:
del prim['attributes'][name]
def update_fucked_texcoords(gltf, mesh):
for prim in mesh['primitives']:
if 'TEXCOORD_UVMap' in prim['attributes']:
old_mapping = prim['attributes'].pop('TEXCOORD_UVMap')
prim['attributes']['TEXCOORD_0'] = old_mapping
def remove_unused_materials(gltf):
mats_used = []
for mesh_name, mesh in gltf[MESHES_NAME].items():
for prim in mesh['primitives']:
mat_name = prim['material']
if mat_name not in mats_used:
mats_used.append(mat_name)
rm_mats = []
for mat in gltf[MATERIALS_NAME]:
if mat not in mats_used:
rm_mats.append(mat)
for mat in rm_mats:
del gltf[MATERIALS_NAME][mat]
def remove_unmaterialed_meshes(gltf):
for mesh_name, mesh in gltf[MESHES_NAME].items():
rm_prims = []
for i, prim in enumerate(mesh['primitives']):
if prim.get('material', '') == '':
rm_prims.append(i)
for prim_i in rm_prims:
del mesh['primitives'][prim_i]
def set_lamps_state(gltf, lamps, state):
for key, lamp in gltf['extras'][LAMPS_NAME].items():
if key in lamps:
lamp['active'] = state
def turn_lamps_off(gltf, lamps):
set_lamps_state(gltf, lamps, False)
def turn_lamps_on(gltf, lamps):
set_lamps_state(gltf, lamps, True)
if __name__ == '__main__':
if len(sys.argv) < 3:
sys.stderr.write('usage: ' + sys.argv[0] + ' <map.gltf> <out.gltf>\n')
sys.exit()
# Load glTF
with open(sys.argv[1]) as f:
gltf = json.load(f)
set_fake_mesh(gltf, 'Room', 'Collision_Vertices', 'Collision_Indices')
for obj_name, obj in gltf[OBJECTS_NAME].items():
try:
mesh = get_mesh(gltf, obj)
except RuntimeError:
continue
except KeyError:
raise
remove_material_values(gltf, mesh, ['specular', 'emission',
'ambient', 'uv_layers', 'textures'])
adjust_shininess(gltf, mesh, 'shininess')
remove_texcoords(gltf, mesh)
# Set dynamic lighting
set_technique(gltf, obj, 'deferred_dynamic_lights')
remove_unused_materials(gltf)
remove_unmaterialed_meshes(gltf)
turn_lamps_off(gltf, ['Left_Lamp_Spot', 'Night_Light', 'Right_Lamp_Spot'])
turn_lamps_on(gltf, ['Desk_Lamp'])
#remove_unused_data(gltf, ['Collision_Vertices', 'Collision_Indices'])
with open(sys.argv[2], 'w') as f:
json.dump(gltf, f, indent=4)
f.write('\n')
| [
"json.dump",
"json.load",
"numpy.identity",
"base64.b64decode",
"numpy.array",
"base64.b64encode",
"numpy.dot",
"sys.stderr.write",
"struct.pack_into",
"sys.exit",
"struct.unpack_from"
] | [((1268, 1282), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1279, 1282), True, 'import numpy as np\n'), ((1056, 1092), 'base64.b64decode', 'base64.b64decode', (["buffer['uri'][23:]"], {}), "(buffer['uri'][23:])\n", (1072, 1092), False, 'import base64\n'), ((1741, 1768), 'numpy.dot', 'np.dot', (['node_mat', 'local_mat'], {}), '(node_mat, local_mat)\n', (1747, 1768), True, 'import numpy as np\n'), ((3674, 3727), 'struct.unpack_from', 'struct.unpack_from', (['"""<fff"""', 'vertices_data', '(i * stride)'], {}), "('<fff', vertices_data, i * stride)\n", (3692, 3727), False, 'import struct\n'), ((3835, 3926), 'struct.pack_into', 'struct.pack_into', (['"""<fff"""', 'out_vertices_data', '(i * 3 * 4)', 'new_pt[0]', 'new_pt[1]', 'new_pt[2]'], {}), "('<fff', out_vertices_data, i * 3 * 4, new_pt[0], new_pt[1],\n new_pt[2])\n", (3851, 3926), False, 'import struct\n'), ((11586, 11656), 'sys.stderr.write', 'sys.stderr.write', (["('usage: ' + sys.argv[0] + ' <map.gltf> <out.gltf>\\n')"], {}), "('usage: ' + sys.argv[0] + ' <map.gltf> <out.gltf>\\n')\n", (11602, 11656), False, 'import sys\n'), ((11665, 11675), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11673, 11675), False, 'import sys\n'), ((11741, 11753), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11750, 11753), False, 'import json\n'), ((12672, 12700), 'json.dump', 'json.dump', (['gltf', 'f'], {'indent': '(4)'}), '(gltf, f, indent=4)\n', (12681, 12700), False, 'import json\n'), ((3773, 3797), 'numpy.array', 'np.array', (['[x, y, z, 1.0]'], {}), '([x, y, z, 1.0])\n', (3781, 3797), True, 'import numpy as np\n'), ((1682, 1705), 'numpy.array', 'np.array', (["obj['matrix']"], {}), "(obj['matrix'])\n", (1690, 1705), True, 'import numpy as np\n'), ((4247, 4282), 'base64.b64encode', 'base64.b64encode', (['out_vertices_data'], {}), '(out_vertices_data)\n', (4263, 4282), False, 'import base64\n')] |
import math
import numpy
import pylab
from classes import *
class DimensionSimulation(Simulation):
def __init__(self, width, height):
Simulation.__init__(self, 1, width, height)
self.lastSize = None
self.sizes = []
self.particles = []
def callback(self):
currentSize = self.field.aggregates[0].size()
currentParticles = self.field.aggregates[0].count()
if self.lastSize != None or currentSize != self.lastSize:
self.lastSize = currentSize
if currentSize != 0:
self.sizes.append(currentSize)
self.particles.append(currentParticles)
# The dimension is N(r) = k * r ** d
# so it is log(N) = d * log(r) + K
sizes = []
particles = []
for i in range(0, 10):
simulation = DimensionSimulation(50, 50)
simulation.animation = False
simulation.run(StandardParticle, 20, 2000)
sizes.extend(simulation.sizes)
particles.extend(simulation.particles)
# Make the lin reg
x = map(math.log, sizes)
y = map(math.log, particles)
m, b = numpy.polyfit(x, y, 1)
# plot the noisy data
pylab.plot(sizes, particles, linestyle='', marker='.', label = 'Experiment data')
# Plot the lin reg
pylab.plot(sizes, math.exp(b) * numpy.array(sizes) ** m, label = "Dimension " + str(round(m, 2)))
# Add some legends
pylab.xlabel('Size of the aggregates')
pylab.ylabel('Number of particles')
pylab.title('Fractal dimension of the aggregate')
pylab.legend()
# Set the axes in log.
pylab.semilogy()
pylab.semilogx()
# Show the graph.
pylab.show()
| [
"pylab.semilogy",
"pylab.title",
"pylab.show",
"math.exp",
"numpy.polyfit",
"pylab.ylabel",
"numpy.array",
"pylab.xlabel",
"pylab.semilogx",
"pylab.legend",
"pylab.plot"
] | [((1063, 1085), 'numpy.polyfit', 'numpy.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (1076, 1085), False, 'import numpy\n'), ((1109, 1188), 'pylab.plot', 'pylab.plot', (['sizes', 'particles'], {'linestyle': '""""""', 'marker': '"""."""', 'label': '"""Experiment data"""'}), "(sizes, particles, linestyle='', marker='.', label='Experiment data')\n", (1119, 1188), False, 'import pylab\n'), ((1329, 1367), 'pylab.xlabel', 'pylab.xlabel', (['"""Size of the aggregates"""'], {}), "('Size of the aggregates')\n", (1341, 1367), False, 'import pylab\n'), ((1368, 1403), 'pylab.ylabel', 'pylab.ylabel', (['"""Number of particles"""'], {}), "('Number of particles')\n", (1380, 1403), False, 'import pylab\n'), ((1404, 1453), 'pylab.title', 'pylab.title', (['"""Fractal dimension of the aggregate"""'], {}), "('Fractal dimension of the aggregate')\n", (1415, 1453), False, 'import pylab\n'), ((1454, 1468), 'pylab.legend', 'pylab.legend', ([], {}), '()\n', (1466, 1468), False, 'import pylab\n'), ((1493, 1509), 'pylab.semilogy', 'pylab.semilogy', ([], {}), '()\n', (1507, 1509), False, 'import pylab\n'), ((1510, 1526), 'pylab.semilogx', 'pylab.semilogx', ([], {}), '()\n', (1524, 1526), False, 'import pylab\n'), ((1546, 1558), 'pylab.show', 'pylab.show', ([], {}), '()\n', (1556, 1558), False, 'import pylab\n'), ((1229, 1240), 'math.exp', 'math.exp', (['b'], {}), '(b)\n', (1237, 1240), False, 'import math\n'), ((1243, 1261), 'numpy.array', 'numpy.array', (['sizes'], {}), '(sizes)\n', (1254, 1261), False, 'import numpy\n')] |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
# ==============================================================================
# 2019_10_03 LSW@NCHC.
#
# Add reading alarm, roi box (x,y) from extra configure file.
# NOTE that, during Popen this RG with parameter, the change happened in ROOT
# "classify.py", who calls RG.exe.
#
# USAGE: time py classify.py --image_file 002051live_201703150917.jpg 2>&-
# ==============================================================================
"""Modified Simple image classification with Inception.
The new Inception-v3 model was retrained on HuWei CCTV dataset.
NOTE: pyinstaller this classify.py to classify.exe before you use it.
"""
import os.path
import re
import sys
import tarfile
import subprocess
import numpy as np
import shlex # for readline list
# pylint: disable=unused-import,g-bad-import-order
import tensorflow as tf
import tensorflow.python.platform
from tensorflow.python.platform import gfile
from six.moves import urllib
# pylint: enable=unused-import,g-bad-import-order
# Get the input image file name
exe_name = sys.argv[0]
in_image = sys.argv[2]
print("This image is:", in_image)
basename = os.path.splitext(in_image)[0]
print(basename)
# Add {cam_roi_file}.cfg to read the (x,y)
cam_roi_file_name = sys.argv[3]
print("Use cam roi from:", cam_roi_file_name)
f = open(cam_roi_file_name, "r")
fp = f.readline()
fp = shlex.split(fp)
# Debug
#fps = [i.split() for i in fp]
#print("cam roi :", fp)
#print("len of fp :", len(fp))
#print("fp[0]", fp[0])
#print("fp[1]", fp[1])
#print("fp[2]", fp[2])
#print("fp[17]", fp[17])
#exit()
# Set output name
out_infer_name = basename + ".inf"
out_segimg_name = basename + "_" + "seg" + ".jpg"
# Debug
#dist = 1.3
#SegPara = "30 30 120 200 72 150 70 50 70 70 70 90 70 110 70 130 20"
#print(SegPara)
#sys.exit()
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
tf.app.flags.DEFINE_string(
'model_dir', '/home/lsw/Images/HuWeiSP/',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
""""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'output_graph_HW.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
  """Runs inference on an image.

  Classifies *image* with the imported graph, writes the ranked top
  predictions to ``out_infer_name`` and, when the top label starts with
  "Rain", launches the external region-growing tool './RG.exe' with the
  parameters in the module-level ``fp`` list.

  Args:
    image: Image file name.
  Returns:
    Nothing
  """
  if not gfile.Exists(image):
    tf.logging.fatal('File does not exist %s', image)
  image_data = gfile.FastGFile(image, 'rb').read()
  # Creates graph from saved GraphDef.
  create_graph()
  with tf.Session() as sess:
    # 'final_result:0' is the softmax output of the retrained graph;
    # feeding the raw JPEG bytes lets the graph do its own decoding.
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})
    predictions = np.squeeze(predictions)
    # Creates node ID --> English string lookup.
    node_lookup = NodeLookup()
    # Top-k predictions, best first. Use a context manager so the file is
    # closed even on error (the original leaked the handle on exceptions).
    top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
    with open(out_infer_name, "w") as text_file:
      for node_id in top_k:
        human_string = node_lookup.id_to_string(node_id)
        score = predictions[node_id]
        print('%s (score = %.5f)' % (human_string, score))
        text_file.write('%s (score = %.5f)\n' % (human_string, score))
    # Parse the inference result: is the top label rain or not?
    # (The original never closed this read handle.)
    with open(out_infer_name, "r") as infer_file:
      infer_line = infer_file.readline().splitlines()  # splitlines drops the trailing newline
    print("The infer is : ", infer_line, "end")
    s1 = infer_line[0][:4]
    print((str(s1) == "Rain"))
    print("print s1:", s1)
    if s1 == "Rain":  # s1 is already a str; the str() wrappers were redundant
      print("Do image segmentation.")
      p = subprocess.Popen(['./RG.exe', in_image, fp[0], fp[1], fp[2], fp[3],
                            fp[4], fp[5], fp[6], fp[7], fp[8], fp[9], fp[10],
                            fp[11], fp[12], fp[13], fp[14], fp[15], fp[16],
                            fp[17]],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      stdout, stderr = p.communicate()
      print(stdout, stderr)
    else:
      print("No rain go to new image.", infer_line)
def maybe_download_and_extract():
  """Download and extract model tar file.

  Fetches DATA_URL into FLAGS.model_dir (skipping the download when the
  archive already exists, with a one-line progress indicator) and extracts
  the gzipped tarball into that directory.
  """
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      # urlretrieve report hook: rewrite one status line with the percentage.
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (
          filename, float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,
                                             reporthook=_progress)
    print()
    statinfo = os.stat(filepath)
    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
  # Context manager closes the tar handle (the original leaked it).
  with tarfile.open(filepath, 'r:gz') as tar:
    tar.extractall(dest_directory)
def main(_):
  """tf.app.run entry point: choose the input image and classify it."""
  # Fall back to the bundled panda image when no --image_file was given.
  if FLAGS.image_file:
    image = FLAGS.image_file
  else:
    image = os.path.join(FLAGS.model_dir, 'cropped_panda.jpg')
  run_inference_on_image(image)
if __name__ == '__main__':
  # Hand control to tf.app.run, which invokes main() after flag parsing.
  tf.app.run()
| [
"tensorflow.python.platform.gfile.FastGFile",
"subprocess.Popen",
"tensorflow.python.platform.gfile.GFile",
"tarfile.open",
"tensorflow.logging.fatal",
"shlex.split",
"tensorflow.Session",
"tensorflow.python.platform.gfile.Exists",
"six.moves.urllib.request.urlretrieve",
"sys.stdout.flush",
"ten... | [((2622, 2637), 'shlex.split', 'shlex.split', (['fp'], {}), '(fp)\n', (2633, 2637), False, 'import shlex\n'), ((3394, 3602), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_dir"""', '"""/home/lsw/Images/HuWeiSP/"""', '"""Path to classify_image_graph_def.pb, imagenet_synset_to_human_label_map.txt, and imagenet_2012_challenge_label_map_proto.pbtxt."""'], {}), "('model_dir', '/home/lsw/Images/HuWeiSP/',\n 'Path to classify_image_graph_def.pb, imagenet_synset_to_human_label_map.txt, and imagenet_2012_challenge_label_map_proto.pbtxt.'\n )\n", (3420, 3602), True, 'import tensorflow as tf\n'), ((3629, 3705), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_file"""', '""""""', '"""Absolute path to image file."""'], {}), "('image_file', '', 'Absolute path to image file.')\n", (3655, 3705), True, 'import tensorflow as tf\n'), ((3737, 3828), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_top_predictions"""', '(5)', '"""Display this many predictions."""'], {}), "('num_top_predictions', 5,\n 'Display this many predictions.')\n", (3764, 3828), True, 'import tensorflow as tf\n'), ((10591, 10603), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (10601, 10603), True, 'import tensorflow as tf\n'), ((5256, 5285), 're.compile', 're.compile', (['"""[n\\\\d]*[ \\\\S,]*"""'], {}), "('[n\\\\d]*[ \\\\S,]*')\n", (5266, 5285), False, 'import re\n'), ((6602, 6615), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (6613, 6615), True, 'import tensorflow as tf\n'), ((6664, 6703), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (6683, 6703), True, 'import tensorflow as tf\n'), ((6848, 6867), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['image'], {}), '(image)\n', (6860, 6867), False, 'from tensorflow.python.platform import gfile\n'), ((6873, 6922), 'tensorflow.logging.fatal', 'tf.logging.fatal', 
(['"""File does not exist %s"""', 'image'], {}), "('File does not exist %s', image)\n", (6889, 6922), True, 'import tensorflow as tf\n'), ((7039, 7051), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7049, 7051), True, 'import tensorflow as tf\n'), ((7679, 7702), 'numpy.squeeze', 'np.squeeze', (['predictions'], {}), '(predictions)\n', (7689, 7702), True, 'import numpy as np\n'), ((10072, 10140), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['DATA_URL', 'filepath'], {'reporthook': '_progress'}), '(DATA_URL, filepath, reporthook=_progress)\n', (10098, 10140), False, 'from six.moves import urllib\n'), ((4887, 4916), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['uid_lookup_path'], {}), '(uid_lookup_path)\n', (4899, 4916), False, 'from tensorflow.python.platform import gfile\n'), ((4924, 4983), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""File does not exist %s"""', 'uid_lookup_path'], {}), "('File does not exist %s', uid_lookup_path)\n", (4940, 4983), True, 'import tensorflow as tf\n'), ((4995, 5026), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['label_lookup_path'], {}), '(label_lookup_path)\n', (5007, 5026), False, 'from tensorflow.python.platform import gfile\n'), ((5034, 5095), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""File does not exist %s"""', 'label_lookup_path'], {}), "('File does not exist %s', label_lookup_path)\n", (5050, 5095), True, 'import tensorflow as tf\n'), ((6938, 6966), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['image', '"""rb"""'], {}), "(image, 'rb')\n", (6953, 6966), False, 'from tensorflow.python.platform import gfile\n'), ((9185, 9421), 'subprocess.Popen', 'subprocess.Popen', (["['./RG.exe', in_image, fp[0], fp[1], fp[2], fp[3], fp[4], fp[5], fp[6], fp[\n 7], fp[8], fp[9], fp[10], fp[11], fp[12], fp[13], fp[14], fp[15], fp[16\n ], fp[17]]"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['./RG.exe', in_image, fp[0], 
fp[1], fp[2], fp[3], fp[4],\n fp[5], fp[6], fp[7], fp[8], fp[9], fp[10], fp[11], fp[12], fp[13], fp[\n 14], fp[15], fp[16], fp[17]], stdout=subprocess.PIPE, stderr=subprocess\n .PIPE)\n", (9201, 9421), False, 'import subprocess\n'), ((10035, 10053), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10051, 10053), False, 'import sys\n'), ((10307, 10337), 'tarfile.open', 'tarfile.open', (['filepath', '"""r:gz"""'], {}), "(filepath, 'r:gz')\n", (10319, 10337), False, 'import tarfile\n'), ((5185, 5213), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['uid_lookup_path'], {}), '(uid_lookup_path)\n', (5196, 5213), False, 'from tensorflow.python.platform import gfile\n'), ((5566, 5596), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['label_lookup_path'], {}), '(label_lookup_path)\n', (5577, 5596), False, 'from tensorflow.python.platform import gfile\n'), ((6087, 6132), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""Failed to locate: %s"""', 'val'], {}), "('Failed to locate: %s', val)\n", (6103, 6132), True, 'import tensorflow as tf\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Load the vehicle-stock table and transpose it so each row is one record.
df = pd.read_csv('transport22.csv', sep=';')
df = df.T


def _alias_map(base, alias, copies=6):
    """Map "<base>", "<base>.1" ... "<base>.<copies>" to *alias*.

    The transpose leaves duplicated headers suffixed ".1" ... ".6".
    """
    mapping = {base: alias}
    for i in range(1, copies + 1):
        mapping[base + "." + str(i)] = alias
    return mapping


index_aliases = {}
index_aliases.update(_alias_map("motocykle ogółem", "motocykle"))
index_aliases.update(_alias_map("samochody osobowe", "samochody"))
index_aliases.update(_alias_map("autobusy ogółem", "autobusy"))
df = df.rename(columns={0: "rok", 1: "jednostka", 2: "wartosc"},
               index=index_aliases)
df.reset_index(inplace=True)
df = df.rename(columns={"index": "typ"})

# Values arrive as strings with space thousands separators -> numeric.
df["wartosc"] = df["wartosc"].str.replace(" ", "")
df["wartosc"] = df["wartosc"].apply(pd.to_numeric)


def _value(kind, year):
    """Single 'wartosc' figure for one vehicle type and year (as float)."""
    subset = df[df['typ'] == kind]
    return float(subset[subset['rok'] == year]['wartosc'])


nazwy = np.array(['motocykle', 'samochody', 'autobusy'])
rok2010wartosci = np.array([_value(kind, '2010') for kind in nazwy])
rok2011wartosci = np.array([_value(kind, '2011') for kind in nazwy])

# Two pie charts: fleet composition in 2010 vs 2011.
fig, axs = plt.subplots(2)
axs[0].pie(rok2010wartosci, labels=nazwy)
axs[0].set_title("rok 2010")
axs[1].pie(rok2011wartosci, labels=nazwy)
axs[1].set_title("rok 2011")
plt.legend()
plt.savefig('zad3.jpg') | [
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((82, 121), 'pandas.read_csv', 'pd.read_csv', (['"""transport22.csv"""'], {'sep': '""";"""'}), "('transport22.csv', sep=';')\n", (93, 121), True, 'import pandas as pd\n'), ((2183, 2231), 'numpy.array', 'np.array', (["['motocykle', 'samochody', 'autobusy']"], {}), "(['motocykle', 'samochody', 'autobusy'])\n", (2191, 2231), True, 'import numpy as np\n'), ((2246, 2261), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (2258, 2261), True, 'import matplotlib.pyplot as plt\n'), ((2409, 2421), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2419, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2423, 2446), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""zad3.jpg"""'], {}), "('zad3.jpg')\n", (2434, 2446), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright (c) 2020 CNES
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import pickle
import unittest
import numpy as np
import pyinterp.core as core
class TestGrid4D(unittest.TestCase):
    """Exercise the C++/Python bindings of pyinterp.core.Grid4DFloat64."""
    @staticmethod
    def f4d(x, y, z, u):
        """Analytic reference field: u * exp(-(x^2 + y^2 + z^2))."""
        return u * np.exp(-x**2 - y**2 - z**2)

    def load_data(self):
        """Build a Grid4DFloat64 filled with f4d on a regular mesh."""
        xs = np.arange(-1, 1, 0.2)
        ys = np.arange(-1, 1, 0.2)
        zs = np.arange(-1, 1, 0.2)
        us = np.arange(-1, 10, 0.2)
        gx, gy, gz, gu = np.meshgrid(xs, ys, zs, us)
        values = self.f4d(gx, gy, gz, gu)
        return core.Grid4DFloat64(core.Axis(xs), core.Axis(ys),
                                  core.Axis(zs), core.Axis(us), values)

    def test_grid4d_init(self):
        """The grid exposes its four axes and the value array."""
        grid = self.load_data()
        for axis in (grid.x, grid.y, grid.z, grid.u):
            self.assertIsInstance(axis, core.Axis)
        self.assertIsInstance(grid.array, np.ndarray)

    def test_grid4d_pickle(self):
        """A grid survives a pickle round-trip unchanged."""
        grid = self.load_data()
        clone = pickle.loads(pickle.dumps(grid))
        for attr in ("x", "y", "z", "u"):
            self.assertEqual(getattr(grid, attr), getattr(clone, attr))
        self.assertTrue(np.all(grid.array == clone.array))

    def test_interpolator(self):
        """Quadrivariate interpolation reproduces the analytic field."""
        grid = self.load_data()

        def run(u_step, bounds_error):
            # Evaluate the field analytically and via the interpolator on
            # the same set of query points.
            xs = np.arange(-1, 1, 0.2)
            ys = np.arange(-1, 1, 0.2)
            zs = np.arange(-1, 1, 0.2)
            us = np.arange(-1, 10, u_step)
            gx, gy, gz, gu = np.meshgrid(xs, ys, zs, us)
            expected = self.f4d(gx, gy, gz, gu)
            calculated = core.quadrivariate_float64(grid,
                                                    gx.flatten(),
                                                    gy.flatten(),
                                                    gz.flatten(),
                                                    gu.flatten(),
                                                    core.Bilinear3D(),
                                                    num_threads=0,
                                                    bounds_error=bounds_error)
            return expected, calculated

        # Queries exactly on the grid nodes must be reproduced exactly.
        expected, calculated = run(0.2, True)
        self.assertTrue(np.all(expected.flatten() == calculated))

        # Off-node queries are interpolated; compare statistically.
        expected, calculated = run(0.33333, False)
        self.assertAlmostEqual(np.nanstd(expected.flatten() - calculated), 0)
if __name__ == "__main__":
    # Discover and run the test cases when executed as a script.
    unittest.main()
| [
"unittest.main",
"numpy.meshgrid",
"pyinterp.core.Bilinear3D",
"pyinterp.core.Axis",
"numpy.arange",
"numpy.exp",
"numpy.all",
"pickle.dumps"
] | [((3343, 3358), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3356, 3358), False, 'import unittest\n'), ((481, 502), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (490, 502), True, 'import numpy as np\n'), ((515, 536), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (524, 536), True, 'import numpy as np\n'), ((549, 570), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (558, 570), True, 'import numpy as np\n'), ((583, 605), 'numpy.arange', 'np.arange', (['(-1)', '(10)', '(0.2)'], {}), '(-1, 10, 0.2)\n', (592, 605), True, 'import numpy as np\n'), ((632, 655), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z', 'u'], {}), '(x, y, z, u)\n', (643, 655), True, 'import numpy as np\n'), ((1636, 1657), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (1645, 1657), True, 'import numpy as np\n'), ((1670, 1691), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (1679, 1691), True, 'import numpy as np\n'), ((1704, 1725), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (1713, 1725), True, 'import numpy as np\n'), ((1738, 1760), 'numpy.arange', 'np.arange', (['(-1)', '(10)', '(0.2)'], {}), '(-1, 10, 0.2)\n', (1747, 1760), True, 'import numpy as np\n'), ((1787, 1810), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z', 'u'], {}), '(x, y, z, u)\n', (1798, 1810), True, 'import numpy as np\n'), ((1879, 1896), 'pyinterp.core.Bilinear3D', 'core.Bilinear3D', ([], {}), '()\n', (1894, 1896), True, 'import pyinterp.core as core\n'), ((2471, 2492), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (2480, 2492), True, 'import numpy as np\n'), ((2505, 2526), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', (2514, 2526), True, 'import numpy as np\n'), ((2539, 2560), 'numpy.arange', 'np.arange', (['(-1)', '(1)', '(0.2)'], {}), '(-1, 1, 0.2)\n', 
(2548, 2560), True, 'import numpy as np\n'), ((2573, 2599), 'numpy.arange', 'np.arange', (['(-1)', '(10)', '(0.33333)'], {}), '(-1, 10, 0.33333)\n', (2582, 2599), True, 'import numpy as np\n'), ((2626, 2649), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z', 'u'], {}), '(x, y, z, u)\n', (2637, 2649), True, 'import numpy as np\n'), ((2718, 2735), 'pyinterp.core.Bilinear3D', 'core.Bilinear3D', ([], {}), '()\n', (2733, 2735), True, 'import pyinterp.core as core\n'), ((415, 448), 'numpy.exp', 'np.exp', (['(-x ** 2 - y ** 2 - z ** 2)'], {}), '(-x ** 2 - y ** 2 - z ** 2)\n', (421, 448), True, 'import numpy as np\n'), ((690, 702), 'pyinterp.core.Axis', 'core.Axis', (['x'], {}), '(x)\n', (699, 702), True, 'import pyinterp.core as core\n'), ((704, 716), 'pyinterp.core.Axis', 'core.Axis', (['y'], {}), '(y)\n', (713, 716), True, 'import pyinterp.core as core\n'), ((718, 730), 'pyinterp.core.Axis', 'core.Axis', (['z'], {}), '(z)\n', (727, 730), True, 'import pyinterp.core as core\n'), ((766, 778), 'pyinterp.core.Axis', 'core.Axis', (['u'], {}), '(u)\n', (775, 778), True, 'import pyinterp.core as core\n'), ((1310, 1328), 'pickle.dumps', 'pickle.dumps', (['grid'], {}), '(grid)\n', (1322, 1328), False, 'import pickle\n'), ((1522, 1555), 'numpy.all', 'np.all', (['(grid.array == other.array)'], {}), '(grid.array == other.array)\n', (1528, 1555), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
"""
#%% Import necessary modules and packages
import numpy as np
from scipy import linalg
from scipy import signal
from matplotlib import pyplot as plt
#%% Load wind velocity and plot
plt.close('all')
# Wind-velocity data: t holds time vectors, V one velocity series per row.
t = np.load('ProblemSet5_task2_t.npy')
V = np.load('ProblemSet5_task2_V.npy')
# Plot a few of the wind time series against the common time base t[0].
plt.figure()
for row in (0, 10, 20):
    plt.plot(t[0], V[row, :])
plt.show()
plt.xlabel('t [s]')
plt.ylabel('V [m/s]')
plt.grid()
plt.xlim(0, 600)
| [
"matplotlib.pyplot.xlim",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((217, 233), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (226, 233), True, 'from matplotlib import pyplot as plt\n'), ((237, 271), 'numpy.load', 'np.load', (['"""ProblemSet5_task2_t.npy"""'], {}), "('ProblemSet5_task2_t.npy')\n", (244, 271), True, 'import numpy as np\n'), ((274, 308), 'numpy.load', 'np.load', (['"""ProblemSet5_task2_V.npy"""'], {}), "('ProblemSet5_task2_V.npy')\n", (281, 308), True, 'import numpy as np\n'), ((356, 368), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (366, 368), True, 'from matplotlib import pyplot as plt\n'), ((369, 392), 'matplotlib.pyplot.plot', 'plt.plot', (['t[0]', 'V[0, :]'], {}), '(t[0], V[0, :])\n', (377, 392), True, 'from matplotlib import pyplot as plt\n'), ((391, 415), 'matplotlib.pyplot.plot', 'plt.plot', (['t[0]', 'V[10, :]'], {}), '(t[0], V[10, :])\n', (399, 415), True, 'from matplotlib import pyplot as plt\n'), ((414, 438), 'matplotlib.pyplot.plot', 'plt.plot', (['t[0]', 'V[20, :]'], {}), '(t[0], V[20, :])\n', (422, 438), True, 'from matplotlib import pyplot as plt\n'), ((437, 447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (445, 447), True, 'from matplotlib import pyplot as plt\n'), ((448, 467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [s]"""'], {}), "('t [s]')\n", (458, 467), True, 'from matplotlib import pyplot as plt\n'), ((468, 489), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""V [m/s]"""'], {}), "('V [m/s]')\n", (478, 489), True, 'from matplotlib import pyplot as plt\n'), ((490, 500), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (498, 500), True, 'from matplotlib import pyplot as plt\n'), ((501, 517), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(600)'], {}), '(0, 600)\n', (509, 517), True, 'from matplotlib import pyplot as plt\n')] |
import sys
import os
import warnings
import itertools
import time
import glob
import urllib.request
import argparse
import numpy as np
import torch
# Model artifacts to fetch: local filename -> Hugging Face download URL
# (sentence-transformers/paraphrase-distilroberta-base-v1).
files = {
    "config.json": "https://huggingface.co/sentence-transformers/paraphrase-distilroberta-base-v1/resolve/main/config.json",
    "pytorch_model.bin": "https://huggingface.co/sentence-transformers/paraphrase-distilroberta-base-v1/resolve/main/pytorch_model.bin",
}
# Directory that will receive one .npy file per model tensor.
weights_dir = "pretrained_weights"
def download_file(url, fn):
    """Fetch *url* into local file *fn* with a console progress bar.

    Skips the download (with a warning) when *fn* already exists.
    """
    if os.path.exists(fn):
        warnings.warn(f"File '{fn}' already exists, skipping download")
        return

    print(f"\n\nDownloading {fn} from {url}\n")
    start_time = time.time()

    def _report_progress(count, block_size, total_size):
        # urlretrieve callback: redraw a 50-character bar on one line.
        elapsed = int(time.time() - start_time)
        downloaded_mb = int(count * block_size / (1024 ** 2))
        percent = min(int(count * block_size * 100 / total_size), 100)
        filled = int(percent / 2)
        bar = "|" + "#" * filled + "-" * (50 - filled) + "|"
        sys.stdout.write(
            f"\r{bar} {percent}%, {downloaded_mb} MB, {elapsed}s elapsed"
        )
        sys.stdout.flush()

    urllib.request.urlretrieve(url, fn, _report_progress)
def extract_weights(model, weights_dir):
    """Write each named tensor of *model* to ./<weights_dir>/<name>.npy as float32."""
    for name in model:
        values = np.array(model[name]).astype(np.float32)
        np.save(f"./{weights_dir}/{name}.npy", values)
def process_weights(weights_dir):
    """Post-process dumped weights in ./<weights_dir>/: fuse each LayerNorm
    weight/bias pair into one file and transpose the embedding tables."""
    # Group LayerNorm files by their common prefix (text before ".LayerNorm").
    groups = {}
    for fn in glob.glob(f"./{weights_dir}/*LayerNorm*.npy"):
        prefix = fn.split(".LayerNorm")[0]
        groups.setdefault(prefix, []).append(fn)

    # Fuse each pair into an (n, 2) array: column 0 = weight, column 1 = bias.
    for prefix, fns in groups.items():
        weight_fn = [fn for fn in fns if "weight.npy" in fn][0]
        bias_fn = [fn for fn in fns if "bias.npy" in fn][0]
        fused = np.stack([np.load(weight_fn), np.load(bias_fn)]).T.copy()
        np.save(f"{prefix}.layernorm.weightbias.npy", fused)

    # Transpose the embedding matrices in place.
    patterns = (
        "*position_embeddings*",
        "*token_type_embeddings*",
        "*word_embeddings*",
    )
    for pattern in patterns:
        for fn in glob.glob(f"{weights_dir}/{pattern}.npy"):
            np.save(fn, np.load(fn).T.copy())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-weights', action='store_true', help='avoids downloading model weights')
    args = parser.parse_args()

    # Without the weights we only need the model config.
    if args.no_weights:
        del files['pytorch_model.bin']

    # Download the model files from Hugging Face.
    for fn, url in files.items():
        download_file(url, fn)

    if not args.no_weights:
        # Dump the torch checkpoint to per-tensor .npy files, then convert
        # them into the layout LBANN expects.
        os.makedirs(weights_dir, exist_ok=True)
        model = torch.load("pytorch_model.bin", map_location="cpu")
        extract_weights(model, weights_dir)
        process_weights(weights_dir)
| [
"sys.stdout.write",
"numpy.load",
"numpy.save",
"argparse.ArgumentParser",
"os.makedirs",
"torch.load",
"os.path.exists",
"time.time",
"sys.stdout.flush",
"numpy.array",
"glob.glob",
"warnings.warn",
"itertools.chain"
] | [((960, 978), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (974, 978), False, 'import os\n'), ((1512, 1557), 'glob.glob', 'glob.glob', (['f"""./{weights_dir}/*LayerNorm*.npy"""'], {}), "(f'./{weights_dir}/*LayerNorm*.npy')\n", (1521, 1557), False, 'import glob\n'), ((2408, 2437), 'itertools.chain', 'itertools.chain', (['*embed_files'], {}), '(*embed_files)\n', (2423, 2437), False, 'import itertools\n'), ((2549, 2574), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2572, 2574), False, 'import argparse\n'), ((817, 907), 'sys.stdout.write', 'sys.stdout.write', (["f'\\r{prog_bar} {percent}%, {progress_size} MB, {duration}s elapsed'"], {}), "(\n f'\\r{prog_bar} {percent}%, {progress_size} MB, {duration}s elapsed')\n", (833, 907), False, 'import sys\n'), ((933, 951), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (949, 951), False, 'import sys\n'), ((988, 1051), 'warnings.warn', 'warnings.warn', (['f"""File \'{fn}\' already exists, skipping download"""'], {}), '(f"File \'{fn}\' already exists, skipping download")\n', (1001, 1051), False, 'import warnings\n'), ((1135, 1146), 'time.time', 'time.time', ([], {}), '()\n', (1144, 1146), False, 'import time\n'), ((1350, 1397), 'numpy.save', 'np.save', (['f"""./{weights_dir}/{name}.npy"""', 'weights'], {}), "(f'./{weights_dir}/{name}.npy', weights)\n", (1357, 1397), True, 'import numpy as np\n'), ((2076, 2140), 'numpy.save', 'np.save', (['f"""{base_fn}.layernorm.weightbias.npy"""', 'weight_bias_vals'], {}), "(f'{base_fn}.layernorm.weightbias.npy', weight_bias_vals)\n", (2083, 2140), True, 'import numpy as np\n'), ((2210, 2245), 'glob.glob', 'glob.glob', (['f"""{weights_dir}/{e}.npy"""'], {}), "(f'{weights_dir}/{e}.npy')\n", (2219, 2245), False, 'import glob\n'), ((3038, 3089), 'torch.load', 'torch.load', (['"""pytorch_model.bin"""'], {'map_location': '"""cpu"""'}), "('pytorch_model.bin', map_location='cpu')\n", (3048, 3089), False, 'import torch\n'), ((2956, 2983), 
'os.path.exists', 'os.path.exists', (['weights_dir'], {}), '(weights_dir)\n', (2970, 2983), False, 'import os\n'), ((2997, 3021), 'os.makedirs', 'os.makedirs', (['weights_dir'], {}), '(weights_dir)\n', (3008, 3021), False, 'import os\n'), ((564, 575), 'time.time', 'time.time', ([], {}), '()\n', (573, 575), False, 'import time\n'), ((1305, 1322), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (1313, 1322), True, 'import numpy as np\n'), ((2485, 2496), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (2492, 2496), True, 'import numpy as np\n'), ((2020, 2038), 'numpy.load', 'np.load', (['weight_fn'], {}), '(weight_fn)\n', (2027, 2038), True, 'import numpy as np\n'), ((2040, 2056), 'numpy.load', 'np.load', (['bias_fn'], {}), '(bias_fn)\n', (2047, 2056), True, 'import numpy as np\n')] |
#
# Packt Publishing
# Hands-on Tensorflow Lite for Intelligent Mobile Apps
# @author: <NAME>
#
# Section 3: Handwriting recognition
# Video 3-3: Parameter study
#
import tensorflow as tf
import numpy as np
from scipy.io import loadmat
import itertools
# Import E-MNIST data
mydata = loadmat("matlab/emnist-balanced.mat")  # E-MNIST "balanced" split, MATLAB format
b_size = 10      # mini-batch size; NOTE: rebound per-configuration by the grid search below
img_height = 28  # input image height in pixels
img_width = 28   # input image width in pixels
classes = 47     # number of label classes (one-hot width)
epochs = 1       # training epochs per configuration
def hotvector(vector, num_classes=None):
    """Transform a vector of labels into a matrix of one-hot rows.

    Args:
        vector: array of integer class indices (first axis = samples;
            each vector[i] must index a valid class — TODO confirm range).
        num_classes: width of each one-hot row. Defaults to the
            module-level ``classes`` constant for backward compatibility.

    Returns:
        Float array of shape (vector.shape[0], num_classes) with a single
        1 per row.
    """
    if num_classes is None:
        num_classes = classes
    result = np.zeros((vector.shape[0], num_classes))
    for i in range(vector.shape[0]):
        result[i][vector[i]] = 1
    return result
def thresholdvector(vector):
    """Binarize *vector* in place (zeros stay 0, non-zeros become 1) and return it.

    The input array itself is modified, matching the original row-by-row
    thresholding behavior.
    """
    vector[:] = 1.0 * (vector > 0)
    return vector
def getModel(config):
    """Build the CNN graph for one hyper-parameter configuration.

    config is a (initializer, learning_rate, batch_size) tuple; the batch
    size is unused here (the graph accepts any batch via the None dim).
    Returns ((xi, yi), train_step, cost, eval_pred, saver).
    """
    init,lr,_ = config
    # Flattened 28x28 input and one-hot target placeholders.
    xi = tf.placeholder(tf.float32,[None, img_height*img_width],name="inputX")
    yi = tf.placeholder(tf.float32,[None, classes],name="outputY")
    # Reshape to NHWC for the conv layers.
    x = tf.reshape(xi, [-1, img_height, img_width, 1])
    with tf.variable_scope("conv1") as scope:
        # First 3x3 convolution, 1 -> 32 channels, no padding.
        W = tf.get_variable("W",shape=[3,3,1,32],initializer=init)
        b = tf.get_variable("b",initializer=tf.zeros([32]))
        conv = tf.nn.conv2d(x,W,strides=[1, 1, 1, 1],padding="VALID")
        pre_act = tf.nn.bias_add(conv,b)
        act = tf.nn.relu(pre_act)
    with tf.variable_scope("conv2") as scope:
        # Second 3x3 convolution, 32 -> 64 channels.
        W = tf.get_variable("W",shape=[3,3,32,64],initializer=init)
        b = tf.get_variable("b",initializer=tf.zeros([64]))
        conv = tf.nn.conv2d(act,W,strides=[1, 1, 1, 1],padding="VALID")
        pre_act = tf.nn.bias_add(conv,b)
        act = tf.nn.relu(pre_act)
    # 2x2 max pooling, stride 2.
    l3_mp = tf.nn.max_pool(act,[1,2,2,1],strides=[1,2,2,1],padding="VALID")
    # Flatten for the dense layers (12x12x64 after two VALID convs + pool).
    l4 = tf.reshape(l3_mp,[-1, 12*12*64])
    with tf.variable_scope("dense1") as scope:
        W = tf.get_variable("W",shape=[12*12*64,128],initializer=init)
        b = tf.get_variable("b",initializer=tf.zeros([128]))
        dense = tf.matmul(l4,W)+b
        act = tf.nn.relu(dense)
    with tf.variable_scope("dense2") as scope:
        # Output logits, one per class.
        W = tf.get_variable("W",shape=[128,classes],initializer=init)
        b = tf.get_variable("b",initializer=tf.zeros([classes]))
        dense = tf.matmul(act,W)+b
    # Softmax prediction op (used only at test time).
    eval_pred = tf.nn.softmax(dense,name="prediction")
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=dense,labels=yi)
    cost = tf.reduce_mean(cross_entropy)
    train_step = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
    saver = tf.train.Saver()
    return (xi,yi),train_step,cost,eval_pred,saver
# Hyper-parameter grid: weight initializer x learning rate x batch size.
initP = [tf.contrib.layers.xavier_initializer(),tf.keras.initializers.he_uniform()]
lrP = np.linspace(1e-5,1,10)
batchP = [8,16,32,64]
parameters = [initP,lrP,batchP]
allConfgs = list(itertools.product(*parameters))  # every (init, lr, batch) combination
n_times = 5  # repetitions per configuration (fresh random init each time)
counter=0
for conf in allConfgs:
    counter+=1
    print(counter,len(allConfgs))
    best_acc = 0
    # Build a fresh graph for this configuration.
    (xi,yi),optimizer,cost,eval_pred,saver = getModel(conf)
    b_size = conf[-1]  # rebind the module-level batch size for this run
    for t in range(n_times):
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # Training
            for i in range(epochs):
                # Mini-batches over the train split of the .mat structure.
                for j in range(0,mydata["dataset"]["train"][0][0][0][0][1].shape[0],b_size):
                    # NOTE(review): thresholdvector mutates the dataset slice
                    # in place; harmless since thresholding is idempotent.
                    x_raw = thresholdvector(mydata["dataset"]["train"][0][0][0][0][0][j:j+b_size])
                    y_raw = hotvector(mydata["dataset"]["train"][0][0][0][0][1][j:j+b_size])
                    [la,c]=sess.run([optimizer,cost], feed_dict={xi: x_raw, yi: y_raw})
            ## Saving the graph for later
            #saver.save(sess, 'tmp/my-weights')
            #g = sess.graph
            #gdef = g.as_graph_def()
            #tf.train.write_graph(gdef,"tmp","graph.pb",False)
            # Testing: count correct top-1 predictions over the test split.
            c=0;g=0
            for i in range(mydata["dataset"]["test"][0][0][0][0][1].shape[0]):
                x_raw = thresholdvector(mydata["dataset"]["test"][0][0][0][0][0][i:i+1]) # It will just have the proper shape
                y_raw = hotvector(mydata["dataset"]["test"][0][0][0][0][1][i:i+1])
                pred=sess.run(eval_pred,feed_dict={xi: x_raw})
                if np.argmax(y_raw)==np.argmax(pred):
                    g+=1
                c+=1
            acc=1.0*g/c
            print(acc)
            if best_acc<acc:
                best_acc=acc
    print("Best accuracy: "+str(best_acc))
    # Append this configuration's best accuracy to the CSV-style log.
    f=open("logRes","a")
    f.write("{0},{1},{2},{3}\n".format(str(conf[0]),conf[1],conf[2],str(best_acc)))
    f.close()
    tf.reset_default_graph()
| [
"tensorflow.contrib.layers.xavier_initializer",
"scipy.io.loadmat",
"tensorflow.keras.initializers.he_uniform",
"tensorflow.reset_default_graph",
"numpy.argmax",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.nn.... | [((286, 323), 'scipy.io.loadmat', 'loadmat', (['"""matlab/emnist-balanced.mat"""'], {}), "('matlab/emnist-balanced.mat')\n", (293, 323), False, 'from scipy.io import loadmat\n'), ((2715, 2740), 'numpy.linspace', 'np.linspace', (['(1e-05)', '(1)', '(10)'], {}), '(1e-05, 1, 10)\n', (2726, 2740), True, 'import numpy as np\n'), ((521, 557), 'numpy.zeros', 'np.zeros', (['(vector.shape[0], classes)'], {}), '((vector.shape[0], classes))\n', (529, 557), True, 'import numpy as np\n'), ((919, 992), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, img_height * img_width]'], {'name': '"""inputX"""'}), "(tf.float32, [None, img_height * img_width], name='inputX')\n", (933, 992), True, 'import tensorflow as tf\n'), ((995, 1054), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, classes]'], {'name': '"""outputY"""'}), "(tf.float32, [None, classes], name='outputY')\n", (1009, 1054), True, 'import tensorflow as tf\n'), ((1059, 1105), 'tensorflow.reshape', 'tf.reshape', (['xi', '[-1, img_height, img_width, 1]'], {}), '(xi, [-1, img_height, img_width, 1])\n', (1069, 1105), True, 'import tensorflow as tf\n'), ((1735, 1807), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['act', '[1, 2, 2, 1]'], {'strides': '[1, 2, 2, 1]', 'padding': '"""VALID"""'}), "(act, [1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n", (1749, 1807), True, 'import tensorflow as tf\n'), ((1815, 1852), 'tensorflow.reshape', 'tf.reshape', (['l3_mp', '[-1, 12 * 12 * 64]'], {}), '(l3_mp, [-1, 12 * 12 * 64])\n', (1825, 1852), True, 'import tensorflow as tf\n'), ((2320, 2359), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['dense'], {'name': '"""prediction"""'}), "(dense, name='prediction')\n", (2333, 2359), True, 'import tensorflow as tf\n'), ((2377, 2441), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'dense', 'labels': 'yi'}), '(logits=dense, labels=yi)\n', (2416, 2441), True, 'import tensorflow as 
tf\n'), ((2449, 2478), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (2463, 2478), True, 'import tensorflow as tf\n'), ((2558, 2574), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2572, 2574), True, 'import tensorflow as tf\n'), ((2634, 2672), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2670, 2672), True, 'import tensorflow as tf\n'), ((2673, 2707), 'tensorflow.keras.initializers.he_uniform', 'tf.keras.initializers.he_uniform', ([], {}), '()\n', (2705, 2707), True, 'import tensorflow as tf\n'), ((2809, 2839), 'itertools.product', 'itertools.product', (['*parameters'], {}), '(*parameters)\n', (2826, 2839), False, 'import itertools\n'), ((4300, 4324), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4322, 4324), True, 'import tensorflow as tf\n'), ((1113, 1139), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv1"""'], {}), "('conv1')\n", (1130, 1139), True, 'import tensorflow as tf\n'), ((1183, 1242), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '[3, 3, 1, 32]', 'initializer': 'init'}), "('W', shape=[3, 3, 1, 32], initializer=init)\n", (1198, 1242), True, 'import tensorflow as tf\n'), ((1302, 1359), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(x, W, strides=[1, 1, 1, 1], padding='VALID')\n", (1314, 1359), True, 'import tensorflow as tf\n'), ((1369, 1392), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {}), '(conv, b)\n', (1383, 1392), True, 'import tensorflow as tf\n'), ((1400, 1419), 'tensorflow.nn.relu', 'tf.nn.relu', (['pre_act'], {}), '(pre_act)\n', (1410, 1419), True, 'import tensorflow as tf\n'), ((1428, 1454), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv2"""'], {}), "('conv2')\n", (1445, 1454), True, 'import tensorflow as tf\n'), ((1471, 1531), 'tensorflow.get_variable', 
'tf.get_variable', (['"""W"""'], {'shape': '[3, 3, 32, 64]', 'initializer': 'init'}), "('W', shape=[3, 3, 32, 64], initializer=init)\n", (1486, 1531), True, 'import tensorflow as tf\n'), ((1591, 1650), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['act', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(act, W, strides=[1, 1, 1, 1], padding='VALID')\n", (1603, 1650), True, 'import tensorflow as tf\n'), ((1660, 1683), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['conv', 'b'], {}), '(conv, b)\n', (1674, 1683), True, 'import tensorflow as tf\n'), ((1691, 1710), 'tensorflow.nn.relu', 'tf.nn.relu', (['pre_act'], {}), '(pre_act)\n', (1701, 1710), True, 'import tensorflow as tf\n'), ((1855, 1882), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dense1"""'], {}), "('dense1')\n", (1872, 1882), True, 'import tensorflow as tf\n'), ((1899, 1964), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '[12 * 12 * 64, 128]', 'initializer': 'init'}), "('W', shape=[12 * 12 * 64, 128], initializer=init)\n", (1914, 1964), True, 'import tensorflow as tf\n'), ((2050, 2067), 'tensorflow.nn.relu', 'tf.nn.relu', (['dense'], {}), '(dense)\n', (2060, 2067), True, 'import tensorflow as tf\n'), ((2075, 2102), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dense2"""'], {}), "('dense2')\n", (2092, 2102), True, 'import tensorflow as tf\n'), ((2119, 2179), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '[128, classes]', 'initializer': 'init'}), "('W', shape=[128, classes], initializer=init)\n", (2134, 2179), True, 'import tensorflow as tf\n'), ((2024, 2040), 'tensorflow.matmul', 'tf.matmul', (['l4', 'W'], {}), '(l4, W)\n', (2033, 2040), True, 'import tensorflow as tf\n'), ((2247, 2264), 'tensorflow.matmul', 'tf.matmul', (['act', 'W'], {}), '(act, W)\n', (2256, 2264), True, 'import tensorflow as tf\n'), ((2493, 2533), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', 
(2515, 2533), True, 'import tensorflow as tf\n'), ((3056, 3068), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3066, 3068), True, 'import tensorflow as tf\n'), ((1276, 1290), 'tensorflow.zeros', 'tf.zeros', (['[32]'], {}), '([32])\n', (1284, 1290), True, 'import tensorflow as tf\n'), ((1565, 1579), 'tensorflow.zeros', 'tf.zeros', (['[64]'], {}), '([64])\n', (1573, 1579), True, 'import tensorflow as tf\n'), ((1996, 2011), 'tensorflow.zeros', 'tf.zeros', (['[128]'], {}), '([128])\n', (2004, 2011), True, 'import tensorflow as tf\n'), ((2215, 2234), 'tensorflow.zeros', 'tf.zeros', (['[classes]'], {}), '([classes])\n', (2223, 2234), True, 'import tensorflow as tf\n'), ((3090, 3123), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3121, 3123), True, 'import tensorflow as tf\n'), ((4014, 4030), 'numpy.argmax', 'np.argmax', (['y_raw'], {}), '(y_raw)\n', (4023, 4030), True, 'import numpy as np\n'), ((4032, 4047), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (4041, 4047), True, 'import numpy as np\n')] |
import os
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
import time
import datetime
import tqdm
import numpy as np
from pathlib import Path
import argparse
import cv2
from pyspark.sql.types import *
from pyspark.sql import SparkSession
# NOTE(review): pyplot, tqdm and Path are imported but unused in this script.
# Global TF1-style session/graph shared by pred_from_frame() below.
sess = tf.Session()
graph = tf.get_default_graph()
# All working directories live under $HOME (KeyError if HOME is unset).
OUTPUT_PATH = os.path.join(os.environ['HOME'], "output")
MODEL_PATH = os.path.join(os.environ['HOME'], "models")
VIDEO_PATH = os.path.join(os.environ["HOME"], "videos")
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--video', required=True, dest='video', type=str,
                    help="Name of video in video folder")
parser.add_argument('-m', '--model', required=True, dest='model', type=str,
                    help="Name of model in model folder")
args = parser.parse_args()
CV_MODEL = args.model
# Restore a frozen object-detection checkpoint into the default graph.
saver = tf.train.import_meta_graph(os.path.join(MODEL_PATH, CV_MODEL, 'model.ckpt.meta'))
saver.restore(sess, tf.train.latest_checkpoint(os.path.join(MODEL_PATH, CV_MODEL)))
# Tensor names follow the TF Object Detection API export convention.
input_tensor = graph.get_tensor_by_name('image_tensor:0')
output_tensors = dict(
    bboxes = graph.get_tensor_by_name('detection_boxes:0'),
    classes = graph.get_tensor_by_name('detection_classes:0'),
    n = graph.get_tensor_by_name('num_detections:0'),
    scores = graph.get_tensor_by_name('detection_scores:0'),
)
def pred_from_frame(frames):
    """Run the detector once on a list of frames stacked into a single batch.

    Returns the (bboxes, scores, n, classes) arrays produced by the
    object-detection graph, in that order.
    """
    batch = np.stack(frames)
    result = sess.run(output_tensors, feed_dict={input_tensor: batch})
    return result['bboxes'], result['scores'], result['n'], result['classes']
def process_video(video_path, batch_size=32, rate=2):
    """Decode a video and run batched object detection at ~`rate` frames/sec.

    The file name must look like ``<%Y%m%d-%H%M%S>-<fps>.<ext>``: the prefix
    anchors per-frame wall-clock timestamps, the trailing integer is the
    recording frame rate used to compute the read stride.

    Parameters
    ----------
    video_path : str
        Path of the video file to decode.
    batch_size : int
        Number of sampled frames per detector call.
    rate : int
        Target sampling rate (frames per second of video time).

    Returns
    -------
    tuple
        (timestamps, bboxes, scores, n, classes) stacked over all batches.
    """
    split_name = os.path.splitext(os.path.basename(video_path))[0].split('-')
    timestamp = '-'.join(split_name[:-1])
    fps = int(split_name[-1])
    skip = int(fps // rate)  # frames read (and discarded) per kept frame
    initial = datetime.datetime.strptime(timestamp, '%Y%m%d-%H%M%S')
    cap = cv2.VideoCapture(video_path)
    all_scores, all_classes, all_n, all_bboxes, timestamps = [], [], [], [], []
    start_time = time.time()
    video_running = True
    processed = 0
    try:
        while video_running:
            frames = []
            for _ in range(batch_size):
                # Advance `skip` frames so the effective rate is `rate` fps.
                for _ in range(skip):
                    ret, frame = cap.read()
                    if not ret:
                        print("Video finished")
                        video_running = False
                        break
                if not video_running:
                    break
                frames.append(frame)
                # NOTE(review): timestamps advance by rate*processed seconds;
                # for a sampling rate of `rate` fps one would expect
                # processed/rate — confirm the intended spacing.
                timestamps.append(str(initial + datetime.timedelta(seconds=rate*processed)))
                processed += 1
            if not frames:
                break
            bboxes, scores, n, classes = pred_from_frame(frames)
            all_scores.append(scores)
            all_bboxes.append(bboxes)
            all_n.append(n)
            all_classes.append(classes)
            if not video_running:
                break
    finally:
        # BUG FIX: the capture handle was never released, leaking the
        # decoder/file descriptor on every call.
        cap.release()
    print('Frames processed: %d' % processed)
    print("Total time: {} seconds".format(int(time.time() - start_time)))
    full_bboxes = np.row_stack(all_bboxes)
    full_scores = np.row_stack(all_scores)
    full_classes = np.row_stack(all_classes)
    full_n = np.concatenate(all_n, axis=None)
    return timestamps, full_bboxes, full_scores, full_n, full_classes
def make_predictions(videoname):
    """Run detection over one video and persist per-frame results as JSON.

    Writes one Spark JSON dataset per video under OUTPUT_PATH, containing
    per-frame person boxes, their scores, the frame timestamp, and the
    boxes of the current and following frame concatenated ("pair_bboxes").
    """
    video = os.path.join(VIDEO_PATH, videoname)
    BATCH_SIZE = 72
    start_time = time.time()
    timestamps, bboxes, scores, n, classes = process_video(video, batch_size=BATCH_SIZE)
    end_time = time.time()
    print('Elapsed: %f' % (end_time - start_time))
    # some cleaning: keep only valid detections of class 1 (presumably
    # "person" in the COCO label map — TODO confirm against the model).
    agg_bboxes = []
    agg_scores = []
    num_frames = len(bboxes)
    for ind in range(num_frames):
        # Only the first n[ind] rows of each per-frame array are real
        # detections; the rest is zero padding from the detection API.
        image_n = int(n[ind])
        image_bboxes = bboxes[ind][:image_n]
        image_scores = scores[ind][:image_n]
        image_classes = classes[ind][:image_n]
        indices = image_classes == 1.
        image_bboxes = image_bboxes[indices]
        image_scores = image_scores[indices]
        agg_bboxes.append(image_bboxes.flatten().tolist())
        agg_scores.append(image_scores.tolist())
    num_detections = [float(len(x)) for x in agg_scores]
    # Each row: [count, frame-i boxes..., frame-i+1 boxes...].  Note that
    # zip(*(a, b, c)) is simply zip(a, b, c).
    pair_bboxes = [[i] + j + k for i, j, k in zip(*(num_detections[:-1], agg_bboxes[:-1], agg_bboxes[1:]))]
    # Drop the last frame so all columns have len(num_frames) - 1 rows.
    agg_bboxes = agg_bboxes[:-1]
    agg_scores = agg_scores[:-1]
    # Save to Spark dataframe
    output_dir = os.path.join(OUTPUT_PATH, os.path.splitext(videoname)[0])
    spark = SparkSession.builder.getOrCreate()
    schema = StructType([StructField('bboxes', ArrayType(DoubleType()), True),
                StructField('scores', ArrayType(DoubleType()), True),
                StructField('timestamp', StringType(), True),
                StructField('pair_bboxes', ArrayType(DoubleType()), True)])
    df = spark.createDataFrame(list(zip(*(agg_bboxes, agg_scores, timestamps, pair_bboxes))), schema)
    df.coalesce(1).write.mode('overwrite').json(output_dir)
    # Read the result back as a smoke check and display a sample.
    df1 = spark.read.json(output_dir)
    df1.show()
# Script entry point: process the video named on the command line.
make_predictions(args.video)
| [
"numpy.stack",
"argparse.ArgumentParser",
"pyspark.sql.SparkSession.builder.getOrCreate",
"os.path.basename",
"tensorflow.Session",
"time.time",
"cv2.VideoCapture",
"datetime.datetime.strptime",
"datetime.timedelta",
"numpy.row_stack",
"os.path.splitext",
"tensorflow.get_default_graph",
"os.... | [((278, 290), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (288, 290), True, 'import tensorflow as tf\n'), ((299, 321), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (319, 321), True, 'import tensorflow as tf\n'), ((337, 379), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""output"""'], {}), "(os.environ['HOME'], 'output')\n", (349, 379), False, 'import os\n'), ((393, 435), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""models"""'], {}), "(os.environ['HOME'], 'models')\n", (405, 435), False, 'import os\n'), ((449, 491), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""videos"""'], {}), "(os.environ['HOME'], 'videos')\n", (461, 491), False, 'import os\n'), ((502, 527), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (525, 527), False, 'import argparse\n'), ((882, 935), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'CV_MODEL', '"""model.ckpt.meta"""'], {}), "(MODEL_PATH, CV_MODEL, 'model.ckpt.meta')\n", (894, 935), False, 'import os\n'), ((1453, 1469), 'numpy.stack', 'np.stack', (['frames'], {}), '(frames)\n', (1461, 1469), True, 'import numpy as np\n'), ((1937, 1991), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timestamp', '"""%Y%m%d-%H%M%S"""'], {}), "(timestamp, '%Y%m%d-%H%M%S')\n", (1963, 1991), False, 'import datetime\n'), ((2011, 2039), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (2027, 2039), False, 'import cv2\n'), ((2138, 2149), 'time.time', 'time.time', ([], {}), '()\n', (2147, 2149), False, 'import time\n'), ((3110, 3134), 'numpy.row_stack', 'np.row_stack', (['all_bboxes'], {}), '(all_bboxes)\n', (3122, 3134), True, 'import numpy as np\n'), ((3153, 3177), 'numpy.row_stack', 'np.row_stack', (['all_scores'], {}), '(all_scores)\n', (3165, 3177), True, 'import numpy as np\n'), ((3197, 3222), 'numpy.row_stack', 'np.row_stack', (['all_classes'], {}), '(all_classes)\n', (3209, 3222), True, 'import 
numpy as np\n'), ((3236, 3268), 'numpy.concatenate', 'np.concatenate', (['all_n'], {'axis': 'None'}), '(all_n, axis=None)\n', (3250, 3268), True, 'import numpy as np\n'), ((3385, 3420), 'os.path.join', 'os.path.join', (['VIDEO_PATH', 'videoname'], {}), '(VIDEO_PATH, videoname)\n', (3397, 3420), False, 'import os\n'), ((3459, 3470), 'time.time', 'time.time', ([], {}), '()\n', (3468, 3470), False, 'import time\n'), ((3575, 3586), 'time.time', 'time.time', ([], {}), '()\n', (3584, 3586), False, 'import time\n'), ((4532, 4566), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (4564, 4566), False, 'from pyspark.sql import SparkSession\n'), ((984, 1018), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'CV_MODEL'], {}), '(MODEL_PATH, CV_MODEL)\n', (996, 1018), False, 'import os\n'), ((4488, 4515), 'os.path.splitext', 'os.path.splitext', (['videoname'], {}), '(videoname)\n', (4504, 4515), False, 'import os\n'), ((1779, 1807), 'os.path.basename', 'os.path.basename', (['video_path'], {}), '(video_path)\n', (1795, 1807), False, 'import os\n'), ((3064, 3075), 'time.time', 'time.time', ([], {}), '()\n', (3073, 3075), False, 'import time\n'), ((2622, 2666), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(rate * processed)'}), '(seconds=rate * processed)\n', (2640, 2666), False, 'import datetime\n')] |
import torch
import torch.utils.data as data_utils
import logging
import numpy as np
import cv2
from tqdm import tqdm
# Module-level logger; handler/level configuration is left to the caller.
logger = logging.getLogger(__name__)
class UCF101(data_utils.Dataset):
    """UCF101 video-clip dataset driven by a split file.

    Each line of ``video_list`` is ``<video_file> <start_frame> <class_idx>``
    (whitespace separated; the ``.avi`` extension is appended when reading).
    ``__getitem__`` decodes ``length`` consecutive frames starting near
    ``start_frame``, resizes them to (new_width, new_height), and returns a
    float32 tensor of shape (C, T, H, W) plus the class label; on any decode
    failure it returns ``(None, None)``.
    """

    def __init__(self, video_list, subset='train', transforms=None, length=1, new_width=171, new_height=128):
        super(UCF101, self).__init__()
        self.subset = subset
        # The with-block closes the file; the original explicit close() was
        # redundant and has been dropped.
        with open(video_list) as f:
            lines = f.readlines()
        self.videos = []
        for line in tqdm(lines):
            video_file, start_frame_num, class_idx = line.strip().split()
            self.videos.append({
                'video': video_file,
                'start': int(start_frame_num),
                'class': int(class_idx)
            })
        self.transforms = transforms
        self.length = length          # frames per returned clip
        self.new_width = new_width    # resize target width
        self.new_height = new_height  # resize target height

    def __len__(self):
        return len(self.videos)

    def __getitem__(self, item):
        info = self.videos[item]
        cap = cv2.VideoCapture('{}.avi'.format(info['video']))
        if not cap.isOpened():
            logger.error('Cannot open video {}'.format(info['video']))
            return None, None
        try:
            num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            # NOTE(review): start - 2 appears to convert a 1-based start
            # frame to a 0-based seek position one frame early — confirm.
            cap.set(cv2.CAP_PROP_POS_FRAMES, info['start'] - 2)
            chunk = []
            frame = None
            for i in range(info['start'], min(info['start'] + self.length, num_frames)):
                ret, frame = cap.read()
                if not ret or frame is None:
                    break
                frame = cv2.resize(frame, dsize=(self.new_width, self.new_height))
                chunk.append(frame)
        finally:
            # BUG FIX: release the capture handle; it previously leaked on
            # every __getitem__ call.
            cap.release()
        if not chunk:
            # BUG FIX: with zero decoded frames the padding loop below used
            # to raise NameError on the undefined `frame`; fail soft instead,
            # matching the other error paths.
            return None, None
        # Pad short clips by repeating the last decoded frame.
        for i in range(len(chunk), self.length):
            chunk.append(frame)
        try:
            chunk = np.asarray(chunk, dtype=np.float32)
        except Exception:  # was a bare except; keep the best-effort contract
            return None, None
        if self.transforms is not None:
            chunk = self.transforms(chunk)
        # (T, H, W, C) -> (C, T, H, W) for 3D conv consumption.
        return torch.from_numpy(chunk.transpose([3, 0, 1, 2])), torch.tensor(info['class'], dtype=torch.int64)
| [
"tqdm.tqdm",
"numpy.asarray",
"logging.getLogger",
"torch.tensor",
"cv2.resize"
] | [((128, 155), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (145, 155), False, 'import logging\n'), ((503, 514), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (507, 514), False, 'from tqdm import tqdm\n'), ((1946, 1989), 'numpy.asarray', 'np.asarray', (["[info['class']]"], {'dtype': 'np.int64'}), "([info['class']], dtype=np.int64)\n", (1956, 1989), True, 'import numpy as np\n'), ((1559, 1617), 'cv2.resize', 'cv2.resize', (['frame'], {'dsize': '(self.new_width, self.new_height)'}), '(frame, dsize=(self.new_width, self.new_height))\n', (1569, 1617), False, 'import cv2\n'), ((1764, 1799), 'numpy.asarray', 'np.asarray', (['chunk'], {'dtype': 'np.float32'}), '(chunk, dtype=np.float32)\n', (1774, 1799), True, 'import numpy as np\n'), ((2051, 2097), 'torch.tensor', 'torch.tensor', (["info['class']"], {'dtype': 'torch.int64'}), "(info['class'], dtype=torch.int64)\n", (2063, 2097), False, 'import torch\n')] |
#%%
# Importing libraries
import sys
import json
import csv
import pandas as pd
import multiprocessing as mp
import itertools as it
seed_value = 113
# 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED'] = str(seed_value)
# 2. Set the `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
# 3. Set the `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
# 4. Set the `tensorflow` pseudo-random generator at a fixed value
import tensorflow as tf
tf.random.set_seed(seed_value)
from random import shuffle
from numpy.random import randint
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import BatchNormalization, LayerNormalization
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU, ReLU
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Activation
from tensorflow.keras.initializers import RandomNormal
print("- Importing is completed")
#%%
# Read in parameters
# The single CLI argument is a JSON settings file providing the input
# file locations ("oSourceList") and output path/name keys used below.
with open(sys.argv[1]) as jsonfile:
    input_settings = json.load(jsonfile)
# Training hyper-parameters.  The 5:1 critic/generator step ratio and the
# gradient penalty weight of 10 follow the WGAN-GP recipe.
module_size = 32
recomb_per_module = 16
batch_size = 64
discriminator_iter_num = 5
discriminator_learning_rate = 0.0002
generator_iter_num = 1
generator_learning_rate = 0.0002
n_epochs = 3
gradient_penalty_weight = 10
outpath = input_settings["oOutputPath"]
outname = input_settings["oOutputName"]
#%%
# File reading
# Edge lists (SOURCE/TARGET pairs), per-node embeddings and NaN-padded
# module-membership tables for the "N90" (train) and "N100" (test) networks.
N100_Ad = pd.read_csv(input_settings["oSourceList"]["oN100_Ad"],
                    delimiter=' ', names = ['SOURCE', 'TARGET'], header = 0)
N90_Ad = pd.read_csv(input_settings["oSourceList"]["oN90_Ad"],
                    delimiter=' ', names = ['SOURCE', 'TARGET'], header = 0)
N100_Em = pd.read_csv(input_settings["oSourceList"]["oN100_Em"],
                    delimiter=' ', header = None, index_col= 0)
N90_Em = pd.read_csv(input_settings["oSourceList"]["oN90_Em"],
                    delimiter=' ', header = None, index_col= 0)
N100_Mod = pd.read_csv(input_settings["oSourceList"]["oN100_Mod"],
                    delimiter='\t', header = None, index_col= 0)
N90_Mod = pd.read_csv(input_settings["oSourceList"]["oN90_Mod"],
                    delimiter='\t', header = None, index_col= 0)
print("- Reading is completed")
#%%
# Functions for handling the data processing
def generate_condition_and_linking_set(N_Mod_base, N_Em_base, N_Ad_base, N_Ad_plus_1,
        module_th_low, comb_per_module):
    """Build (link-id, embedding, base-adjacency, target-adjacency) samples.

    Fans the per-module work out over a multiprocessing pool — one task per
    row of N_Mod_base — and flattens the per-module sample lists into one
    flat list of samples.

    NOTE(review): all work sits under an ``if __name__ == "__main__":``
    guard, so when this module is imported rather than run as a script the
    function implicitly returns None — confirm that is intended.
    """
    n_cpu_worker = mp.cpu_count()
    if __name__ == "__main__":
        # Never spawn more workers than there are modules to process.
        if (n_cpu_worker >= len(N_Mod_base)):
            n_workers = len(N_Mod_base)
        else:
            n_workers = n_cpu_worker
        main_data = []
        with mp.Pool(processes = n_workers) as pool:
            # One starmap task per module index; the shared frames are
            # pickled into every task via it.repeat().
            main_data.extend(pool.starmap(handle_module, zip(it.repeat(N_Mod_base),
                it.repeat(N_Em_base), it.repeat(N_Ad_base), it.repeat(N_Ad_plus_1),
                it.repeat(module_th_low), it.repeat(comb_per_module), range(len(N_Mod_base)))))
        # Flatten: each worker returned a list of samples.
        flat_list = [item for sublist in main_data for item in sublist]
        return flat_list
def handle_module(N_Mod_base, N_Em_base, N_Ad_base, N_Ad_plus_1,
        module_th_low, comb_per_module, index):
    """Assemble matrices for one module row and sample sub-module examples.

    Builds the node-embedding matrix, the symmetrized base and "plus 1"
    adjacency matrices, and a matrix of "src_dst" link-id strings for
    module `index`, then samples `comb_per_module` fixed-size node subsets
    via generate_module_combinations().
    """
    main_data_set = []
    # Module rows are NaN-padded; the non-NaN count is the module size.
    m_size = np.count_nonzero(~np.isnan(N_Mod_base.iloc[index]))
    # NOTE(review): the embedding width is assumed to equal module_th_low —
    # confirm the embedding files actually have that many columns.
    embedding = np.zeros((m_size, module_th_low))
    adjacency_plus_1 = np.zeros((m_size, m_size))
    adjacency_base = np.zeros((m_size, m_size))
    link_ids = np.empty((m_size, m_size), dtype = object)
    for j in range(m_size):
        embedding[j, :] = np.asarray(N_Em_base.loc[int(N_Mod_base.iloc[index].iloc[j])])
        for l in range(m_size):
            # An edge in either direction marks both (j,l) and (l,j).
            if (((N_Ad_plus_1["SOURCE"] == int(N_Mod_base.iloc[index].iloc[j]))
                    & (N_Ad_plus_1["TARGET"] == int(N_Mod_base.iloc[index].iloc[l]))).any()):
                adjacency_plus_1[j, l] = 1
                adjacency_plus_1[l, j] = 1
            if (((N_Ad_base["SOURCE"] == int(N_Mod_base.iloc[index].iloc[j]))
                    & (N_Ad_base["TARGET"] == int(N_Mod_base.iloc[index].iloc[l]))).any()):
                adjacency_base[j, l] = 1
                adjacency_base[l, j] = 1
            link_ids[j, l] = str(int(N_Mod_base.iloc[index].iloc[j])) + '_' + str(int(N_Mod_base.iloc[index].iloc[l]))
    main_data_set.extend(generate_module_combinations(
        embedding, adjacency_base, adjacency_plus_1, link_ids, module_th_low, comb_per_module))
    print('processed_modules: %d / %d' % (index+1, len(N_Mod_base)))
    return main_data_set
def generate_module_combinations(embedding, adjacency_base, adjacency_plus_1,
        link_ids, module_th_low, comb_per_module):
    """Sample fixed-size node subsets of one module.

    Enumerates every ``module_th_low``-node combination of the module's
    nodes, draws ``comb_per_module`` of them at random, and for each draw
    returns [link_ids, embedding, base adjacency, plus-1 adjacency]
    restricted to the chosen nodes.
    """
    node_indices = list(range(0, np.shape(adjacency_plus_1)[0]))
    all_combos = list(it.combinations(node_indices, module_th_low))
    chosen = random.sample(all_combos, comb_per_module)
    samples = []
    for combo in chosen:
        sub_embedding = np.take(embedding, combo, 0)
        sub_adj_base = np.take(np.take(adjacency_base, combo, 0), combo, 1)
        sub_adj_plus = np.take(np.take(adjacency_plus_1, combo, 0), combo, 1)
        sub_link_ids = np.take(np.take(link_ids, combo, 0), combo, 1)
        samples.append([sub_link_ids, sub_embedding,
                        sub_adj_base, sub_adj_plus])
    return samples
#%%
# Functions for building the Descriminator and the Generator models
def build_discriminator(module_dim):
    """Critic over (condition, adjacency, linking) matrix triples.

    The three module_dim x module_dim inputs are stacked into a 3-channel
    image and scored by four strided-conv blocks followed by a single
    linear unit (no sigmoid — a WGAN critic score, not a probability).
    """
    init = RandomNormal(stddev=0.02)
    condition_input = Input(shape=(module_dim, module_dim))
    adjacency_input = Input(shape=(module_dim, module_dim))
    linking_input = Input(shape=(module_dim, module_dim))
    # Each square matrix becomes a 1-channel image; stack along channels.
    stacked = Concatenate()([
        Reshape((module_dim, module_dim, 1))(condition_input),
        Reshape((module_dim, module_dim, 1))(adjacency_input),
        Reshape((module_dim, module_dim, 1))(linking_input),
    ])
    # Four downsampling blocks: Conv2D -> LayerNorm -> LeakyReLU.
    net = stacked
    for n_filters in (64, 128, 256, 512):
        net = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(net)
        net = LayerNormalization()(net, training=True)
        net = LeakyReLU(alpha=0.2)(net)
    net = Flatten()(net)
    net = Dropout(0.4)(net)
    score = Dense(1, activation='linear')(net)
    return Model([condition_input, adjacency_input, linking_input], score)
def define_encoder_block(layer_in, n_filters, batchnorm=True):
    """One U-Net encoder stage: strided 4x4 conv, optional LayerNorm, ReLU."""
    weight_init = RandomNormal(stddev=0.02)
    out = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=weight_init)(layer_in)
    if batchnorm:
        out = LayerNormalization()(out, training=True)
    return Activation('relu')(out)
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
    """One U-Net decoder stage: transposed-conv upsample, LayerNorm,
    optional dropout, concatenation with the encoder skip, then ReLU."""
    weight_init = RandomNormal(stddev=0.02)
    out = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=weight_init)(layer_in)
    out = LayerNormalization()(out, training=True)
    if dropout:
        out = Dropout(0.4)(out, training=True)
    out = Concatenate()([out, skip_in])
    return Activation('relu')(out)
def build_generator(module_dim):
    """U-Net generator: (condition, base adjacency) -> predicted link matrix.

    Encoder/decoder with skip connections; the final sigmoid yields a
    module_dim x module_dim matrix of per-link confidences in [0, 1].
    """
    init = RandomNormal(stddev=0.02)
    condition_input = Input(shape=(module_dim, module_dim))
    condition = Reshape((module_dim, module_dim, 1))(condition_input)
    adjacency_input = Input(shape=(module_dim, module_dim))
    adjacency = Reshape((module_dim, module_dim, 1))(adjacency_input)
    # Stack the two inputs as a 2-channel image.
    merge = Concatenate()([condition, adjacency])
    # Encoder: halve spatial size while widening channels at each stage.
    e1 = define_encoder_block(merge, 64)
    e2 = define_encoder_block(e1, 128)
    e3 = define_encoder_block(e2, 256)
    e4 = define_encoder_block(e3, 512)
    # Bottleneck.
    b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e4)
    b = Activation('relu')(b)
    # Decoder with skip connections back to the matching encoder stages.
    d4 = decoder_block(b, e4, 512)
    d5 = decoder_block(d4, e3, 256, dropout=False)
    d6 = decoder_block(d5, e2, 128, dropout=False)
    d7 = decoder_block(d6, e1, 64, dropout=False)
    g = Conv2DTranspose(1, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
    out_layer = Reshape((module_dim, module_dim))(g)
    out_layer = Activation('sigmoid')(out_layer)
    model = Model([condition_input, adjacency_input], out_layer)
    return model
#%%
# Functions for redefining the training and loss calculation
def calculate_discriminator_loss(d_real_output, d_fake_output):
    """Wasserstein critic loss: mean fake score minus mean real score."""
    fake_mean = tf.reduce_mean(d_fake_output)
    real_mean = tf.reduce_mean(d_real_output)
    return fake_mean - real_mean
def calculate_generator_loss(d_fake_output, real_image, fake_image):
    """Wasserstein generator loss: maximize the critic's fake score.

    `real_image` and `fake_image` are accepted for interface compatibility
    (an L1/BCE reconstruction term was previously experimented with here)
    but do not contribute to the returned loss.
    """
    wasserstein_term = -tf.reduce_mean(d_fake_output)
    return 1 * wasserstein_term  # weight of the Wasserstein term kept at 1
def generate_fake_samples(g_model, conditions, base_adjacency):
    """Predict fake linking matrices for a batch of condition/adjacency pairs."""
    return g_model.predict([conditions, base_adjacency])
def calculate_gradient_penalty(d_model, conditions, base_adjacency, real_sample, fake_sample, batch_num):
    """WGAN-GP gradient penalty on random real/fake interpolates.

    Draws one epsilon per batch element, interpolates the real and fake
    linking matrices, and penalizes the squared deviation of the critic's
    per-sample gradient norm from 1.
    """
    epsilon = tf.random.uniform(shape=[batch_num, 1, 1], minval = 0, maxval = 1)
    interpolate_sample = epsilon * tf.dtypes.cast(real_sample, tf.float32)
    interpolate_sample = interpolate_sample + ((1 - epsilon) * fake_sample)
    with tf.GradientTape() as gtape:
        # interpolate_sample is not a variable, so it must be watched
        # explicitly for the tape to record gradients w.r.t. it.
        gtape.watch(interpolate_sample)
        d_interpolate_output = d_model([conditions, base_adjacency, interpolate_sample], training=True)
    gradients = gtape.gradient(d_interpolate_output, [interpolate_sample])[0]
    # Per-sample L2 norm of the gradient, reduced over the matrix axes.
    norm = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2]))
    gp = tf.reduce_mean((norm - 1.0) ** 2)
    return gp
def train_discriminator(d_model, g_model, batch_num, d_iter,
        conditions, base_adjacency, linkings, d_optimizer, gp_weight):
    """Run `d_iter` critic updates on one batch; return the last loss.

    The loss is the Wasserstein critic loss plus the gradient penalty
    scaled by `gp_weight`.
    """
    for i in range(d_iter):
        with tf.GradientTape() as gtape:
            # Generate fresh fakes for every critic step.
            g_output = g_model([conditions, base_adjacency], training = True)
            d_fake_output = d_model([conditions, base_adjacency, g_output], training=True)
            d_real_output = d_model([conditions, base_adjacency, linkings], training=True)
            d_cost = calculate_discriminator_loss(d_real_output, d_fake_output)
            gp = calculate_gradient_penalty(d_model, conditions, base_adjacency, linkings, g_output, batch_num)
            d_loss = (d_cost + gp * gp_weight) * 1
        d_gradient = gtape.gradient(d_loss, d_model.trainable_variables)
        d_optimizer.apply_gradients(zip(d_gradient, d_model.trainable_variables))
    return d_loss
def train_generator(d_model, g_model, batch_num, g_iter,
        conditions, base_adjacency, linkings, g_optimizer):
    """Run `g_iter` generator updates on one batch; return the last loss."""
    for i in range(g_iter):
        with tf.GradientTape() as gtape:
            g_output = g_model([conditions, base_adjacency], training = True)
            d_fake_output = d_model([conditions, base_adjacency, g_output], training=True)
            g_loss = calculate_generator_loss(d_fake_output, linkings, g_output)
        g_gradient = gtape.gradient(g_loss, g_model.trainable_variables)
        g_optimizer.apply_gradients(zip(g_gradient, g_model.trainable_variables))
    return g_loss
def train_gan(d_model, g_model, epochs_num, data_train, data_test, batch_num,
        d_iter, g_iter, d_optimizer, g_optimizer, gp_weight):
    """Full WGAN-GP training loop; saves the generator and writes results.

    Each sample in `data_train`/`data_test` is a 4-element list:
    [link_ids, embedding(condition), base adjacency, target linking].
    """
    for epoch in range(epochs_num):
        print("- Shuffling train")
        # In-place shuffle so batches differ between epochs.
        shuffle(data_train)
        print("- Converting train")
        data_train_n = np.asarray(data_train)
        print("- Slicing train")
        # Columns 1..3 are condition / base adjacency / target linking
        # (column 0 holds the string link ids, unused during training).
        conditions_train = np.asarray(data_train_n[:, 1, :, :]).astype('float32')
        base_linkings_train = np.asarray(data_train_n[:, 2, :, :]).astype('float32')
        linkings_train = np.asarray(data_train_n[:, 3, :, :]).astype('float32')
        data_train_n = None  # release the large intermediate array
        for iteration in range(len(data_train)):
            # Stop when the next full batch would run off the end, and cap
            # each epoch at ~2000 iterations.
            if (iteration * batch_num + batch_num > len(data_train)
                    or iteration > 2000):
                break
            condition_batch = conditions_train[iteration * batch_num : (iteration + 1) * batch_num]
            adjacency_batch = base_linkings_train[iteration * batch_num : (iteration + 1) * batch_num]
            linking_batch = linkings_train[iteration * batch_num : (iteration + 1) * batch_num]
            d_loss = train_discriminator(d_model, g_model, batch_num, d_iter,
                    condition_batch, adjacency_batch, linking_batch, d_optimizer, gp_weight)
            g_loss = train_generator(d_model, g_model, batch_num, g_iter,
                    condition_batch, adjacency_batch, linking_batch, g_optimizer)
            print('>%d, %d, d_loss=%.3f, g_loss=%.3f' %(epoch+1, iteration, d_loss, g_loss))
    print("- Training is completed")
    g_model.save(outpath + outname + '_generator.h5')
    generate_results(g_model, data_test)
def run_test_batch(g_model, batch_num, conditions_test,
        base_linkings_test, linkings_test):
    """AUROC of generated link confidences on one random test batch.

    Only candidate links absent from the base adjacency (cells != 1) are
    scored, since existing links are fed to the generator as input.
    """
    index = randint(0, len(base_linkings_test) - batch_num)
    condition_batch = conditions_test[index : index + batch_num]
    linking_batch = linkings_test[index : index + batch_num]
    base_batch = base_linkings_test[index : index + batch_num]
    filtered_adjacency_list = []
    filtered_confidency_list = []
    test_sample = generate_fake_samples(g_model, condition_batch, base_batch)
    for i in range(len(base_batch)):
        # Mask out links already present in the base adjacency.
        link_filter = (base_batch[i].flatten()) != 1
        filtered_adjacency_list.extend(((linking_batch[i].flatten())[link_filter]).reshape((-1, 1)))
        filtered_confidency_list.extend(((test_sample[i].flatten())[link_filter]).reshape((-1, 1)))
    metric3 = tf.keras.metrics.AUC(num_thresholds=50, curve='ROC')
    metric3.update_state(filtered_adjacency_list, filtered_confidency_list)
    return metric3.result().numpy()
def generate_results(g_model, data_test):
    """Score every candidate link of the test set and dump a results CSV.

    Writes ``<outpath><outname>_results.csv`` with '|'-separated rows of
    (link id, generated confidence), keeping only links absent from the
    base adjacency.
    """
    print("- Shuffling test")
    shuffle(data_test)
    print("- Converting test")
    data_test_n = np.asarray(data_test)
    print("- Slicing test")
    # Column 0: "src_dst" link-id strings; 1: condition; 2: base adjacency.
    link_ids_test = np.asarray(data_test_n[:, 0, :, :]).astype('str')
    conditions_test = np.asarray(data_test_n[:, 1, :, :]).astype('float32')
    base_linkings_test = np.asarray(data_test_n[:, 2, :, :]).astype('float32')
    data_test_n = None  # release the large intermediate array
    filtered_ids_list = []
    filtered_confidency_list = []
    test_sample = generate_fake_samples(g_model, conditions_test, base_linkings_test)
    for i in range(len(base_linkings_test)):
        # Only score links that are not already present in the base graph.
        link_filter = (base_linkings_test[i].flatten()) != 1
        filtered_ids_list.extend(((link_ids_test[i].flatten())[link_filter]).reshape((-1, 1)))
        filtered_confidency_list.extend(((test_sample[i].flatten())[link_filter]).reshape((-1, 1)))
    with open(outpath + outname + "_results.csv","w+") as my_csv:
        csvWriter = csv.writer(my_csv,delimiter='|')
        csvWriter.writerows(zip(
            np.asarray(filtered_ids_list),
            np.asarray(filtered_confidency_list)))
# Build the WGAN-GP models and optimizers, assemble both datasets, train.
Discriminator = build_discriminator(module_size)
Generator = build_generator(module_size)
print("- GAN building is completed")
disc_opt = tf.keras.optimizers.Adam(learning_rate=discriminator_learning_rate,
                        beta_1=0.9,
                        beta_2=0.99)
gen_opt = tf.keras.optimizers.Adam(learning_rate=generator_learning_rate,
                        beta_1=0.9,
                        beta_2=0.99)
# Train on the N90 network (16 sampled combinations per module); evaluate
# on the full N100 network with one combination per module.
train_data = generate_condition_and_linking_set(N90_Mod, N90_Em, N90_Ad,
                        N100_Ad, module_size, recomb_per_module)
test_data = generate_condition_and_linking_set(N100_Mod, N100_Em, N100_Ad,
                        N100_Ad, module_size, 1)
print("- Data processing is completed")
train_gan(Discriminator, Generator, n_epochs, train_data, test_data, batch_size,
        discriminator_iter_num, generator_iter_num, disc_opt, gen_opt, gradient_penalty_weight)
| [
"tensorflow.random.set_seed",
"numpy.random.seed",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"numpy.empty",
"random.sample",
"random.shuffle",
"tensorflow.keras.layers.LayerNormalization",
"numpy.isnan",
"numpy.shape",
"tensorflow.keras.layers.Leaky... | [((364, 387), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (375, 387), False, 'import random\n'), ((470, 496), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (484, 496), True, 'import numpy as np\n'), ((589, 619), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed_value'], {}), '(seed_value)\n', (607, 619), True, 'import tensorflow as tf\n'), ((1784, 1896), 'pandas.read_csv', 'pd.read_csv', (["input_settings['oSourceList']['oN100_Ad']"], {'delimiter': '""" """', 'names': "['SOURCE', 'TARGET']", 'header': '(0)'}), "(input_settings['oSourceList']['oN100_Ad'], delimiter=' ', names\n =['SOURCE', 'TARGET'], header=0)\n", (1795, 1896), True, 'import pandas as pd\n'), ((1929, 2040), 'pandas.read_csv', 'pd.read_csv', (["input_settings['oSourceList']['oN90_Ad']"], {'delimiter': '""" """', 'names': "['SOURCE', 'TARGET']", 'header': '(0)'}), "(input_settings['oSourceList']['oN90_Ad'], delimiter=' ', names=\n ['SOURCE', 'TARGET'], header=0)\n", (1940, 2040), True, 'import pandas as pd\n'), ((2075, 2174), 'pandas.read_csv', 'pd.read_csv', (["input_settings['oSourceList']['oN100_Em']"], {'delimiter': '""" """', 'header': 'None', 'index_col': '(0)'}), "(input_settings['oSourceList']['oN100_Em'], delimiter=' ',\n header=None, index_col=0)\n", (2086, 2174), True, 'import pandas as pd\n'), ((2207, 2306), 'pandas.read_csv', 'pd.read_csv', (["input_settings['oSourceList']['oN90_Em']"], {'delimiter': '""" """', 'header': 'None', 'index_col': '(0)'}), "(input_settings['oSourceList']['oN90_Em'], delimiter=' ', header\n =None, index_col=0)\n", (2218, 2306), True, 'import pandas as pd\n'), ((2341, 2442), 'pandas.read_csv', 'pd.read_csv', (["input_settings['oSourceList']['oN100_Mod']"], {'delimiter': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(input_settings['oSourceList']['oN100_Mod'], delimiter='\\t',\n header=None, index_col=0)\n", (2352, 2442), True, 'import pandas as pd\n'), 
((2477, 2577), 'pandas.read_csv', 'pd.read_csv', (["input_settings['oSourceList']['oN90_Mod']"], {'delimiter': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(input_settings['oSourceList']['oN90_Mod'], delimiter='\\t',\n header=None, index_col=0)\n", (2488, 2577), True, 'import pandas as pd\n'), ((17110, 17207), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'discriminator_learning_rate', 'beta_1': '(0.9)', 'beta_2': '(0.99)'}), '(learning_rate=discriminator_learning_rate, beta_1=\n 0.9, beta_2=0.99)\n', (17134, 17207), True, 'import tensorflow as tf\n'), ((17288, 17380), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'generator_learning_rate', 'beta_1': '(0.9)', 'beta_2': '(0.99)'}), '(learning_rate=generator_learning_rate, beta_1=0.9,\n beta_2=0.99)\n', (17312, 17380), True, 'import tensorflow as tf\n'), ((1430, 1449), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (1439, 1449), False, 'import json\n'), ((2877, 2891), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2889, 2891), True, 'import multiprocessing as mp\n'), ((3819, 3852), 'numpy.zeros', 'np.zeros', (['(m_size, module_th_low)'], {}), '((m_size, module_th_low))\n', (3827, 3852), True, 'import numpy as np\n'), ((3876, 3902), 'numpy.zeros', 'np.zeros', (['(m_size, m_size)'], {}), '((m_size, m_size))\n', (3884, 3902), True, 'import numpy as np\n'), ((3924, 3950), 'numpy.zeros', 'np.zeros', (['(m_size, m_size)'], {}), '((m_size, m_size))\n', (3932, 3950), True, 'import numpy as np\n'), ((3966, 4006), 'numpy.empty', 'np.empty', (['(m_size, m_size)'], {'dtype': 'object'}), '((m_size, m_size), dtype=object)\n', (3974, 4006), True, 'import numpy as np\n'), ((5437, 5481), 'random.sample', 'random.sample', (['combinations', 'comb_per_module'], {}), '(combinations, comb_per_module)\n', (5450, 5481), False, 'import random\n'), ((6555, 6580), 'tensorflow.keras.initializers.RandomNormal', 
'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (6567, 6580), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((6604, 6641), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(module_dim, module_dim)'}), '(shape=(module_dim, module_dim))\n', (6609, 6641), False, 'from tensorflow.keras.layers import Input\n'), ((6739, 6776), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(module_dim, module_dim)'}), '(shape=(module_dim, module_dim))\n', (6744, 6776), False, 'from tensorflow.keras.layers import Input\n'), ((6872, 6909), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(module_dim, module_dim)'}), '(shape=(module_dim, module_dim))\n', (6877, 6909), False, 'from tensorflow.keras.layers import Input\n'), ((7959, 8026), 'tensorflow.keras.models.Model', 'Model', (['[condition_input, adjacency_input, linking_input]', 'out_layer'], {}), '([condition_input, adjacency_input, linking_input], out_layer)\n', (7964, 8026), False, 'from tensorflow.keras.models import Model\n'), ((8125, 8150), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (8137, 8150), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((8446, 8471), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (8458, 8471), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((8819, 8844), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (8831, 8844), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((8868, 8905), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(module_dim, module_dim)'}), '(shape=(module_dim, module_dim))\n', (8873, 8905), False, 'from tensorflow.keras.layers import Input\n'), ((9003, 9040), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(module_dim, module_dim)'}), '(shape=(module_dim, 
module_dim))\n', (9008, 9040), False, 'from tensorflow.keras.layers import Input\n'), ((9845, 9897), 'tensorflow.keras.models.Model', 'Model', (['[condition_input, adjacency_input]', 'out_layer'], {}), '([condition_input, adjacency_input], out_layer)\n', (9850, 9897), False, 'from tensorflow.keras.models import Model\n'), ((10749, 10811), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[batch_num, 1, 1]', 'minval': '(0)', 'maxval': '(1)'}), '(shape=[batch_num, 1, 1], minval=0, maxval=1)\n', (10766, 10811), True, 'import tensorflow as tf\n'), ((11319, 11352), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((norm - 1.0) ** 2)'], {}), '((norm - 1.0) ** 2)\n', (11333, 11352), True, 'import tensorflow as tf\n'), ((15623, 15675), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'num_thresholds': '(50)', 'curve': '"""ROC"""'}), "(num_thresholds=50, curve='ROC')\n", (15643, 15675), True, 'import tensorflow as tf\n'), ((15875, 15893), 'random.shuffle', 'shuffle', (['data_test'], {}), '(data_test)\n', (15882, 15893), False, 'from random import shuffle\n'), ((15943, 15964), 'numpy.asarray', 'np.asarray', (['data_test'], {}), '(data_test)\n', (15953, 15964), True, 'import numpy as np\n'), ((5367, 5410), 'itertools.combinations', 'it.combinations', (['all_indeces', 'module_th_low'], {}), '(all_indeces, module_th_low)\n', (5382, 5410), True, 'import itertools as it\n'), ((5635, 5679), 'numpy.take', 'np.take', (['embedding', 'index_combinations[i]', '(0)'], {}), '(embedding, index_combinations[i], 0)\n', (5642, 5679), True, 'import numpy as np\n'), ((5717, 5766), 'numpy.take', 'np.take', (['adjacency_base', 'index_combinations[i]', '(0)'], {}), '(adjacency_base, index_combinations[i], 0)\n', (5724, 5766), True, 'import numpy as np\n'), ((5804, 5865), 'numpy.take', 'np.take', (['adjacency_base_combination', 'index_combinations[i]', '(1)'], {}), '(adjacency_base_combination, index_combinations[i], 1)\n', (5811, 5865), True, 'import numpy as np\n'), 
((5903, 5954), 'numpy.take', 'np.take', (['adjacency_plus_1', 'index_combinations[i]', '(0)'], {}), '(adjacency_plus_1, index_combinations[i], 0)\n', (5910, 5954), True, 'import numpy as np\n'), ((5992, 6053), 'numpy.take', 'np.take', (['adjacency_plus_combination', 'index_combinations[i]', '(1)'], {}), '(adjacency_plus_combination, index_combinations[i], 1)\n', (5999, 6053), True, 'import numpy as np\n'), ((6084, 6127), 'numpy.take', 'np.take', (['link_ids', 'index_combinations[i]', '(0)'], {}), '(link_ids, index_combinations[i], 0)\n', (6091, 6127), True, 'import numpy as np\n'), ((6158, 6212), 'numpy.take', 'np.take', (['linking_combination', 'index_combinations[i]', '(1)'], {}), '(linking_combination, index_combinations[i], 1)\n', (6165, 6212), True, 'import numpy as np\n'), ((6658, 6694), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(module_dim, module_dim, 1)'], {}), '((module_dim, module_dim, 1))\n', (6665, 6694), False, 'from tensorflow.keras.layers import Reshape\n'), ((6793, 6829), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(module_dim, module_dim, 1)'], {}), '((module_dim, module_dim, 1))\n', (6800, 6829), False, 'from tensorflow.keras.layers import Reshape\n'), ((6924, 6960), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(module_dim, module_dim, 1)'], {}), '((module_dim, module_dim, 1))\n', (6931, 6960), False, 'from tensorflow.keras.layers import Reshape\n'), ((6993, 7006), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (7004, 7006), False, 'from tensorflow.keras.layers import Concatenate\n'), ((7058, 7133), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (7064, 7133), False, 'from tensorflow.keras.layers import Conv2D\n'), ((7152, 7172), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {}), '()\n', (7170, 7172), False, 
'from tensorflow.keras.layers import BatchNormalization, LayerNormalization\n'), ((7209, 7229), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7218, 7229), False, 'from tensorflow.keras.layers import LeakyReLU, ReLU\n'), ((7252, 7328), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (7258, 7328), False, 'from tensorflow.keras.layers import Conv2D\n'), ((7348, 7368), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {}), '()\n', (7366, 7368), False, 'from tensorflow.keras.layers import BatchNormalization, LayerNormalization\n'), ((7405, 7425), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7414, 7425), False, 'from tensorflow.keras.layers import LeakyReLU, ReLU\n'), ((7448, 7524), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (7454, 7524), False, 'from tensorflow.keras.layers import Conv2D\n'), ((7544, 7564), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {}), '()\n', (7562, 7564), False, 'from tensorflow.keras.layers import BatchNormalization, LayerNormalization\n'), ((7601, 7621), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7610, 7621), False, 'from tensorflow.keras.layers import LeakyReLU, ReLU\n'), ((7644, 7720), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (7650, 7720), False, 'from tensorflow.keras.layers import Conv2D\n'), ((7740, 7760), 
'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {}), '()\n', (7758, 7760), False, 'from tensorflow.keras.layers import BatchNormalization, LayerNormalization\n'), ((7797, 7817), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (7806, 7817), False, 'from tensorflow.keras.layers import LeakyReLU, ReLU\n'), ((7840, 7849), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7847, 7849), False, 'from tensorflow.keras.layers import Flatten\n'), ((7871, 7883), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (7878, 7883), False, 'from tensorflow.keras.layers import Dropout\n'), ((7908, 7937), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (7913, 7937), False, 'from tensorflow.keras.layers import Dense\n'), ((8159, 8245), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)\n", (8165, 8245), False, 'from tensorflow.keras.layers import Conv2D\n'), ((8329, 8347), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8339, 8347), False, 'from tensorflow.keras.layers import Activation\n'), ((8481, 8576), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['n_filters', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(n_filters, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)\n", (8496, 8576), False, 'from tensorflow.keras.layers import Conv2DTranspose\n'), ((8589, 8609), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {}), '()\n', (8607, 8609), False, 'from tensorflow.keras.layers import BatchNormalization, LayerNormalization\n'), ((8697, 8710), 'tensorflow.keras.layers.Concatenate', 'Concatenate', 
([], {}), '()\n', (8708, 8710), False, 'from tensorflow.keras.layers import Concatenate\n'), ((8733, 8751), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (8743, 8751), False, 'from tensorflow.keras.layers import Activation\n'), ((8922, 8958), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(module_dim, module_dim, 1)'], {}), '((module_dim, module_dim, 1))\n', (8929, 8958), False, 'from tensorflow.keras.layers import Reshape\n'), ((9057, 9093), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(module_dim, module_dim, 1)'], {}), '((module_dim, module_dim, 1))\n', (9064, 9093), False, 'from tensorflow.keras.layers import Reshape\n'), ((9128, 9141), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (9139, 9141), False, 'from tensorflow.keras.layers import Concatenate\n'), ((9334, 9410), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (9340, 9410), False, 'from tensorflow.keras.layers import Conv2D\n'), ((9421, 9439), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9431, 9439), False, 'from tensorflow.keras.layers import Activation\n'), ((9640, 9727), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(1)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(1, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)\n", (9655, 9727), False, 'from tensorflow.keras.layers import Conv2DTranspose\n'), ((9742, 9775), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(module_dim, module_dim)'], {}), '((module_dim, module_dim))\n', (9749, 9775), False, 'from tensorflow.keras.layers import Reshape\n'), ((9795, 9816), 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9805, 9816), False, 'from 
tensorflow.keras.layers import Activation\n'), ((10063, 10092), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_fake_output'], {}), '(d_fake_output)\n', (10077, 10092), True, 'import tensorflow as tf\n'), ((10095, 10124), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_real_output'], {}), '(d_real_output)\n', (10109, 10124), True, 'import tensorflow as tf\n'), ((10401, 10430), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_fake_output'], {}), '(d_fake_output)\n', (10415, 10430), True, 'import tensorflow as tf\n'), ((10851, 10890), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', (['real_sample', 'tf.float32'], {}), '(real_sample, tf.float32)\n', (10865, 10890), True, 'import tensorflow as tf\n'), ((10977, 10994), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10992, 10994), True, 'import tensorflow as tf\n'), ((13241, 13260), 'random.shuffle', 'shuffle', (['data_train'], {}), '(data_train)\n', (13248, 13260), False, 'from random import shuffle\n'), ((13320, 13342), 'numpy.asarray', 'np.asarray', (['data_train'], {}), '(data_train)\n', (13330, 13342), True, 'import numpy as np\n'), ((16808, 16841), 'csv.writer', 'csv.writer', (['my_csv'], {'delimiter': '"""|"""'}), "(my_csv, delimiter='|')\n", (16818, 16841), False, 'import csv\n'), ((3137, 3165), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'n_workers'}), '(processes=n_workers)\n', (3144, 3165), True, 'import multiprocessing as mp\n'), ((3769, 3801), 'numpy.isnan', 'np.isnan', (['N_Mod_base.iloc[index]'], {}), '(N_Mod_base.iloc[index])\n', (3777, 3801), True, 'import numpy as np\n'), ((8281, 8301), 'tensorflow.keras.layers.LayerNormalization', 'LayerNormalization', ([], {}), '()\n', (8299, 8301), False, 'from tensorflow.keras.layers import BatchNormalization, LayerNormalization\n'), ((8657, 8669), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (8664, 8669), False, 'from tensorflow.keras.layers import Dropout\n'), ((11274, 11294), 'tensorflow.square', 'tf.square', 
(['gradients'], {}), '(gradients)\n', (11283, 11294), True, 'import tensorflow as tf\n'), ((11568, 11585), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (11583, 11585), True, 'import tensorflow as tf\n'), ((12501, 12518), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (12516, 12518), True, 'import tensorflow as tf\n'), ((16013, 16048), 'numpy.asarray', 'np.asarray', (['data_test_n[:, 0, :, :]'], {}), '(data_test_n[:, 0, :, :])\n', (16023, 16048), True, 'import numpy as np\n'), ((16085, 16120), 'numpy.asarray', 'np.asarray', (['data_test_n[:, 1, :, :]'], {}), '(data_test_n[:, 1, :, :])\n', (16095, 16120), True, 'import numpy as np\n'), ((16164, 16199), 'numpy.asarray', 'np.asarray', (['data_test_n[:, 2, :, :]'], {}), '(data_test_n[:, 2, :, :])\n', (16174, 16199), True, 'import numpy as np\n'), ((5311, 5337), 'numpy.shape', 'np.shape', (['adjacency_plus_1'], {}), '(adjacency_plus_1)\n', (5319, 5337), True, 'import numpy as np\n'), ((13403, 13439), 'numpy.asarray', 'np.asarray', (['data_train_n[:, 1, :, :]'], {}), '(data_train_n[:, 1, :, :])\n', (13413, 13439), True, 'import numpy as np\n'), ((13488, 13524), 'numpy.asarray', 'np.asarray', (['data_train_n[:, 2, :, :]'], {}), '(data_train_n[:, 2, :, :])\n', (13498, 13524), True, 'import numpy as np\n'), ((13568, 13604), 'numpy.asarray', 'np.asarray', (['data_train_n[:, 3, :, :]'], {}), '(data_train_n[:, 3, :, :])\n', (13578, 13604), True, 'import numpy as np\n'), ((16886, 16915), 'numpy.asarray', 'np.asarray', (['filtered_ids_list'], {}), '(filtered_ids_list)\n', (16896, 16915), True, 'import numpy as np\n'), ((16930, 16966), 'numpy.asarray', 'np.asarray', (['filtered_confidency_list'], {}), '(filtered_confidency_list)\n', (16940, 16966), True, 'import numpy as np\n'), ((3247, 3268), 'itertools.repeat', 'it.repeat', (['N_Mod_base'], {}), '(N_Mod_base)\n', (3256, 3268), True, 'import itertools as it\n'), ((3291, 3311), 'itertools.repeat', 'it.repeat', (['N_Em_base'], {}), 
'(N_Em_base)\n', (3300, 3311), True, 'import itertools as it\n'), ((3313, 3333), 'itertools.repeat', 'it.repeat', (['N_Ad_base'], {}), '(N_Ad_base)\n', (3322, 3333), True, 'import itertools as it\n'), ((3335, 3357), 'itertools.repeat', 'it.repeat', (['N_Ad_plus_1'], {}), '(N_Ad_plus_1)\n', (3344, 3357), True, 'import itertools as it\n'), ((3380, 3404), 'itertools.repeat', 'it.repeat', (['module_th_low'], {}), '(module_th_low)\n', (3389, 3404), True, 'import itertools as it\n'), ((3406, 3432), 'itertools.repeat', 'it.repeat', (['comb_per_module'], {}), '(comb_per_module)\n', (3415, 3432), True, 'import itertools as it\n')] |
import numpy as np
DAYS = 256
# with open("./input.txt", "r") as f:
# raw = f.read()
# fish = np.array([int(x) for x in raw.split(",")])
fish = np.loadtxt("./input.txt", delimiter=",", dtype=np.uint64)
def compact(fish, days=None):
    """Simulate exponential lanternfish growth and return the final count.

    Rather than tracking every fish, keep a 9-slot histogram of internal
    timer values (0..8); each simulated day is then O(1): fish at 0 reset
    to 6 and each spawns a newborn at 8.

    Parameters
    ----------
    fish : array-like of int
        Initial timer values, each in 0..8.
    days : int, optional
        Number of days to simulate.  Defaults to the module-level ``DAYS``.

    Returns
    -------
    numpy.uint64
        Total number of fish after the simulation.
    """
    if days is None:
        days = DAYS
    # Histogram over all 9 possible timer values.  The previous version only
    # counted timers 0..5 (range(6)), silently dropping fish starting at 6-8.
    counts = np.bincount(np.asarray(fish, dtype=np.intp),
                         minlength=9).astype(np.uint64)
    for _ in range(days):
        spawning = counts[0]       # fish whose timer hits zero today
        counts[:-1] = counts[1:]   # every other timer ticks down by one
        counts[6] += spawning      # parents reset to 6
        counts[8] = spawning       # newborns start at 8
    return np.sum(counts)
print(compact(fish))
| [
"numpy.sum",
"numpy.zeros",
"numpy.loadtxt"
] | [((151, 208), 'numpy.loadtxt', 'np.loadtxt', (['"""./input.txt"""'], {'delimiter': '""","""', 'dtype': 'np.uint64'}), "('./input.txt', delimiter=',', dtype=np.uint64)\n", (161, 208), True, 'import numpy as np\n'), ((240, 262), 'numpy.zeros', 'np.zeros', (['(9)', 'np.uint64'], {}), '(9, np.uint64)\n', (248, 262), True, 'import numpy as np\n'), ((487, 498), 'numpy.sum', 'np.sum', (['rle'], {}), '(rle)\n', (493, 498), True, 'import numpy as np\n'), ((287, 304), 'numpy.sum', 'np.sum', (['(fish == k)'], {}), '(fish == k)\n', (293, 304), True, 'import numpy as np\n')] |
import os
import gzip
import struct
import tarfile
import pickle
import shutil
import numpy as np
import tables as tb
from .timerx import timer
def extractall(tar_name, root):
    """Extract a tar archive into *root* and return its top-level path.

    Parameters
    ----------
    tar_name : str
        Path of the tar archive (any compression tarfile auto-detects).
    root : str
        Directory the archive is extracted into.

    Returns
    -------
    str
        ``root`` joined with the archive's first member name, i.e. the
        directory the archive unpacked to.
    """
    with tarfile.open(tar_name) as archive:
        archive.extractall(root)
        top_level = archive.getnames()[0]
    return os.path.join(root, top_level)
class MNIST(dict):
    """Loader for the MNIST / Fashion-MNIST idx archives.

    The instance is a ``dict`` whose ``__dict__`` is itself, so every
    attribute (``trainX``, ``trainY``, ``testX``, ``testY``, ``url``,
    ``label_names``, ``namespace``) is also available as a dict entry.
    Each sample is an image (3D ndarray) with shape (28, 28, 1).
    """

    def __init__(self, root, namespace, *args, **kw):
        """
        Parameters
        ----------
        root : str
            Data root directory, e.g. ``'E:/Data/Zip/'``; the four ``.gz``
            archives are expected under ``root/namespace``.
        namespace : str
            ``'mnist'`` (http://yann.lecun.com/exdb/mnist) or
            ``'fashion_mnist'``
            (https://github.com/zalandoresearch/fashion-mnist).
        """
        super().__init__(*args, **kw)
        self.__dict__ = self  # alias attributes and dict items
        sources = {
            'mnist': ('http://yann.lecun.com/exdb/mnist',
                      tuple(range(10))),
            'fashion_mnist': (
                'https://github.com/zalandoresearch/fashion-mnist',
                ('T-shirt/top', 'Trouser', 'Pullover', 'Dress',
                 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag',
                 'Ankle boot')),
        }
        # Unknown namespaces (as before) simply get no url/label_names.
        if namespace in sources:
            self.url, self.label_names = sources[namespace]
        self.namespace = os.path.join(root, namespace)
        self._dataset(self.namespace)

    def _get_data(self, root, _train):
        """Read one split (images + labels) from the idx ``.gz`` archives.

        The official files follow the ``[offset][type][value][description]``
        layout, hence the ``struct.unpack`` header reads.

        _train : bool
            Load the training split when True, the test split otherwise.
        """
        prefix = 'train' if _train else 't10k'
        data_path = os.path.join(root, f'{prefix}-images-idx3-ubyte.gz')
        label_path = os.path.join(root, f'{prefix}-labels-idx1-ubyte.gz')
        with gzip.open(label_path, 'rb') as fin:
            struct.unpack(">II", fin.read(8))  # skip magic + item count
            label = np.frombuffer(fin.read(), dtype='B').astype('int32')
        with gzip.open(data_path, 'rb') as fin:
            header = struct.unpack(">IIII", fin.read(16))  # magic, n, rows, cols
            data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(header[1:])
        return data, label

    def _dataset(self, root):
        """Populate train/test arrays from the archives under *root*."""
        self.trainX, self.trainY = self._get_data(root, True)
        self.testX, self.testY = self._get_data(root, False)
class Cifar(dict):
    def __init__(self, root, namespace, *args, **kwds):
        """CIFAR image classification dataset from https://www.cs.toronto.edu/~kriz/cifar.html
        Each sample is an image (in 3D NDArray) with shape (32, 32, 3).
        Parameters
        ----------
        meta : stores the label-name (category) information
        root : str, data root directory
        namespace : 'cifar-10' or 'cifar-100'
        """
        super().__init__(*args, **kwds)
        self.__dict__ = self  # alias attributes and dict items
        self.url = 'https://www.cs.toronto.edu/~kriz/cifar.html'
        self.namespace = namespace
        self._read_batch(root)

    def _get_dataset(self, root):
        """Unpack ``{namespace}-python.tar.gz``, load every batch pickle,
        then remove the extracted directory.

        Batch files are loaded with ``encoding='bytes'``, so their dict keys
        are byte strings (b'data', b'labels', ...); the meta file is loaded
        with the default encoding and keeps ``str`` keys.
        """
        dataset = {}
        tar_name = os.path.join(root, f'{self.namespace}-python.tar.gz')
        tar_root = extractall(tar_name, root)
        for name in os.listdir(tar_root):
            k = name.split('/')[-1]
            path = os.path.join(tar_root, name)
            # data_batch_* / test_batch (cifar-10) and train / test (cifar-100)
            if name.startswith('data_batch') or name.startswith(
                    'test') or name.startswith('train'):
                with open(path, 'rb') as fp:
                    dataset[k] = pickle.load(fp, encoding='bytes')
            elif name.endswith('meta'):
                with open(path, 'rb') as fp:
                    dataset['meta'] = pickle.load(fp)
        shutil.rmtree(tar_root)  # clean up the extracted tree
        return dataset

    def _read_batch(self, root):
        """Assemble train/test arrays; images become NHWC uint8 (N, 32, 32, 3)."""
        _dataset = self._get_dataset(root)
        if self.namespace == 'cifar-10':
            self.trainX = np.concatenate([
                _dataset[f'data_batch_{i}'][b'data'] for i in range(1, 6)
            ]).reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))
            self.trainY = np.concatenate([
                np.asanyarray(_dataset[f'data_batch_{i}'][b'labels'])
                for i in range(1, 6)
            ])
            self.testX = _dataset['test_batch'][b'data'].reshape(
                -1, 3, 32, 32).transpose((0, 2, 3, 1))
            self.testY = np.asanyarray(_dataset['test_batch'][b'labels'])
            self.label_names = _dataset['meta']['label_names']
        elif self.namespace == 'cifar-100':
            self.trainX = _dataset['train'][b'data'].reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))
            self.train_fine_labels = np.asanyarray(
                _dataset['train'][b'fine_labels'])  # fine (sub-class) labels
            self.train_coarse_labels = np.asanyarray(
                _dataset['train'][b'coarse_labels'])  # coarse (super-class) labels
            self.testX = _dataset['test'][b'data'].reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))
            self.test_fine_labels = np.asanyarray(
                _dataset['test'][b'fine_labels'])  # fine (sub-class) labels
            self.test_coarse_labels = np.asanyarray(
                _dataset['test'][b'coarse_labels'])  # coarse (super-class) labels
            self.fine_label_names = _dataset['meta']['fine_label_names']
            self.coarse_label_names = _dataset['meta']['coarse_label_names']
class DataBunch(dict):
    def __init__(self, root, save_dir, *args, **kwds):
        '''
        Bundle the Cifar10, Cifar100, MNIST and Fashion-MNIST datasets,
        then serialize them all into one HDF5 file under *save_dir*.
        '''
        super().__init__(*args, **kwds)
        self.__dict__ = self  # alias attributes and dict items
        self.mnist = MNIST(root, 'mnist')
        self.fashion_mnist = MNIST(root, 'fashion_mnist')
        self.cifar10 = Cifar(root, 'cifar-10')
        self.cifar100 = Cifar(root, 'cifar-100')
        self.bunch2hdf5(save_dir)

    @timer
    def bunch2hdf5(self, h5_root):
        # Write every bundled dataset into a single compressed HDF5 file
        # (one group per dataset). cifar100 stores coarse/fine label pairs
        # instead of the plain trainY/testY arrays.
        filters = tb.Filters(complevel=7, shuffle=False)
        # A compressed table is used here, so the file could be saved as
        # `.h5c`, though `.h5` works just as well.
        with tb.open_file(os.path.join(h5_root, 'X.h5'), 'w', filters=filters,
                          title='Xinet\'s dataset') as h5:
            for name in self.keys():
                h5.create_group('/', name, title=f'{self[name].url}')
                h5.create_array(
                    h5.root[name],
                    'trainX',
                    self[name].trainX,
                    title='train X')
                h5.create_array(
                    h5.root[name], 'testX', self[name].testX, title='test X')
                if name != 'cifar100':
                    h5.create_array(
                        h5.root[name],
                        'trainY',
                        self[name].trainY,
                        title='train Y')
                    h5.create_array(
                        h5.root[name],
                        'testY',
                        self[name].testY,
                        title='test Y')
                    h5.create_array(
                        h5.root[name],
                        'label_names',
                        self[name].label_names,
                        title='标签名称')
                else:
                    h5.create_array(
                        h5.root[name],
                        'train_coarse_labels',
                        self[name].train_coarse_labels,
                        title='train_coarse_labels')
                    h5.create_array(
                        h5.root[name],
                        'test_coarse_labels',
                        self[name].test_coarse_labels,
                        title='test_coarse_labels')
                    h5.create_array(
                        h5.root[name],
                        'train_fine_labels',
                        self[name].train_fine_labels,
                        title='train_fine_labels')
                    h5.create_array(
                        h5.root[name],
                        'test_fine_labels',
                        self[name].test_fine_labels,
                        title='test_fine_labels')
                    h5.create_array(
                        h5.root[name],
                        'coarse_label_names',
                        self[name].coarse_label_names,
                        title='coarse_label_names')
                    h5.create_array(
                        h5.root[name],
                        'fine_label_names',
                        self[name].fine_label_names,
                        title='fine_label_names')
def copy_hdf5(path, topath='D:/temp/datasets.h5'):
    '''
    Copy an HDF5 file, overwriting any existing file at the destination.
    path:: path of the source HDF5 file
    topath:: path the copy is written to
    '''
    with tb.open_file(path) as h5:
        h5.copy_file(topath, overwrite=True) | [
"gzip.open",
"numpy.asanyarray",
"tables.Filters",
"pickle.load",
"tarfile.open",
"shutil.rmtree",
"tables.open_file",
"os.path.join",
"os.listdir"
] | [((348, 376), 'os.path.join', 'os.path.join', (['root', 'tar_root'], {}), '(root, tar_root)\n', (360, 376), False, 'import os\n'), ((240, 262), 'tarfile.open', 'tarfile.open', (['tar_name'], {}), '(tar_name)\n', (252, 262), False, 'import tarfile\n'), ((1498, 1527), 'os.path.join', 'os.path.join', (['root', 'namespace'], {}), '(root, namespace)\n', (1510, 1527), False, 'import os\n'), ((1832, 1880), 'os.path.join', 'os.path.join', (['root', '"""train-images-idx3-ubyte.gz"""'], {}), "(root, 'train-images-idx3-ubyte.gz')\n", (1844, 1880), False, 'import os\n'), ((1904, 1952), 'os.path.join', 'os.path.join', (['root', '"""train-labels-idx1-ubyte.gz"""'], {}), "(root, 'train-labels-idx1-ubyte.gz')\n", (1916, 1952), False, 'import os\n'), ((1974, 2021), 'os.path.join', 'os.path.join', (['root', '"""t10k-images-idx3-ubyte.gz"""'], {}), "(root, 't10k-images-idx3-ubyte.gz')\n", (1986, 2021), False, 'import os\n'), ((2044, 2091), 'os.path.join', 'os.path.join', (['root', '"""t10k-labels-idx1-ubyte.gz"""'], {}), "(root, 't10k-labels-idx1-ubyte.gz')\n", (2056, 2091), False, 'import os\n'), ((3434, 3487), 'os.path.join', 'os.path.join', (['root', 'f"""{self.namespace}-python.tar.gz"""'], {}), "(root, f'{self.namespace}-python.tar.gz')\n", (3446, 3487), False, 'import os\n'), ((3554, 3574), 'os.listdir', 'os.listdir', (['tar_root'], {}), '(tar_root)\n', (3564, 3574), False, 'import os\n'), ((4041, 4064), 'shutil.rmtree', 'shutil.rmtree', (['tar_root'], {}), '(tar_root)\n', (4054, 4064), False, 'import shutil\n'), ((6168, 6206), 'tables.Filters', 'tb.Filters', ([], {'complevel': '(7)', 'shuffle': '(False)'}), '(complevel=7, shuffle=False)\n', (6178, 6206), True, 'import tables as tb\n'), ((8903, 8921), 'tables.open_file', 'tb.open_file', (['path'], {}), '(path)\n', (8915, 8921), True, 'import tables as tb\n'), ((2241, 2263), 'gzip.open', 'gzip.open', (['label', '"""rb"""'], {}), "(label, 'rb')\n", (2250, 2263), False, 'import gzip\n'), ((2405, 2426), 'gzip.open', 
'gzip.open', (['data', '"""rb"""'], {}), "(data, 'rb')\n", (2414, 2426), False, 'import gzip\n'), ((3631, 3659), 'os.path.join', 'os.path.join', (['tar_root', 'name'], {}), '(tar_root, name)\n', (3643, 3659), False, 'import os\n'), ((4696, 4744), 'numpy.asanyarray', 'np.asanyarray', (["_dataset['test_batch'][b'labels']"], {}), "(_dataset['test_batch'][b'labels'])\n", (4709, 4744), True, 'import numpy as np\n'), ((4989, 5037), 'numpy.asanyarray', 'np.asanyarray', (["_dataset['train'][b'fine_labels']"], {}), "(_dataset['train'][b'fine_labels'])\n", (5002, 5037), True, 'import numpy as np\n'), ((5102, 5152), 'numpy.asanyarray', 'np.asanyarray', (["_dataset['train'][b'coarse_labels']"], {}), "(_dataset['train'][b'coarse_labels'])\n", (5115, 5152), True, 'import numpy as np\n'), ((5312, 5359), 'numpy.asanyarray', 'np.asanyarray', (["_dataset['test'][b'fine_labels']"], {}), "(_dataset['test'][b'fine_labels'])\n", (5325, 5359), True, 'import numpy as np\n'), ((5423, 5472), 'numpy.asanyarray', 'np.asanyarray', (["_dataset['test'][b'coarse_labels']"], {}), "(_dataset['test'][b'coarse_labels'])\n", (5436, 5472), True, 'import numpy as np\n'), ((6280, 6309), 'os.path.join', 'os.path.join', (['h5_root', '"""X.h5"""'], {}), "(h5_root, 'X.h5')\n", (6292, 6309), False, 'import os\n'), ((3860, 3893), 'pickle.load', 'pickle.load', (['fp'], {'encoding': '"""bytes"""'}), "(fp, encoding='bytes')\n", (3871, 3893), False, 'import pickle\n'), ((4444, 4497), 'numpy.asanyarray', 'np.asanyarray', (["_dataset[f'data_batch_{i}'][b'labels']"], {}), "(_dataset[f'data_batch_{i}'][b'labels'])\n", (4457, 4497), True, 'import numpy as np\n'), ((4017, 4032), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (4028, 4032), False, 'import pickle\n')] |
"""
Functions for surface visualization.
Only matplotlib is required.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colorbar import make_axes
from matplotlib.cm import ScalarMappable, get_cmap
from matplotlib.colors import Normalize, LinearSegmentedColormap, to_rgba
from matplotlib.patches import Patch
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from ..surface import load_surf_data, load_surf_mesh
from .img_plotting import _get_colorbar_and_data_ranges, _crop_colorbar
def plot_surf(surf_mesh, surf_map=None, bg_map=None,
              hemi='left', view='lateral', cmap=None, colorbar=False,
              avg_method='mean', threshold=None, alpha='auto',
              bg_on_data=False, darkness=1, vmin=None, vmax=None,
              cbar_vmin=None, cbar_vmax=None,
              title=None, output_file=None, axes=None, figure=None, **kwargs):
    """ Plotting of surfaces with optional background and data
    .. versionadded:: 0.3
    Parameters
    ----------
    surf_mesh: str or list of two numpy.ndarray
        Surface mesh geometry, can be a file (valid formats are
        .gii or Freesurfer specific files such as .orig, .pial,
        .sphere, .white, .inflated) or
        a list of two Numpy arrays, the first containing the x-y-z coordinates
        of the mesh vertices, the second containing the indices
        (into coords) of the mesh faces.
    surf_map: str or numpy.ndarray, optional.
        Data to be displayed on the surface mesh. Can be a file (valid formats
        are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as
        .thickness, .curv, .sulc, .annot, .label) or
        a Numpy array with a value for each vertex of the surf_mesh.
    bg_map: Surface data object (to be defined), optional,
        Background image to be plotted on the mesh underneath the
        surf_data in greyscale, most likely a sulcal depth map for
        realistic shading.
    hemi : {'left', 'right'}, default is 'left'
        Hemisphere to display.
    view: {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'},
        default is 'lateral'
        View of the surface that is rendered.
    cmap: matplotlib colormap, str or colormap object, default is None
        To use for plotting of the stat_map. Either a string
        which is a name of a matplotlib colormap, or a matplotlib
        colormap object. If None, matplotlib default will be chosen
    colorbar : bool, optional, default is False
        If True, a colorbar of surf_map is displayed.
    avg_method: {'mean', 'median'}, default is 'mean'
        How to average vertex values to derive the face value, mean results
        in smooth, median in sharp boundaries.
    threshold : a number or None, default is None.
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image, values
        below the threshold (in absolute value) are plotted as transparent.
    alpha: float, alpha level of the mesh (not surf_data), default 'auto'
        If 'auto' is chosen, alpha will default to .5 when no bg_map
        is passed and to 1 if a bg_map is passed.
    bg_on_data: bool, default is False
        If True, and a bg_map is specified, the surf_data data is multiplied
        by the background image, so that e.g. sulcal depth is visible beneath
        the surf_data.
        NOTE: that this non-uniformly changes the surf_data values according
        to e.g the sulcal depth.
    darkness: float, between 0 and 1, default is 1
        Specifying the darkness of the background image.
        1 indicates that the original values of the background are used.
        .5 indicates the background values are reduced by half before being
        applied.
    vmin, vmax: lower / upper bound to plot surf_data values
        If None , the values will be set to min/max of the data
    title : str, optional
        Figure title.
    output_file: str, or None, optional
        The name of an image file to export plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    axes: instance of matplotlib axes, None, optional
        The axes instance to plot to. The projection must be '3d' (e.g.,
        `figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,
        where axes should be passed.).
        If None, a new axes is created.
    figure: instance of matplotlib figure, None, optional
        The figure instance to plot to. If None, a new figure is created.
    See Also
    --------
    nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
        used as background map for this plotting function.
    nilearn.plotting.plot_surf_roi : For plotting statistical maps on brain
        surfaces.
    nilearn.plotting.plot_surf_stat_map : for plotting statistical maps on
        brain surfaces.
    """
    # load mesh and derive axes limits
    mesh = load_surf_mesh(surf_mesh)
    coords, faces = mesh[0], mesh[1]
    limits = [coords.min(), coords.max()]
    # set view: each (elev, azim) pair orients the camera; the pairs are
    # mirrored between hemispheres so 'lateral'/'medial' mean the same thing
    # anatomically on either side
    if hemi == 'right':
        if view == 'lateral':
            elev, azim = 0, 0
        elif view == 'medial':
            elev, azim = 0, 180
        elif view == 'dorsal':
            elev, azim = 90, 0
        elif view == 'ventral':
            elev, azim = 270, 0
        elif view == 'anterior':
            elev, azim = 0, 90
        elif view == 'posterior':
            elev, azim = 0, 270
        else:
            raise ValueError('view must be one of lateral, medial, '
                             'dorsal, ventral, anterior, or posterior')
    elif hemi == 'left':
        if view == 'medial':
            elev, azim = 0, 0
        elif view == 'lateral':
            elev, azim = 0, 180
        elif view == 'dorsal':
            elev, azim = 90, 0
        elif view == 'ventral':
            elev, azim = 270, 0
        elif view == 'anterior':
            elev, azim = 0, 90
        elif view == 'posterior':
            elev, azim = 0, 270
        else:
            raise ValueError('view must be one of lateral, medial, '
                             'dorsal, ventral, anterior, or posterior')
    else:
        raise ValueError('hemi must be one of right or left')
    # set alpha if in auto mode
    if alpha == 'auto':
        if bg_map is None:
            alpha = .5
        else:
            alpha = 1
    # if no cmap is given, set to matplotlib default
    if cmap is None:
        cmap = plt.cm.get_cmap(plt.rcParamsDefault['image.cmap'])
    else:
        # if cmap is given as string, translate to matplotlib cmap
        if isinstance(cmap, str):
            cmap = plt.cm.get_cmap(cmap)
    # initiate figure and 3d axes
    if axes is None:
        if figure is None:
            figure = plt.figure()
        axes = Axes3D(figure, rect=[0, 0, 1, 1],
                      xlim=limits, ylim=limits)
    else:
        if figure is None:
            figure = axes.get_figure()
        axes.set_xlim(*limits)
        axes.set_ylim(*limits)
    axes.view_init(elev=elev, azim=azim)
    axes.set_axis_off()
    # plot mesh without data
    p3dcollec = axes.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
                                  triangles=faces, linewidth=0.,
                                  antialiased=False,
                                  color='white')
    # reduce viewing distance to remove space around mesh
    axes.dist = 8
    # set_facecolors function of Poly3DCollection is used as passing the
    # facecolors argument to plot_trisurf does not seem to work
    face_colors = np.ones((faces.shape[0], 4))
    if bg_map is None:
        # uniform mid-grey background when no map is given
        bg_data = np.ones(coords.shape[0]) * 0.5
    else:
        bg_data = load_surf_data(bg_map)
        if bg_data.shape[0] != coords.shape[0]:
            raise ValueError('The bg_map does not have the same number '
                             'of vertices as the mesh.')
    # per-face background value = mean of its three vertex values,
    # rescaled to [0, 1] unless the background is constant
    bg_faces = np.mean(bg_data[faces], axis=1)
    if bg_faces.min() != bg_faces.max():
        bg_faces = bg_faces - bg_faces.min()
        bg_faces = bg_faces / bg_faces.max()
    # control background darkness
    bg_faces *= darkness
    face_colors = plt.cm.gray_r(bg_faces)
    # modify alpha values of background
    face_colors[:, 3] = alpha * face_colors[:, 3]
    # should it be possible to modify alpha of surf data as well?
    if surf_map is not None:
        surf_map_data = load_surf_data(surf_map)
        if surf_map_data.ndim != 1:
            raise ValueError('surf_map can only have one dimension but has'
                             '%i dimensions' % surf_map_data.ndim)
        if surf_map_data.shape[0] != coords.shape[0]:
            raise ValueError('The surf_map does not have the same number '
                             'of vertices as the mesh.')
        # create face values from vertex values by selected avg methods
        if avg_method == 'mean':
            surf_map_faces = np.mean(surf_map_data[faces], axis=1)
        elif avg_method == 'median':
            surf_map_faces = np.median(surf_map_data[faces], axis=1)
        else:
            # BUG FIX: an unrecognised avg_method previously fell through
            # silently and crashed below with a NameError on surf_map_faces
            raise ValueError("avg_method must be one of 'mean' or 'median'")
        # if no vmin/vmax are passed figure them out from data
        if vmin is None:
            vmin = np.nanmin(surf_map_faces)
        if vmax is None:
            vmax = np.nanmax(surf_map_faces)
        # threshold if indicated
        if threshold is None:
            kept_indices = np.arange(surf_map_faces.shape[0])
        else:
            kept_indices = np.where(np.abs(surf_map_faces) >= threshold)[0]
        # normalise face values to [0, 1] for the colormap lookup
        surf_map_faces = surf_map_faces - vmin
        surf_map_faces = surf_map_faces / (vmax - vmin)
        # multiply data with background if indicated
        if bg_on_data:
            face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])\
                * face_colors[kept_indices]
        else:
            face_colors[kept_indices] = cmap(surf_map_faces[kept_indices])
        if colorbar:
            our_cmap = get_cmap(cmap)
            norm = Normalize(vmin=vmin, vmax=vmax)
            nb_ticks = 5
            ticks = np.linspace(vmin, vmax, nb_ticks)
            bounds = np.linspace(vmin, vmax, our_cmap.N)
            if threshold is not None:
                cmaplist = [our_cmap(i) for i in range(our_cmap.N)]
                # set colors to grey for absolute values < threshold
                istart = int(norm(-threshold, clip=True) *
                             (our_cmap.N - 1))
                istop = int(norm(threshold, clip=True) *
                            (our_cmap.N - 1))
                for i in range(istart, istop):
                    cmaplist[i] = (0.5, 0.5, 0.5, 1.)
                our_cmap = LinearSegmentedColormap.from_list(
                    'Custom cmap', cmaplist, our_cmap.N)
            # we need to create a proxy mappable
            proxy_mappable = ScalarMappable(cmap=our_cmap, norm=norm)
            proxy_mappable.set_array(surf_map_faces)
            cax, kw = make_axes(axes, location='right', fraction=.1,
                                shrink=.6, pad=.0)
            cbar = figure.colorbar(
                proxy_mappable, cax=cax, ticks=ticks,
                boundaries=bounds, spacing='proportional',
                format='%.2g', orientation='vertical')
            _crop_colorbar(cbar, cbar_vmin, cbar_vmax)
    p3dcollec.set_facecolors(face_colors)
    if title is not None:
        axes.set_title(title, position=(.5, .95))
    # save figure if output file is given
    if output_file is not None:
        figure.savefig(output_file)
        plt.close(figure)
    else:
        return figure
def _get_faces_on_edge(faces, parc_idx):
'''
Internal function for identifying which faces lie on the outer
edge of the parcellation defined by the indices in parc_idx.
Parameters
----------
faces: numpy.ndarray of shape (n, 3), indices of the mesh faces
parc_idx: numpy.ndarray, indices of the vertices
of the region to be plotted
'''
# count how many vertices belong to the given parcellation in each face
verts_per_face = np.isin(faces, parc_idx).sum(axis=1)
# test if parcellation forms regions
if np.all(verts_per_face < 2):
raise ValueError('Vertices in parcellation do not form region.')
vertices_on_edge = np.intersect1d(np.unique(faces[verts_per_face == 2]),
parc_idx)
faces_outside_edge = np.isin(faces, vertices_on_edge).sum(axis=1)
return np.logical_and(faces_outside_edge > 0, verts_per_face < 3)
def plot_surf_contours(surf_mesh, roi_map, axes=None, figure=None, levels=None,
                       labels=None, colors=None, legend=False, cmap='tab20',
                       title=None, output_file=None, **kwargs):
    """
    Plotting contours of ROIs on a surface, optionally over a statistical map.
    Parameters
    ----------
    surf_mesh: str or list of two numpy.ndarray
        Surface mesh geometry, can be a file (valid formats are
        .gii or Freesurfer specific files such as .orig, .pial,
        .sphere, .white, .inflated) or
        a list of two Numpy arrays, the first containing the x-y-z coordinates
        of the mesh vertices, the second containing the indices
        (into coords) of the mesh faces.
    roi_map: str or numpy.ndarray or list of numpy.ndarray
        ROI map to be displayed on the surface mesh, can be a file
        (valid formats are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific
        files such as .annot or .label), or
        a Numpy array with a value for each vertex of the surf_mesh.
        The value at each vertex one inside the ROI and zero inside ROI, or an
        integer giving the label number for atlases.
    axes: instance of matplotlib axes, None, optional
        The axes instance to plot to. The projection must be '3d' (e.g.,
        `figure, axes = plt.subplots(subplot_kw={'projection': '3d'})`,
        where axes should be passed.).
        If None, uses axes from figure if available, else creates new axes.
    figure: instance of matplotlib figure, None, optional
        The figure instance to plot to.
        If None, uses figure of axes if available, else creates a new figure.
    levels: list of integers, or None, optional
        A list of indices of the regions that are to be outlined.
        Every index needs to correspond to one index in roi_map.
        If None, all regions in roi_map are used.
    labels: list of strings or None, or None, optional
        A list of labels for the individual regions of interest.
        Provide None as list entry to skip showing the label of that region.
        If None no labels are used.
    colors: list of matplotlib color names or RGBA values, or None.
    legend: boolean,  optional
        Whether to plot a legend of region's labels.
    cmap: matplotlib colormap, str or colormap object, default is None
        To use for plotting of the contours. Either a string
        which is a name of a matplotlib colormap, or a matplotlib
        colormap object.
    title : str, optional
        Figure title.
    output_file: str, or None, optional
        The name of an image file to export plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    See Also
    --------
    nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
        used as background map for this plotting function.
    nilearn.plotting.plot_surf_stat_map : for plotting statistical maps on
        brain surfaces.
    """
    if figure is None and axes is None:
        figure = plot_surf(surf_mesh, **kwargs)
        axes = figure.axes[0]
    if figure is None:
        figure = axes.get_figure()
    if axes is None:
        axes = figure.axes[0]
    if axes.name != '3d':
        raise ValueError('Axes must be 3D.')
    # test if axes contains Poly3DCollection, if not initialize surface
    if not axes.collections or not isinstance(axes.collections[0],
                                              Poly3DCollection):
        _ = plot_surf(surf_mesh, axes=axes, **kwargs)
    coords, faces = load_surf_mesh(surf_mesh)
    roi = load_surf_data(roi_map)
    if levels is None:
        # BUG FIX: derive the levels from the loaded per-vertex data `roi`,
        # not from the raw roi_map argument, which may be a file name
        levels = np.unique(roi)
    if colors is None:
        n_levels = len(levels)
        vmax = n_levels
        cmap = get_cmap(cmap)
        norm = Normalize(vmin=0, vmax=vmax)
        colors = [cmap(norm(color_i)) for color_i in range(vmax)]
    else:
        try:
            colors = [to_rgba(color, alpha=1.) for color in colors]
        except ValueError:
            raise ValueError('All elements of colors need to be either a'
                             ' matplotlib color string or RGBA values.')
    if labels is None:
        labels = [None] * len(levels)
    if not (len(labels) == len(levels) and len(colors) == len(labels)):
        raise ValueError('Levels, labels, and colors '
                         'argument need to be either the same length or None.')
    patch_list = []
    for level, color, label in zip(levels, colors, labels):
        roi_indices = np.where(roi == level)[0]
        faces_outside = _get_faces_on_edge(faces, roi_indices)
        # NOTE: relies on the private _facecolors3d attribute of
        # Poly3DCollection; may break with newer matplotlib releases
        axes.collections[0]._facecolors3d[faces_outside] = color
        if label and legend:
            patch_list.append(Patch(color=color, label=label))
    # plot legend only if indicated and labels provided
    if legend and np.any([lbl is not None for lbl in labels]):
        figure.legend(handles=patch_list)
    if title:
        figure.suptitle(title)
    # save figure if output file is given
    if output_file is not None:
        figure.savefig(output_file)
        plt.close(figure)
    else:
        return figure
def plot_surf_stat_map(surf_mesh, stat_map, bg_map=None,
                       hemi='left', view='lateral', threshold=None,
                       alpha='auto', vmax=None, cmap='cold_hot',
                       colorbar=True, symmetric_cbar="auto", bg_on_data=False,
                       darkness=1, title=None, output_file=None, axes=None,
                       figure=None, **kwargs):
    """Plot a statistical map on a surface mesh, with optional background.
    .. versionadded:: 0.3
    Parameters
    ----------
    surf_mesh : str or list of two numpy.ndarray
        Surface geometry: a file (.gii or Freesurfer .orig/.pial/.sphere/
        .white/.inflated) or a [coordinates, faces] pair of arrays.
    stat_map : str or numpy.ndarray
        Statistical map: a file (.gii, .mgz, .nii, .nii.gz, or Freesurfer
        .thickness/.curv/.sulc/.annot/.label) or one value per mesh vertex.
    bg_map : Surface data object (to be defined), optional
        Greyscale background (typically a sulcal depth map) rendered
        beneath the statistical map for realistic shading.
    hemi : {'left', 'right'}, default is 'left'
        Hemisphere to display.
    view : {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}
        View of the surface that is rendered; default is 'lateral'.
    threshold : a number or None, default is None
        When given, values below the threshold (in absolute value) are
        plotted as transparent; when None the map is not thresholded.
    alpha : float or 'auto', default 'auto'
        Alpha level of the mesh (not the stat_map). 'auto' resolves to .5
        without a bg_map and to 1 with one.
    vmax : upper bound for plotting of stat_map values.
    cmap : matplotlib colormap in str or colormap object, default 'cold_hot'
    colorbar : bool, default True
        Whether to display a symmetric colorbar of the statistical map.
    symmetric_cbar : bool or 'auto', default 'auto'
        Whether the colorbar ranges from -vmax to vmax or from vmin to
        vmax; 'auto' selects the latter when the data is single-signed.
        The colormap itself always spans -vmax to vmax.
    bg_on_data : bool, default False
        Multiply the stat_map by the background shading (non-uniformly
        changes the displayed values, e.g. by sulcal depth).
    darkness : float in [0, 1], default 1
        Scaling applied to the background values before use.
    title : str, optional
        Figure title.
    output_file : str or None, optional
        Image file (.png/.pdf/.svg) to save the plot to; when given the
        display is closed instead of returned.
    axes : instance of matplotlib axes or None, optional
        A '3d'-projection axes to draw into; a new one is created if None.
    figure : instance of matplotlib figure or None, optional
        The figure to draw into; a new one is created if None.
    See Also
    --------
    nilearn.datasets.fetch_surf_fsaverage: For surface data object to be
        used as background map for this plotting function.
    nilearn.plotting.plot_surf: For brain surface visualization.
    """
    # Derive the symmetric data range and the colorbar clipping bounds
    # from the symmetric_cbar setting, then delegate to the generic plotter.
    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        stat_map, vmax, symmetric_cbar, kwargs)
    return plot_surf(
        surf_mesh, surf_map=stat_map, bg_map=bg_map, hemi=hemi, view=view,
        avg_method='mean', threshold=threshold, cmap=cmap, colorbar=colorbar,
        alpha=alpha, bg_on_data=bg_on_data, darkness=darkness, vmax=vmax,
        vmin=vmin, title=title, output_file=output_file, axes=axes,
        figure=figure, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax, **kwargs)
def plot_surf_roi(surf_mesh, roi_map, bg_map=None,
                  hemi='left', view='lateral', threshold=1e-14,
                  alpha='auto', vmin=None, vmax=None, cmap='gist_ncar',
                  bg_on_data=False, darkness=1, title=None,
                  output_file=None, axes=None, figure=None, **kwargs):
    """Plot a region-of-interest map on a surface mesh, with optional
    background.
    .. versionadded:: 0.3
    Parameters
    ----------
    surf_mesh : str or list of two numpy.ndarray
        Surface geometry: a file (.gii or Freesurfer .orig/.pial/.sphere/
        .white/.inflated) or a [coordinates, faces] pair of arrays.
    roi_map : str or numpy.ndarray or list of numpy.ndarray
        ROI map: a file (.gii, .mgz, .nii, .nii.gz, or Freesurfer .annot or
        .label) or one value per mesh vertex — one/zero for a single ROI,
        or integer atlas labels.
    hemi : {'left', 'right'}, default is 'left'
        Hemisphere to display.
    bg_map : Surface data object (to be defined), optional
        Greyscale background (typically a sulcal depth map) rendered
        beneath the ROI map for realistic shading.
    view : {'lateral', 'medial', 'dorsal', 'ventral', 'anterior', 'posterior'}
        View of the surface that is rendered; default is 'lateral'.
    threshold : a number or None, default 1e-14
        The default hides regions labelled 0; pass None to show label 0.
    cmap : matplotlib colormap str or colormap object, default 'gist_ncar'
    alpha : float or 'auto', default 'auto'
        Alpha level of the mesh (not the map). 'auto' resolves to .5
        without a bg_map and to 1 with one.
    bg_on_data : bool, default False
        Multiply the map by the background shading (non-uniformly changes
        the displayed values, e.g. by sulcal depth).
    darkness : float in [0, 1], default 1
        Scaling applied to the background values before use.
    title : str, optional
        Figure title.
    output_file : str or None, optional
        Image file (.png/.pdf/.svg) to save the plot to; when given the
        display is closed instead of returned.
    axes : Axes instance | None
        A '3d'-projection axes to draw into; a new one is created if None.
    figure : Figure instance | None
        The figure to draw into; a new one is created if None.
    See Also
    --------
    nilearn.datasets.fetch_surf_fsaverage: For surface data object to be
        used as background map for this plotting function.
    nilearn.plotting.plot_surf: For brain surface visualization.
    """
    # Load the ROI values up front to determine default vmin/vmax and to
    # produce more helpful error messages on malformed inputs.
    roi = load_surf_data(roi_map)
    if vmin is None:
        vmin = np.min(roi)
    if vmax is None:
        # one past the largest label so the top label gets its own colour bin
        vmax = 1 + np.max(roi)
    mesh = load_surf_mesh(surf_mesh)
    if roi.ndim != 1:
        raise ValueError('roi_map can only have one dimension but has '
                         '%i dimensions' % roi.ndim)
    if roi.shape[0] != mesh[0].shape[0]:
        raise ValueError('roi_map does not have the same number of vertices '
                         'as the mesh. If you have a list of indices for the '
                         'ROI you can convert them into a ROI map like this:\n'
                         'roi_map = np.zeros(n_vertices)\n'
                         'roi_map[roi_idx] = 1')
    # Median averaging keeps region boundaries sharp instead of blending
    # neighbouring labels.
    return plot_surf(mesh, surf_map=roi, bg_map=bg_map,
                     hemi=hemi, view=view, avg_method='median',
                     threshold=threshold, cmap=cmap, alpha=alpha,
                     bg_on_data=bg_on_data, darkness=darkness,
                     vmin=vmin, vmax=vmax, title=title,
                     output_file=output_file, axes=axes,
                     figure=figure, **kwargs)
| [
"numpy.isin",
"numpy.abs",
"matplotlib.cm.get_cmap",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.patches.Patch",
"numpy.unique",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.colors.to_rgba",
"matplotlib.colors.Normalize",
"matplot... | [((7736, 7764), 'numpy.ones', 'np.ones', (['(faces.shape[0], 4)'], {}), '((faces.shape[0], 4))\n', (7743, 7764), True, 'import numpy as np\n'), ((8084, 8115), 'numpy.mean', 'np.mean', (['bg_data[faces]'], {'axis': '(1)'}), '(bg_data[faces], axis=1)\n', (8091, 8115), True, 'import numpy as np\n'), ((8324, 8347), 'matplotlib.pyplot.cm.gray_r', 'plt.cm.gray_r', (['bg_faces'], {}), '(bg_faces)\n', (8337, 8347), True, 'import matplotlib.pyplot as plt\n'), ((12298, 12324), 'numpy.all', 'np.all', (['(verts_per_face < 2)'], {}), '(verts_per_face < 2)\n', (12304, 12324), True, 'import numpy as np\n'), ((12607, 12665), 'numpy.logical_and', 'np.logical_and', (['(faces_outside_edge > 0)', '(verts_per_face < 3)'], {}), '(faces_outside_edge > 0, verts_per_face < 3)\n', (12621, 12665), True, 'import numpy as np\n'), ((6610, 6660), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (["plt.rcParamsDefault['image.cmap']"], {}), "(plt.rcParamsDefault['image.cmap'])\n", (6625, 6660), True, 'import matplotlib.pyplot as plt\n'), ((6945, 7004), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['figure'], {'rect': '[0, 0, 1, 1]', 'xlim': 'limits', 'ylim': 'limits'}), '(figure, rect=[0, 0, 1, 1], xlim=limits, ylim=limits)\n', (6951, 7004), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((11685, 11702), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (11694, 11702), True, 'import matplotlib.pyplot as plt\n'), ((12438, 12475), 'numpy.unique', 'np.unique', (['faces[verts_per_face == 2]'], {}), '(faces[verts_per_face == 2])\n', (12447, 12475), True, 'import numpy as np\n'), ((16416, 16434), 'numpy.unique', 'np.unique', (['roi_map'], {}), '(roi_map)\n', (16425, 16434), True, 'import numpy as np\n'), ((16528, 16542), 'matplotlib.cm.get_cmap', 'get_cmap', (['cmap'], {}), '(cmap)\n', (16536, 16542), False, 'from matplotlib.cm import ScalarMappable, get_cmap\n'), ((16558, 16586), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': 'vmax'}), 
'(vmin=0, vmax=vmax)\n', (16567, 16586), False, 'from matplotlib.colors import Normalize, LinearSegmentedColormap, to_rgba\n'), ((17610, 17655), 'numpy.any', 'np.any', (['[(lbl is not None) for lbl in labels]'], {}), '([(lbl is not None) for lbl in labels])\n', (17616, 17655), True, 'import numpy as np\n'), ((17860, 17877), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (17869, 17877), True, 'import matplotlib.pyplot as plt\n'), ((26942, 26953), 'numpy.min', 'np.min', (['roi'], {}), '(roi)\n', (26948, 26953), True, 'import numpy as np\n'), ((6791, 6812), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (6806, 6812), True, 'import matplotlib.pyplot as plt\n'), ((6917, 6929), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6927, 6929), True, 'import matplotlib.pyplot as plt\n'), ((7807, 7831), 'numpy.ones', 'np.ones', (['coords.shape[0]'], {}), '(coords.shape[0])\n', (7814, 7831), True, 'import numpy as np\n'), ((9084, 9121), 'numpy.mean', 'np.mean', (['surf_map_data[faces]'], {'axis': '(1)'}), '(surf_map_data[faces], axis=1)\n', (9091, 9121), True, 'import numpy as np\n'), ((9336, 9361), 'numpy.nanmin', 'np.nanmin', (['surf_map_faces'], {}), '(surf_map_faces)\n', (9345, 9361), True, 'import numpy as np\n'), ((9406, 9431), 'numpy.nanmax', 'np.nanmax', (['surf_map_faces'], {}), '(surf_map_faces)\n', (9415, 9431), True, 'import numpy as np\n'), ((9522, 9556), 'numpy.arange', 'np.arange', (['surf_map_faces.shape[0]'], {}), '(surf_map_faces.shape[0])\n', (9531, 9556), True, 'import numpy as np\n'), ((10082, 10096), 'matplotlib.cm.get_cmap', 'get_cmap', (['cmap'], {}), '(cmap)\n', (10090, 10096), False, 'from matplotlib.cm import ScalarMappable, get_cmap\n'), ((10116, 10147), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (10125, 10147), False, 'from matplotlib.colors import Normalize, LinearSegmentedColormap, to_rgba\n'), ((10194, 10227), 
'numpy.linspace', 'np.linspace', (['vmin', 'vmax', 'nb_ticks'], {}), '(vmin, vmax, nb_ticks)\n', (10205, 10227), True, 'import numpy as np\n'), ((10249, 10284), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', 'our_cmap.N'], {}), '(vmin, vmax, our_cmap.N)\n', (10260, 10284), True, 'import numpy as np\n'), ((10969, 11009), 'matplotlib.cm.ScalarMappable', 'ScalarMappable', ([], {'cmap': 'our_cmap', 'norm': 'norm'}), '(cmap=our_cmap, norm=norm)\n', (10983, 11009), False, 'from matplotlib.cm import ScalarMappable, get_cmap\n'), ((11085, 11153), 'matplotlib.colorbar.make_axes', 'make_axes', (['axes'], {'location': '"""right"""', 'fraction': '(0.1)', 'shrink': '(0.6)', 'pad': '(0.0)'}), "(axes, location='right', fraction=0.1, shrink=0.6, pad=0.0)\n", (11094, 11153), False, 'from matplotlib.colorbar import make_axes\n'), ((12212, 12236), 'numpy.isin', 'np.isin', (['faces', 'parc_idx'], {}), '(faces, parc_idx)\n', (12219, 12236), True, 'import numpy as np\n'), ((12550, 12582), 'numpy.isin', 'np.isin', (['faces', 'vertices_on_edge'], {}), '(faces, vertices_on_edge)\n', (12557, 12582), True, 'import numpy as np\n'), ((17290, 17312), 'numpy.where', 'np.where', (['(roi == level)'], {}), '(roi == level)\n', (17298, 17312), True, 'import numpy as np\n'), ((26994, 27005), 'numpy.max', 'np.max', (['roi'], {}), '(roi)\n', (27000, 27005), True, 'import numpy as np\n'), ((9188, 9227), 'numpy.median', 'np.median', (['surf_map_data[faces]'], {'axis': '(1)'}), '(surf_map_data[faces], axis=1)\n', (9197, 9227), True, 'import numpy as np\n'), ((10798, 10868), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""Custom cmap"""', 'cmaplist', 'our_cmap.N'], {}), "('Custom cmap', cmaplist, our_cmap.N)\n", (10831, 10868), False, 'from matplotlib.colors import Normalize, LinearSegmentedColormap, to_rgba\n'), ((16698, 16723), 'matplotlib.colors.to_rgba', 'to_rgba', (['color'], {'alpha': '(1.0)'}), '(color, alpha=1.0)\n', (16705, 16723), False, 'from 
matplotlib.colors import Normalize, LinearSegmentedColormap, to_rgba\n'), ((17503, 17534), 'matplotlib.patches.Patch', 'Patch', ([], {'color': 'color', 'label': 'label'}), '(color=color, label=label)\n', (17508, 17534), False, 'from matplotlib.patches import Patch\n'), ((9607, 9629), 'numpy.abs', 'np.abs', (['surf_map_faces'], {}), '(surf_map_faces)\n', (9613, 9629), True, 'import numpy as np\n')] |
import lens_control
import numpy
from PyQt4 import QtGui, QtCore, Qt
class controller(QtCore.QThread):
    """Worker thread relaying control values from the GUI to the lens.

    The GUI queues new channel values via set_values(); the run() loop
    applies the most recent input according to the operating mode selected
    in the UI ('Voltages', 'GO base' or 'Zernikes').
    """

    def __init__(self, interface):
        QtCore.QThread.__init__(self)
        self.interface = interface
        # second argument presumably toggles a simulation/debug mode of the
        # lens driver — TODO confirm against lens_control.lens
        self.lens = lens_control.lens(self.interface, False)
        self.newinput = False      # set when fresh values await application
        self.opmode = "GO base"    # how run() interprets self.input
        self.input = numpy.zeros(18)  # 18 control channels

    def change_op(self):
        """Synchronise self.opmode with the UI mode selector."""
        mode = self.interface.Base_box.currentText()
        if mode == "Voltages":
            self.opmode = "Voltages"
        elif mode == "GO base":
            self.opmode = "GO base"
        elif mode == "Zernikes":
            self.opmode = "Zernikes"

    def set_values(self, values):
        """Queue a new vector of 18 channel values for the worker loop."""
        self.input = values
        self.newinput = True

    def relax(self):
        """Stop the optimizer, relax the lens and zero every channel."""
        self.interface.optimizer.running = False
        # BUG FIX: the original read `self.interface.optimizer.reset` without
        # calling it, which was a silent no-op.
        self.interface.optimizer.reset()
        self.lens.relax()
        self.set_values(numpy.zeros(18))
        self.interface.set_values(numpy.zeros(18))

    def run(self):
        """Thread main loop: apply the latest pending input to the lens."""
        while True:
            if self.newinput:
                if self.opmode == "Voltages":
                    self.lens.setvoltages(self.input)
                elif self.opmode == "Zernikes":
                    self.lens.setzernikes(self.input)
                elif self.opmode == "GO base":
                    self.lens.setort(self.input)
                self.newinput = False
            # BUG FIX: the original loop busy-waited and pegged a CPU core;
            # sleep briefly between polls.
            self.msleep(1)
| [
"lens_control.lens",
"PyQt4.QtCore.QThread.__init__",
"numpy.zeros"
] | [((154, 183), 'PyQt4.QtCore.QThread.__init__', 'QtCore.QThread.__init__', (['self'], {}), '(self)\n', (177, 183), False, 'from PyQt4 import QtGui, QtCore, Qt\n'), ((247, 287), 'lens_control.lens', 'lens_control.lens', (['self.interface', '(False)'], {}), '(self.interface, False)\n', (264, 287), False, 'import lens_control\n'), ((367, 382), 'numpy.zeros', 'numpy.zeros', (['(18)'], {}), '(18)\n', (378, 382), False, 'import numpy\n'), ((1006, 1021), 'numpy.zeros', 'numpy.zeros', (['(18)'], {}), '(18)\n', (1017, 1021), False, 'import numpy\n'), ((1058, 1073), 'numpy.zeros', 'numpy.zeros', (['(18)'], {}), '(18)\n', (1069, 1073), False, 'import numpy\n')] |
import struct, Filter
# check format for the XBox Data
def to_printable(c):
    """Return c unchanged when it is a printable ASCII character
    (codes 33-125), otherwise return '.'."""
    code = ord(c)
    return c if 33 <= code <= 125 else "."
def hexdump(blob):
    """Return a classic hex dump of *blob* (a str of raw byte characters).

    Each output line shows the offset, 16 hex byte columns, and the
    printable-ASCII rendering of those bytes ('.' for non-printables).
    Returns the empty string for an empty blob.
    """
    # BUG FIX: under Python 3 the original `map(...)` produced an iterator
    # with no .extend and `len(...)/0x10` was a float, breaking range();
    # also renamed to avoid shadowing the builtin `bytes`.
    hex_bytes = ["%02X" % ord(c) for c in blob]
    # pad with blank columns so the final partial row still fills 16 slots
    hex_bytes.extend([" "] * 0x10)
    lines = []
    for offset in range(len(hex_bytes) // 0x10):
        s = offset * 0x10
        e = s + 0x10
        col_hex = " ".join(hex_bytes[s:e])
        # printable-ASCII column (same mapping as to_printable, inlined)
        col_str = "".join(c if 33 <= ord(c) <= 125 else "."
                          for c in blob[s:e])
        if col_str != "":
            lines.append("%08X: %s %s" % (s, col_hex, col_str))
    return "\n".join(lines)
def loadWavFileCore(filename):
    """Read 16-bit PCM samples from *filename* and return them as a list of
    floats scaled to [-1, 1).

    Assumes an 80-byte header with the data-chunk byte count stored as a
    little-endian int32 at offset 76 — TODO confirm this matches the
    recorder's WAV layout.
    """
    with open(filename, "rb") as f:
        fileContent = f.read()
    dataSize = struct.unpack("<l", fileContent[76:80])[0]
    if dataSize != len(fileContent) - 80:
        print("error processing " + filename)
        # BUG FIX: the error path used exit(0), reporting success
        exit(1)
    # BUG FIX: integer division — `dataSize/2` is a float on Python 3 and
    # produces an invalid struct format string
    wav = struct.unpack(str(dataSize // 2) + "h", fileContent[80:])
    # materialise a list (the original lazy map broke len() downstream)
    return [float(x) / 32768 for x in wav]
class FileLengthError(Exception):
    """Raised when a wav file's declared and actual lengths disagree."""

    def __str__(self):
        return repr('FileLengthError')
def bandFilters(wav, skipFirst):
    """Apply Filter.apply_filter to consecutive 480-sample frames of *wav*.

    Args:
        wav: sequence of float samples.
        skipFirst: when True, drop the first band of every frame's result.

    Returns:
        list of per-frame band lists.

    Raises:
        Exception('LogSumError') if the filter fails on any frame.
    """
    # floor division: len(wav)/480 is a float on Python 3 and breaks range()
    rangeend = len(wav) // 480
    frames = []
    if(skipFirst): startI = 1
    else: startI = 0
    for i in range(rangeend):
        try:
            res = Filter.apply_filter(wav[i*480:(i+1)*480])[startI:]
        except Exception as e:
            print(e)
            raise Exception('LogSumError')
        frames.append(res)
    return frames
def loadWavIntoFrames(filename, strip=False, earlyclip=False, skipFirst=True):
    """Load a wav file and return its band-filtered 480-sample frames.

    Optional preprocessing: drop leading negative samples (earlyclip) and
    trim silent lead-in/lead-out frames (strip).  Returns [] for an empty
    file.
    """
    samples = loadWavFileCore(filename)
    if len(samples) == 0:
        return []
    if earlyclip:
        samples = removeEarlyClip(samples)
    if strip:
        samples = stripSilence(samples)
    return bandFilters(samples, skipFirst)
import os
def getFileList(filename):
    """Read a listing file and return its lines as native file paths.

    Each line is stripped, then backslash separators are replaced with the
    platform separator.  The result is materialized as a list so it can be
    indexed and iterated repeatedly (map() is lazy on Python 3).
    """
    with open(filename, "r") as f:
        files = f.readlines()
    files = [x.strip() for x in files]
    files = [os.sep.join(x.split("\\")) for x in files]
    return files
import math
def loadFrames(filename, skipFirst = True):
    """Read comma-separated float frames (one frame per line) from a file.

    Returns a list of float lists, or [] if any NaN is encountered.  When
    *skipFirst* is True the first column of every line is dropped.
    """
    with open(filename) as f:
        lines = f.readlines()
    frames = []
    for line in lines:
        # materialize: a lazy map() object cannot be sliced on Python 3
        data = [float(v) for v in line.strip().split(",")]
        if(sum(map(math.isnan, data)) > 0):
            print("NAN skipping!!", filename)
            return []
        if(skipFirst):
            frames.append( data[1:] )
        else:
            frames.append( data )
    return frames
def removeEarlyClip(wav):
    """Drop leading negative samples.

    Returns the suffix of *wav* starting at the first sample >= 0, or None
    when no such sample exists (matching the original implicit return).
    """
    for idx, sample in enumerate(wav):
        if sample >= 0:
            return wav[idx:]
import numpy as np
def stripSilence(wav):
    """Trim near-silent 480-sample frames from both ends of *wav*.

    A frame counts as silent when its mean absolute amplitude is <= 0.01.
    Returns the samples spanning the first through last loud frames.
    """
    # materialize: a lazy map() object cannot be sliced on Python 3
    abs_wav = [abs(v) for v in wav]
    # floor division keeps the frame count an int
    len_idx = len(wav) // 480
    start = 0
    for i in range(len_idx):
        if np.average(abs_wav[i*480:(i+1)*480]) > 0.01:
            start = i
            break
    end = len_idx
    for i in range(len_idx):
        if np.average(abs_wav[(len_idx-i-1)*480:(len_idx-i)*480]) > 0.01:
            end = len_idx - i
            break
    return wav[start*480:end*480]
def addFramesIntoMatrix(X, frames, unit=3):
    """Append every window of *unit* consecutive frames, concatenated into
    one row, to the matrix *X* (mutated in place).

    Returns the number of windows appended.
    """
    count = len(frames) - unit + 1
    for start in range(count):
        # concatenate the `unit` frame lists of this window into one row
        X.append(sum(frames[start:start + unit], []))
    return count
| [
"Filter.apply_filter",
"struct.unpack",
"numpy.average"
] | [((2839, 2881), 'numpy.average', 'np.average', (['abs_wav[i * 480:(i + 1) * 480]'], {}), '(abs_wav[i * 480:(i + 1) * 480])\n', (2849, 2881), True, 'import numpy as np\n'), ((3010, 3074), 'numpy.average', 'np.average', (['abs_wav[(len_idx - i - 1) * 480:(len_idx - i) * 480]'], {}), '(abs_wav[(len_idx - i - 1) * 480:(len_idx - i) * 480])\n', (3020, 3074), True, 'import numpy as np\n'), ((680, 719), 'struct.unpack', 'struct.unpack', (['"""<l"""', 'fileContent[76:80]'], {}), "('<l', fileContent[76:80])\n", (693, 719), False, 'import struct, Filter\n'), ((1360, 1407), 'Filter.apply_filter', 'Filter.apply_filter', (['wav[i * 480:(i + 1) * 480]'], {}), '(wav[i * 480:(i + 1) * 480])\n', (1379, 1407), False, 'import struct, Filter\n')] |
## LSDMap_BasicManipulation.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools to deal with rasters
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## SMM
## 26/07/2014
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from . import LSDMap_OSystemTools as LSDOst
from . import LSDMap_GDALIO as LSDMap_IO
from pyproj import Proj, transform
def GetUTMEastingNorthing(EPSG_string,latitude,longitude):
    """Project WGS84 latitude/longitude into the UTM zone given by the EPSG code.

    Args:
        EPSG_string (str): EPSG code; 326XX is UTM north, 327XX is UTM south.
        latitude (float): latitude in WGS84.
        longitude (float): longitude in WGS84.

    Returns:
        (easting, northing) in the selected UTM zone.

    Author: SMM
    """
    # input coordinates are WGS84 (EPSG 4326)
    wgs84 = Proj(init='epsg:4326')
    utm = Proj(init=EPSG_string)
    easting, northing = transform(wgs84, utm, longitude, latitude)
    return easting, northing
def ConvertNorthingForImshow(RasterName,Northing):
    """Flip a northing about the raster's north-south extent for imshow
    (imshow draws rasters with the vertical axis inverted).

    Args:
        RasterName (str): raster name with full path and extension.
        Northing (float): northing in metres (UTM WGS84).

    Returns:
        The northing re-anchored at the opposite edge of the raster.

    Author: SMM
    """
    extent = LSDMap_IO.GetRasterExtent(RasterName)
    # distance below the top edge, offset back to the bottom edge
    return (extent[3] - Northing) + extent[2]
#==============================================================================
# THis function takes a raster an writes a new raster where everything below a
# threshold is set to nodata
#==============================================================================
def SetNoDataBelowThreshold(raster_filename,new_raster_filename, threshold = 0, driver_name = "ENVI", NoDataValue = -9999):
    """Write a copy of a raster with every value at or below *threshold*
    converted to nodata.

    Args:
        raster_filename (str): source raster with full path and extension.
        new_raster_filename (str): name of the raster to write.
        threshold (float): values <= this become nodata.
        driver_name (str): GDAL format; LSDTopoTools uses "ENVI".
        NoDataValue (float): the nodata marker, usually -9999.

    Returns:
        None; the new raster is written to disk.

    Author: SMM
    """
    grid = LSDMap_IO.ReadRasterArrayBlocks(raster_filename)
    print("Read the data")
    # everything at or below the threshold becomes nodata
    grid[grid <= threshold] = NoDataValue
    print("Reset raster values")
    LSDMap_IO.array2raster(raster_filename,new_raster_filename,grid,driver_name, NoDataValue)
    print("Wrote raster")
#==============================================================================
#==============================================================================
# This function sets all nodata values to a constant value
#==============================================================================
def SetToConstantValue(raster_filename,new_raster_filename, constant_value, driver_name = "ENVI"):
    """Write a copy of a raster with every non-nodata cell set to a constant.

    Useful for building masks, e.g. uniform erosion-rate blocks for
    cosmogenic calculations.

    Args:
        raster_filename (str): source raster with full path and extension.
        new_raster_filename (str): name of the raster to write.
        constant_value (float): the value given to all non-nodata cells.
        driver_name (str): GDAL format; LSDTopoTools uses "ENVI".

    Returns:
        None; the new raster is written to disk.

    Author: SMM
    """
    # the nodata marker is preserved from the source raster
    NoDataValue = LSDMap_IO.getNoDataValue(raster_filename)
    grid = LSDMap_IO.ReadRasterArrayBlocks(raster_filename)
    print("Read the data")
    grid[grid != NoDataValue] = constant_value
    print("Changed to a constant value")
    LSDMap_IO.array2raster(raster_filename,new_raster_filename,grid,driver_name, NoDataValue)
    print("Wrote raster")
#==============================================================================
# This function calcualtes a hillshade and writes to file
#==============================================================================
def GetHillshade(raster_filename,new_raster_filename, azimuth = 315, angle_altitude = 45, driver_name = "ENVI", NoDataValue = -9999):
    """Compute a hillshade of a raster and write it to disk.

    Args:
        raster_filename (str): source raster with full path and extension.
        new_raster_filename (str): name of the raster to write.
        azimuth (float): compass direction of the sun, in degrees.
        angle_altitude (float): altitude angle of the sun.
        driver_name (str): GDAL format; LSDTopoTools uses "ENVI".
        NoDataValue (float): the nodata marker, usually -9999.

    Returns:
        None; the hillshade raster is written to disk.

    Author: SMM
    """
    # local import avoids a circular dependency with the plotting module
    from . import LSDMap_BasicPlotting as LSDMBP
    shaded = LSDMBP.Hillshade(raster_filename, azimuth, angle_altitude)
    LSDMap_IO.array2raster(raster_filename,new_raster_filename,shaded,driver_name, NoDataValue)
#==============================================================================
# This takes a grouping list of basin keys and transforms it into a list
# of junction names
#==============================================================================
def BasinKeyToJunction(grouped_data_list,thisPointData):
    """Convert grouped basin indices into grouped outlet junction numbers.

    Args:
        grouped_data_list (list of int lists): groups of basin numbers.
        thisPointData: point data object exposing QueryData("outlet_junction").

    Returns:
        The same grouping structure with junction numbers substituted.

    Author: SMM
    """
    thisJunctionData = thisPointData.QueryData("outlet_junction")
    if not grouped_data_list:
        return grouped_data_list
    junction_grouped_list = [[thisJunctionData[element] for element in group]
                             for group in grouped_data_list]
    print(junction_grouped_list)
    return junction_grouped_list
def BasinOrderToBasinRenameList(basin_order_list):
    """Convert a desired basin display order into a rename lookup list.

    basin_order_list[i] = b means basin b should be renamed i; the returned
    list is indexed by original basin number and holds the new name, so it
    can be fed to the profile-plot renaming routines.

    Args:
        basin_order_list (int list): basins in the order you want them numbered.

    Returns:
        The rename list indexed by original basin number.

    Author: SMM
    """
    biggest = max(basin_order_list)
    # one slot per basin number from 0 through the largest
    basin_rename_list = [0] * (biggest + 1)
    print(("length is: "+str(len(basin_rename_list))))
    for new_name, basin in enumerate(basin_order_list):
        print(("Index: "+str(new_name)+", basin: "+str(basin)))
        basin_rename_list[basin] = new_name
    return basin_rename_list
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## This function orders basins in a sequence, so that plots can be made
## as a function of distance, for example
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
def BasinOrderer(thisPointData, FileName, criteria_string,reverse=False,
                 exclude_criteria_string = "None", exlude_criteria_greater = False,
                 exclude_criteria_value = 0 ):
    """Print basin outlet junctions ordered by the values of *criteria_string*.

    NOTE(review): the exclude_* parameters are currently unused and nothing
    is returned -- the sorted basin list is only printed.

    Author: SMM
    """
    # fetch the UTM zone so outlet coordinates can be projected
    EPSG_string = LSDMap_IO.GetUTMEPSG(FileName)
    print("EPSG string is: " + EPSG_string)
    [easting,northing] = thisPointData.GetUTMEastingNorthingFromQuery(EPSG_string,"outlet_latitude","outlet_longitude")
    junctions = [int(x) for x in thisPointData.QueryData("outlet_junction")]
    wd = np.asarray(thisPointData.QueryData(criteria_string))
    order = np.argsort(wd)
    print("data is: ")
    print(wd)
    if reverse:
        order = order[::-1]
    print(np.array(junctions)[order])
#==============================================================================
# This function takes groups of data and then resets values in a
# raster to mimic these values
#==============================================================================
def RedefineIntRaster(rasterArray,grouped_data_list,spread):
    """Renumber integer raster values group by group (in place).

    Values listed in *grouped_data_list* are relabelled with consecutive
    integers; *spread* extra is added between groups so distinct groups get
    distinct plot colours.

    Args:
        rasterArray (np.array): the raster array.
        grouped_data_list (list of int lists): groups of values to relabel.
        spread (int): gap added to the counter between groups.

    Returns:
        np.array: the modified array.

    Author: SMM
    """
    if not grouped_data_list:
        return rasterArray
    label = 0
    for group in grouped_data_list:
        for old_value in group:
            rasterArray[rasterArray == old_value] = label
            label += 1
        label += spread
    return rasterArray
#==============================================================================
# This function takes groups of data and then resets values in a
# raster to mimic these values
#==============================================================================
def MaskByCategory(rasterArray,rasterForMasking,data_list):
    """Mask *rasterArray* to the categories listed in *data_list* (in place).

    Cells whose category value in *rasterForMasking* is not in *data_list*
    are set to NaN, so rasterArray must be a float array.

    Args:
        rasterArray (np.array): the raster array to mask.
        rasterForMasking (np.array): per-cell category values.
        data_list (list): category values to keep.

    Returns:
        np.array: the modified array.

    Author: SMM
    """
    # -9090 is a sentinel marking categories to keep
    for category in data_list:
        rasterForMasking[rasterForMasking == category] = -9090
    rasterArray[rasterForMasking != -9090] = np.nan
    return rasterArray
return rasterArray
#==============================================================================
# This function takes groups of data and then resets values in a
# raster to mimic these values
#==============================================================================
def NanBelowThreshold(rasterArray,threshold):
    """Set every element below *threshold* to NaN (array must be float).

    Args:
        rasterArray (np.array): the raster array, modified in place.
        threshold (int): the threshold value.

    Returns:
        np.array: the modified array.

    Author: SMM
    """
    rasterArray[rasterArray < threshold] = np.nan
    return rasterArray
#==============================================================================
# This does a basic mass balance.
# Assumes all units are metres
#==============================================================================
def RasterMeanValue(path, file1):
    """Return the mean cell value of a raster.

    Args:
        path (str): directory containing the raster.
        file1 (str): the raster's file name.

    Returns:
        mean_value: sum of all cells divided by the pixel count.

    Author: SMM
    """
    # normalise the directory separator before joining
    raster_path = LSDOst.AppendSepToDirectoryPath(path) + file1
    n_pixels = LSDMap_IO.GetNPixelsInRaster(raster_path)
    grid = LSDMap_IO.ReadRasterArrayBlocks(raster_path, raster_band=1)
    return np.sum(grid) / float(n_pixels)
#==============================================================================
# This does a very basic swath analysis in one direction
# if axis is 0, this is along x axis, if axis is 1, is along y axis
# otherwise will throw error
#==============================================================================
def SimpleSwath(path, file1, axis):
    """Average raster data along one axis (a simple swath profile).

    Args:
        path (str): directory containing the raster.
        file1 (str): the raster's file name.
        axis (int): 0 to collapse along rows, 1 along columns.

    Returns:
        (means, medians, std_deviations, twentyfifth_percentile,
        seventyfifth_percentile) at each node across the swath axis.

    Author: SMM
    """
    # make sure names are in correct format
    NewPath = LSDOst.AppendSepToDirectoryPath(path)
    raster_file1 = NewPath+file1
    # get some information about the raster
    NDV, xsize, ysize, GeoT, Projection, DataType = LSDMap_IO.GetGeoInfo(raster_file1)
    print("NDV is: ")
    print(NDV)
    # identity test: `== None` is unidiomatic and can misfire on array-likes
    if NDV is None:
        NDV = -9999
        print("No NDV defined")
    Raster1 = LSDMap_IO.ReadRasterArrayBlocks(raster_file1,raster_band=1)
    # mask nodata cells so they are excluded from the statistics
    masked_Raster1 = np.ma.masked_values(Raster1, NDV)
    means = np.mean(masked_Raster1, axis)
    medians = np.median(masked_Raster1, axis)
    std_deviations = np.std(masked_Raster1, axis)
    twentyfifth_percentile = np.percentile(masked_Raster1, 25, axis)
    seventyfifth_percentile = np.percentile(masked_Raster1, 75, axis)
    # np.nanmean/nanmedian/etc. would be simpler but need numpy >= 1.8
    return means,medians,std_deviations,twentyfifth_percentile,seventyfifth_percentile
#==============================================================================
# This does a basic mass balance.
# Assumes all units are metres
#==============================================================================
def BasicMassBalance(path, file1, file2):
    """Return the "volume" difference (file2 - file1) between two rasters.

    Assumes all units are metres.

    Args:
        path (str): directory containing both rasters.
        file1 (str): the first raster's file name.
        file2 (str): the second raster's file name.

    Returns:
        float: the cell-value difference summed and scaled by pixel area.

    Author: SMM
    """
    base = LSDOst.AppendSepToDirectoryPath(path)
    raster_file1 = base + file1
    raster_file2 = base + file2
    PixelArea = LSDMap_IO.GetPixelArea(raster_file1)
    print("PixelArea is: " + str(PixelArea))
    print("The formatted path is: " + base)
    Raster1 = LSDMap_IO.ReadRasterArrayBlocks(raster_file1,raster_band=1)
    Raster2 = LSDMap_IO.ReadRasterArrayBlocks(raster_file2,raster_band=1)
    diff = np.subtract(Raster2,Raster1)
    mass_balance = np.sum(diff)*PixelArea
    print("linear dif " + str(np.sum(diff)))
    return mass_balance
| [
"numpy.sum",
"numpy.subtract",
"numpy.ma.masked_values",
"numpy.median",
"numpy.std",
"numpy.asarray",
"numpy.argsort",
"numpy.percentile",
"numpy.mean",
"pyproj.Proj",
"numpy.array",
"pyproj.transform"
] | [((1156, 1178), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (1160, 1178), False, 'from pyproj import Proj, transform\n'), ((1193, 1215), 'pyproj.Proj', 'Proj', ([], {'init': 'EPSG_string'}), '(init=EPSG_string)\n', (1197, 1215), False, 'from pyproj import Proj, transform\n'), ((1228, 1275), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'longitude', 'latitude'], {}), '(inProj, outProj, longitude, latitude)\n', (1237, 1275), False, 'from pyproj import Proj, transform\n'), ((10253, 10276), 'numpy.asarray', 'np.asarray', (['wanted_data'], {}), '(wanted_data)\n', (10263, 10276), True, 'import numpy as np\n'), ((10449, 10463), 'numpy.argsort', 'np.argsort', (['wd'], {}), '(wd)\n', (10459, 10463), True, 'import numpy as np\n'), ((15660, 15693), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Raster1', 'NDV'], {}), '(Raster1, NDV)\n', (15679, 15693), True, 'import numpy as np\n'), ((15707, 15736), 'numpy.mean', 'np.mean', (['masked_Raster1', 'axis'], {}), '(masked_Raster1, axis)\n', (15714, 15736), True, 'import numpy as np\n'), ((15751, 15782), 'numpy.median', 'np.median', (['masked_Raster1', 'axis'], {}), '(masked_Raster1, axis)\n', (15760, 15782), True, 'import numpy as np\n'), ((15804, 15832), 'numpy.std', 'np.std', (['masked_Raster1', 'axis'], {}), '(masked_Raster1, axis)\n', (15810, 15832), True, 'import numpy as np\n'), ((15862, 15901), 'numpy.percentile', 'np.percentile', (['masked_Raster1', '(25)', 'axis'], {}), '(masked_Raster1, 25, axis)\n', (15875, 15901), True, 'import numpy as np\n'), ((15932, 15971), 'numpy.percentile', 'np.percentile', (['masked_Raster1', '(75)', 'axis'], {}), '(masked_Raster1, 75, axis)\n', (15945, 15971), True, 'import numpy as np\n'), ((17627, 17656), 'numpy.subtract', 'np.subtract', (['Raster2', 'Raster1'], {}), '(Raster2, Raster1)\n', (17638, 17656), True, 'import numpy as np\n'), ((10572, 10592), 'numpy.array', 'np.array', (['these_data'], {}), '(these_data)\n', (10580, 10592), 
True, 'import numpy as np\n'), ((14192, 14207), 'numpy.sum', 'np.sum', (['Raster1'], {}), '(Raster1)\n', (14198, 14207), True, 'import numpy as np\n'), ((17676, 17693), 'numpy.sum', 'np.sum', (['NewRaster'], {}), '(NewRaster)\n', (17682, 17693), True, 'import numpy as np\n'), ((17735, 17752), 'numpy.sum', 'np.sum', (['NewRaster'], {}), '(NewRaster)\n', (17741, 17752), True, 'import numpy as np\n')] |
# moma.py
# This python script automates MOMA as described in Ishibuchi (2009).
# Author: <NAME>
# i - bin index
# j - item index
# k - algorithm index
# m - solution index
# t - generation index
from __future__ import print_function
import binpacking as bp
import constraints
import ga
import mop
import numpy as np
import os
import outformat as outf
import random
import solutions as sols
from datetime import datetime
from glob import glob
from items import makeitems
from operator import attrgetter
def moma(n, folder, datafile):
    """Run the MOMA multi-objective memetic algorithm (Ishibuchi 2009) on a
    bin packing instance and write run/approximation-set files to *folder*.

    Args:
        n: number of items in the data file.
        folder: directory (with trailing separator) holding the data file;
            output files are written here too.
        datafile: name of the problem data file inside *folder*.
    """
    # number this run's output file after the runs already in the folder
    existing_files = glob(folder + '*.out')
    filename = folder + 'run%d.out' % (len(existing_files) + 1)
    data = folder + datafile
    # Initialize algorithm
    pop = 100  # members per gen.
    end = 25000  # function evaluations or 250 gen.
    outf.startout(filename, n, end, data, 'MOMA')
    startt = datetime.now()
    print(' ', startt)
    print('*******************************************************************************')
    print(' Method: MOMA\n')
    print(' data: ', datafile)
    print('*******************************************************************************')
    binc, binh, items = makeitems(data)
    bpp = bp.BPP(n, binc, binh, items)
    moop = mop.MOproblem(pop, items)
    gen = Generation(n, pop, end, items, bpp, moop)
    gen.initialp()
    gen.makeq()
    # NSGA-II - Local search: evolve until the evaluation budget is spent
    while not gen.endgen():
        gen.rungen()
        outf.genout(filename, gen.gett(), pop, gen.getq(), [gen.getarch(), []])
    # Get final nondominated set, trimmed to the approximation-set limit
    aslimit = 75
    r, allfronts = fnds(gen.getarch())
    if len(allfronts[0]) > aslimit:
        gen.fittruncation(allfronts[0], aslimit)
    ndset = approx(gen.getarch())
    # Make output: plots plus a CSV summary of the approximation set
    see(ndset, folder)
    import csv
    with open(folder + 'ApproximationSet.csv', 'w') as csvfile:
        csvwriter = csv.writer(csvfile, dialect='excel', delimiter=',')
        csvwriter.writerow(['Solution ID', 'f[1]', 'f[2]', 'f[3]'])
        for m in ndset:
            csvwriter.writerow([m.getindex(), m.getbins(), m.getmaxh(), m.getavgw()])
    outf.endout(filename)
    print('This algorithm has completed successfully.')
class Generation:
def __init__(self, n, popsize, end, items, bpp, moop):
self.n = int(n)
self.pop = int(popsize)
self.end = int(end)
self.items = items
self.bpp = bpp
self.moop = moop
self.t = 0
self.idnum = 0
self.p = []
self.q = []
self.archive = [] # Solution Archive
self.newgenes = [] # List of new genes
self.funkeval = 0
def rungen(self):
    """Advance the algorithm by one generation: parent selection, binary
    tournament, crossover, mutation, and (every 10th generation) local
    search."""
    self.t += 1
    #if self.t % 50 == 0:
    gent = datetime.now()
    print(' ', gent)
    print('t = ', self.t)
    print('Selecting parent population...')
    self.makep()
    print('GA operation: binary selection...')
    if self.t == 1:
        # first generation keeps the elite directly
        self.q = ga.binsel(self.p, self.pop, 'elitism')
    else:
        self.q = ga.binsel(self.p, self.pop, 'cco')
    print('GA operation: crossover...')
    self.newgenes = ga.xover(self.q, self.pop, 0.8)
    print('GA operation: mutation...')
    self.evoloperators()
    if self.t % 10 == 0:
        self.localsearch()
    print(self.funkeval, 'function evaluations have been performed.\n')
def initialp(self):
# This function creates the initial set of chromosomes for gen. 0
# Create list of items, i.e. chromosomes
chromosomes = []
for j in range(self.n):
chromosomes.append(self.items[j].getindex())
# Generate popsize random combinations of the chromosomes
random.seed(52)
self.newgenes = []
for i in range(self.pop):
x = random.sample(chromosomes, len(chromosomes))
self.newgenes.append(x)
def makep(self):
    """Build the parent pool from the union of p and q using non-dominated
    sorting; the first front is pruned and archived first."""
    union = self.p + self.q
    union, fronts = fnds(union)
    print(' There are currently ', len(fronts[0]), 'solutions in the Approximate Set.')
    fronts[0] = self.cleanarchive(fronts[0])
    if self.t == 1:
        # generation 1: keep the whole sorted union
        self.p = union
    else:
        self.fill(fronts)
def fill(self, fronts):
    # This module fills the parent population based on the crowded
    # comparison operator.
    # NOTE(review): nesting reconstructed from logic; the second cda() call
    # is taken to be inside the while loop so refilled fronts get distances.
    self.p = []
    k = 0
    fronts[k] = cda(fronts[k], 3)
    while (len(self.p) + len(fronts[k])) < self.pop:
        self.p = self.p + fronts[k]
        k += 1
        if fronts[k] == []:
            # empty front: synthesize members via Pareto local search on
            # randomly chosen solutions from the first two fronts
            needed = self.pop - len(self.p)
            for n in range(needed):
                if n <= len(fronts[0]):
                    m = random.choice(fronts[0])
                else:
                    m = random.choice(fronts[1])
                nls = 5
                # NOTE(review): first call uses the literal 5, not nls
                mneighbors = self.paretols(m, 5, retrieve=True)
                while mneighbors == []:
                    # widen the neighbourhood until something is found
                    nls += 5
                    mneighbors = self.paretols(m, nls, retrieve=True)
                fronts[k].append(random.choice(mneighbors))
        fronts[k] = cda(fronts[k], 3)
    # top up from the last front, most-crowded-distance first
    fronts[k].sort(key=attrgetter('cd'), reverse=True)
    fillsize = self.pop - len(self.p)
    for l in range(fillsize):
        self.p.append(fronts[k][l])
def evoloperators(self):
    """Choose between chromosome-level and bin-level evolution operators.

    NOTE(review): the threshold is currently 1 (0.5 is commented out in the
    original), so the chromosome path always runs.
    """
    if random.random() < 1:  # 0.5: # chromosome operators
        self.newgenes = ga.mutat(self.n, self.newgenes, self.pop)
        # Form into solutions of the next generation
        self.makeq()
    else:  # bin packing operators
        self.makeqbybins()
def makeq(self):
if self.t == 0:
new = range(self.pop)
else:
new, self.q = sols.oldnew(self.archive, self.q, self.newgenes)
# Make new solutions in q
for m in new:
x, y = bp.ed(self.idnum, self.newgenes[m], self.bpp)
self.addnewsol(x, y, self.newgenes[m])
def makeqbybins(self):
    """Variant of makeq() driven by bin-level mutation operators.

    Bug fixes vs. the original:
      - `np.all(np.equal(...)) is False` compared identity against a numpy
        bool and was never true, so mutated solutions were silently
        discarded; replaced with `not np.array_equal(...)`.
      - entries are no longer deleted from self.q while indexing it, which
        would skip/shift elements once the branch actually fires.
    """
    # Sort out which chromosomes were changed by the crossover operation
    new, self.q = sols.oldnew(self.archive, self.q, self.newgenes)
    # First mutate the survivors already in q (iterate a snapshot because
    # addnewsol appends to self.q)
    for sol in list(self.q):
        qx = sol.getx()
        qy = sol.gety()
        x, y = self.binmutation(qx, qy)
        if not np.array_equal(x, qx):  # mutation produced a new solution
            self.q.remove(sol)
            newchrom = self.updatechrom(x)
            self.addnewsol(x, y, newchrom)
    # Then build and mutate the brand-new solutions
    for m in new:
        newx, newy = bp.ed(self.idnum, self.newgenes[m], self.bpp)
        x, y = self.binmutation(newx, newy)
        if not np.array_equal(x, newx):
            # NOTE(review): as in the original, unmutated new solutions are
            # not added here -- confirm that is intended
            self.newgenes[m] = self.updatechrom(x)
            self.addnewsol(x, y, self.newgenes[m])
def addnewsol(self, x, y, chromosome):
    """Validate (x, y), evaluate its fitness, and register the resulting
    solution in both q and the archive."""
    constraints.concheck(self.idnum, x, self.bpp)
    constraints.xycheck(self.idnum, x, y)
    fitness = self.moop.calcfits(x, y)
    self.updatefe()
    solution = sols.MultiSol(self.idnum, chromosome, x, y, self.t, fitness, 0, 0.0)
    self.q.append(solution)
    self.archive.append(solution)
    self.updateid()
def cleanarchive(self, approxfront):
    # This function removes unwanted solutions from the archive
    # and calls the local search function.
    aslimit = 75 # Limit to size of approximation set
    if len(approxfront) > aslimit:
        approxfront = self.ccotruncation(approxfront, aslimit)
    # Every 50 generations check the archive
    if self.t % 50 == 0:
        # NOTE(review): `set` shadows the builtin here
        set = [m for m in self.archive if m.getrank() == 1]
        irrelevant, archfronts = fnds(set)
        # Truncate approximate set
        if len(archfronts[0]) > aslimit:
            self.fittruncation(archfronts[0], aslimit)
        # NOTE(review): archive reduction reconstructed as part of the
        # 50-generation maintenance step -- confirm against the original file
        self.archive = sols.reduce(self.archive, self.p, self.q)
    return approxfront
def ccotruncation(self, approxfront, limit):
# This function performs a recurrent truncation of the
# approximate set based on cco.
print(' Removing superfluous solutions from the front.')
# Sort approxfront by the crowded distance indicator
approxfront.sort(key=attrgetter('cd'))
# Remove solutions with the lowest distance first
nremove = len(approxfront) - limit
for m in range(nremove):
if approxfront[m] in self.archive:
self.archive.remove(approxfront[m])
del approxfront[:nremove]
approxfront.sort(key=attrgetter('index'))
print(' There are now', len(approxfront), 'solutions in the Approximate Set.')
return approxfront
def fittruncation(self, approxfront, limit):
# This function removes solutions from the approximate set that have the same
# fitness values.
print('Removing superfluous solutions from the archive.')
nremove = len(approxfront) - limit
clusters = self.getclusters(approxfront)
nrm = 0
for k in range(len(clusters[0])):
cdremove = clusters[0][0].getcd()
for c in range(len(clusters)):
if nrm < nremove:
if len(clusters[c]) > 1:
if clusters[c][0].getcd() <= cdremove:
if clusters[c][0] not in self.p or self.q:
self.archive.remove(clusters[c][0])
nrm += 1
clusters[c].remove(clusters[c][0])
print('There are now', len(approxfront) - nrm, 'solutions in the Approximate Set.')
def getclusters(self, front):
# This function sorts a front into clusters of solutions
# Each cluster has the same number of bins in the solution
front.sort(key=attrgetter('fit0'))
clusters = []
m1 = 0
while m1 < len(front):
numbins = front[m1].getbins()
fitlist = []
for m2 in range(len(front)):
if front[m2].getbins() == numbins:
fitlist.append(front[m2])
m1 += 1
# Not a cluster if a solution is by itself
if len(fitlist) > 1:
fitlist.sort(key=attrgetter('cd'))
clusters.append(fitlist)
orderedbylen = sorted(clusters, key=len, reverse=True)
return orderedbylen
def localsearch(self):
# This function performs the Pareto dominance local search with
# the probability of search increasing from 0 to 0.2 over the course
# of the calculations.
probls = 0 # Probability of local search
# Update probability for progress in calculations
if self.funkeval > 0.9 * self.end:
probls = 0.2
else:
probls = 0.2 * float(self.funkeval) / (0.9 * self.end)
nls = 10
for m in range(self.pop):
ranls = random.random()
if ranls <= probls:
self.paretols(self.p[m], nls)
def paretols(self, m, nls, retrieve=False):
# This function finds up to nls neighbors to check if they dominate
# solution m of the new generation.
x = m.getx()
y = m.gety()
neighbors = []
for k in range(nls):
newx, newy = self.binmutation(x, y)
newfit = self.moop.calcfits(newx, newy)
self.updatefe()
# If we find a neighbor that is nondominated by solution m, add to q.
if not mop.dom(m.getfits(), newfit):
neighbors = self.addneighbor(newx, newy, newfit, neighbors)
if retrieve is True:
return neighbors
def addneighbor(self, x, y, fit, neighbors):
    """Wrap (x, y, fit) in a new solution found by local search, register
    it in q and the archive, and return the extended neighbour list."""
    print(' Local search found another possible solution:', self.idnum)
    newgenes = self.updatechrom(x)
    found = sols.MultiSol(self.idnum, newgenes, x, y, self.t, fit, 0, 0.0)
    neighbors.append(found)
    self.q.append(found)
    self.archive.append(found)
    self.updateid()
    return neighbors
def binmutation(self, x, y):
    """Apply one of four bin-level mutations (part swap, merge, split, item
    swap), chosen uniformly.

    NOTE(review): the mutation probability is currently forced to 1 (0.3 is
    commented out in the original).
    """
    if random.random() < 1:  # 0.3 disabled
        roll = random.random()
        if roll < 0.25:
            x, y = self.partswap(x, y)
        elif roll < 0.50:
            x, y = self.mergebins(x, y)
        elif roll < 0.75:
            x, y = self.splitbin(x, y)
        else:
            x, y = self.itemswap(x, y)
    return x, y
def itemswap(self, x, y):
    """Swap two items between two bins; when no feasible swap exists, move
    a random item from the first bin into a fresh bin instead.

    Bug fix vs. the original: the fallback used a bare `except:`, which also
    swallows KeyboardInterrupt/SystemExit; narrowed to `except Exception`
    (the expected failure is unpacking the False returned by
    getitemstoswap).
    """
    openbins = self.findopenbins(y)
    # first bin is the fullest/emptiest/random bin, second is distinct
    i1 = self.getmaxminranbin(x, openbins)
    i2 = self.getrandsecondbin(i1, x, openbins)
    try:
        j1, j2 = self.getitemstoswap(x, i1, i2)
        x[i1, j1] = 0
        x[i1, j2] = 1
        x[i2, j2] = 0
        x[i2, j1] = 1
    except Exception:
        # no feasible swap: open a new bin for one item from bin i1
        binitems1 = self.finditemsinbin(x, i1)
        if len(binitems1) != 1:
            j1 = random.choice(binitems1) - 1
            x[i1, j1] = 0
            i2 = openbins[-1] + 1
            x[i2, j1] = 1
            y[i2] = 1
    return x, y
def getitemstoswap(self, x, i1, i2):
    # Search for a pair of items (one from bin i1, one from bin i2) whose
    # exchange keeps both bins feasible.  Returns the 0-based column indices
    # (j1, j2) of the first feasible pair, or the sentinel False if none of
    # the attempts succeeded.
    #
    # NOTE(review): each attempt draws the items *randomly*, so the same
    # pair can be retried and a feasible pair can be missed; the nested loop
    # only caps the number of attempts at len(bin1) * len(bin2).
    # Find items in bin i1 and bin i2
    binitems1 = self.finditemsinbin(x, i1)
    binitems2 = self.finditemsinbin(x, i2)
    for count1 in range(len(binitems1)):
        for count2 in range(len(binitems2)):
            # Select random items in chosen bins
            # Remember binitems is a list of item indices, where index = j + 1
            j1 = random.choice(binitems1) - 1
            j2 = random.choice(binitems2) - 1
            # Simulate the swap and re-check both bins for feasibility
            newbin1 = list(binitems1)
            newbin1.remove(j1 + 1)
            newbin1.append(j2 + 1)
            check1 = constraints.bincheck(newbin1, self.bpp)
            newbin2 = list(binitems2)
            newbin2.remove(j2 + 1)
            newbin2.append(j1 + 1)
            check2 = constraints.bincheck(newbin2, self.bpp)
            # violation if bincheck returns True
            truthvalue = check1 or check2
            # Stop module and return values if found items to swap
            if truthvalue is False:
                return j1, j2
    return False
def partswap(self, x, y):
    """Swap the tails of two randomly chosen open bins.

    Two open bins (each holding more than one item) are selected, a random
    split point is drawn inside each, and the items after the split points
    are exchanged.  If the exchange would violate the bin constraints, a
    splitbin mutation is applied instead (it modifies x and y in place).

    Returns the (in-place modified) x, y pair.
    """
    openbins = self.findopenbins(y)
    # Pick a first bin with more than one item.
    i1 = random.choice(openbins)
    binitems1 = self.finditemsinbin(x, i1)
    while len(binitems1) == 1:
        i1 = random.choice(openbins)
        binitems1 = self.finditemsinbin(x, i1)
    i2 = self.getrandsecondbin(i1, x, openbins)
    # Pick the split points; both tails are guaranteed non-empty.
    binitems2 = self.finditemsinbin(x, i2)
    j1 = random.randrange(1, len(binitems1))
    j2 = random.randrange(1, len(binitems2))
    # Simulate the swapped bins and check both for violations.
    newbin1 = binitems1[:j1] + binitems2[j2:]
    newbin2 = binitems2[:j2] + binitems1[j1:]
    violat1 = constraints.bincheck(newbin1, self.bpp)
    violat2 = constraints.bincheck(newbin2, self.bpp)
    # BUG FIX: the original condition `violat1 or violat2 is True` parses as
    # `violat1 or (violat2 is True)`, which silently relies on bincheck
    # returning a literal bool.  Test both flags for truthiness directly.
    if violat1 or violat2:
        self.splitbin(x, y)
    # If no violations, swap the tails:
    else:
        for index in newbin1[j1:]:
            x[i1, index - 1] = 1
            x[i2, index - 1] = 0
        for index in newbin2[j2:]:
            x[i2, index - 1] = 1
            x[i1, index - 1] = 0
    return x, y
def mergebins(self, x, y):
    # Merge the two emptiest bins (bin i2 into bin i1), then shift every
    # later bin up one slot so the representation stays contiguous.  Falls
    # back to a splitbin mutation when the merged bin would violate the
    # constraints.  Returns the (in-place modified) x, y pair.
    #
    # NOTE(review): gettwominbins does not guarantee i1 < i2; the shift
    # starting at i2 appears to stay consistent either way because it runs
    # after the merge, but confirm for the i1 > i2 case.
    i1, i2 = self.gettwominbins(x, y)
    old1 = self.finditemsinbin(x, i1)
    old2 = self.finditemsinbin(x, i2)
    newbin = old1 + old2
    violation = constraints.bincheck(newbin, self.bpp)
    if violation is True:
        self.splitbin(x, y)  # x and y are modified in place by splitbin
    else:
        # Add items in bin2 to bin1
        for index in old2:
            x[i1, index - 1] = 1
        # Move up other bins (overwriting bin2)
        for i in range(i2, self.n - 1):
            y[i] = y[i + 1]
            for j in range(self.n):
                x[i, j] = x[i + 1, j]
        # Very last bin will now be empty
        y[-1] = 0
        x[-1, :] = 0
    return x, y
def splitbin(self, x, y):
    # Split either a random open bin or the fullest bin into two: the items
    # after a random split point are moved into a brand-new bin opened just
    # after the last open bin.  Returns the (in-place modified) x, y pair.
    openbins = self.findopenbins(y)
    # Get bin number to split
    ransplit = random.random()
    if ransplit < 0.5:
        i1 = random.choice(openbins)
    else:
        i1 = self.getmaxbin(x)
    # Re-draw until the chosen bin has more than one item (a single-item bin
    # cannot be split).
    # NOTE(review): this loops forever if *every* open bin holds exactly one
    # item -- confirm the problem instances rule that out.
    binitems = self.finditemsinbin(x, i1)
    while len(binitems) == 1:
        i1 = random.choice(openbins)
        binitems = self.finditemsinbin(x, i1)
    # Get random place to split bin
    jsplit = random.randrange(1, len(binitems))
    newbin = list(binitems[jsplit:])
    i2 = openbins[-1] + 1  # index of the freshly opened bin
    for index in newbin:
        x[i1, index - 1] = 0
        x[i2, index - 1] = 1
    y[i2] = 1  # mark the new bin as open
    return x, y
def findopenbins(self, y):
    """Return the indices i in [0, self.n) of the open bins (y[i] == 1)."""
    return [i for i in range(self.n) if y[i] == 1]
def getmaxminranbin(self, x, bins):
    """Pick one bin index: with equal probability the fullest bin, the
    emptiest bin, or a uniformly random member of *bins*."""
    roll = random.random()
    if roll < 1 / 3:
        return self.getmaxbin(x)
    if roll < 2 / 3:
        return self.getminbin(x, bins)
    return random.choice(bins)
def getmaxbin(self, x):
    """Return the index of the fullest (tallest) bin.

    Bin heights are the product of the assignment matrix x with the
    per-item height vector supplied by the MOOP instance.
    """
    height_per_bin = x @ self.moop.getheightmatrix()
    return np.argmax(height_per_bin)
def getminbin(self, x, openbins):
    # Return the emptiest bin index, measured by stacked item height.
    binheights = np.dot(x, self.moop.getheightmatrix())
    lastopenbin = openbins[-1]
    # NOTE(review): the slice [:lastopenbin] excludes the *last* open bin
    # from the search (and includes any closed bins before it, which have
    # height 0) -- confirm whether that exclusion is intentional.
    bini = np.argmin(binheights[:lastopenbin])
    return bini
def gettwominbins(self, x, y):
    # Return the indices of the two emptiest bins, where "emptiest" here is
    # measured by item *count* (not height), among the bins strictly before
    # the last open one.
    openbins = self.findopenbins(y)
    lastopenbin = openbins[-1]
    fill_level = np.zeros(lastopenbin)
    for i in range(lastopenbin):
        fill_level[i] = len(self.finditemsinbin(x, i))
    # argsort()[:2] -> indices of the two smallest fill levels
    binindices = fill_level.argsort()[:2]
    i1 = binindices[0]
    i2 = binindices[1]
    return i1, i2
def getrandsecondbin(self, i1, x, openbins):
    """Draw a random open bin, different from i1, whose item count is not 1.

    Keeps re-drawing until both conditions hold.
    """
    while True:
        i2 = random.choice(openbins)
        items = self.finditemsinbin(x, i2)
        if i2 != i1 and len(items) != 1:
            return i2
def finditemsinbin(self, x, bini):
    """Translate row ``bini`` of the assignment matrix x into the
    variable-length representation.

    Returns the list of 1-based item indices stored in the bin
    (item index = column + 1).
    """
    return [j + 1 for j in range(self.n) if x[bini, j] == 1]
def updatechrom(self, x):
    """Convert the assignment matrix x back into a chromosome: 1-based item
    indices listed bin by bin."""
    return [j + 1
            for i in range(self.n)
            for j in range(self.n)
            if x[i, j] == 1]
def updatefe(self):
    # Count one more objective-function evaluation (budget bookkeeping).
    self.funkeval += 1
def endgen(self):
    # Termination test: True once the evaluation budget self.end is spent.
    return self.funkeval > self.end
def updateid(self):
    # Advance the running solution id.
    self.idnum += 1
def getid(self):
    # Next solution id to be assigned.
    return self.idnum
def gett(self):
    # Current generation counter.
    return self.t
def getp(self):
    # Parent population.
    return self.p
def getq(self):
    # Offspring pool.
    return self.q
def getarch(self):
    # Archive of every solution generated so far.
    return self.archive
def fnds(setp):
    # This module performs the fast-non-dominated-sort described in Deb(2002).
    # Every solution gets a rank via updaterank() (1 = Pareto front); the
    # population and the list of fronts are returned.
    # To run this module, enable the following line:
    # from mop import dom
    numsol = len(setp)
    fronts = []
    sp = []     # sp[p]: list of solutions dominated by setp[p]
    fhold = []  # accumulator for the first (Pareto) front
    nps = []    # nps[p]: number of solutions dominating setp[p]
    for p in range(numsol):
        shold = []
        nump = 0
        for q in range(numsol):
            if setp[p] != setp[q]:
                if mop.dom(setp[p].getfits(), setp[q].getfits()):
                    shold.append(setp[q])
                if mop.dom(setp[q].getfits(), setp[p].getfits()):
                    nump += 1
        sp.append(shold)
        nps.append(nump)
        if nump == 0:
            # Nothing dominates p: it belongs to the first front.
            fhold.append(setp[p])
            setp[p].updaterank(1)
    fronts.append(fhold)  # Pareto set
    i = 0
    # Peel off subsequent fronts: a solution moves to front i+1 when its
    # domination count drops to zero after front i is removed.
    while fronts[i] != []:
        q = []
        for j in range(numsol):
            if setp[j] in fronts[i]:
                for k in range(numsol):
                    if setp[k] in sp[j]:
                        nps[k] -= 1
                        if nps[k] == 0:
                            setp[k].updaterank(i+2)
                            q.append(setp[k])
        fronts.append(q)
        i += 1
    return setp, fronts
def cco(i, p, j, q):
    """Crowded-comparison operator (Deb 2002).

    Return *i* when solution *p* beats solution *q*, else *j*.  A lower
    rank wins outright; on equal ranks, the larger crowding distance wins
    (ties on crowding distance go to *j*).
    """
    if p.getrank() != q.getrank():
        return i if p.getrank() < q.getrank() else j
    return i if p.getcd() > q.getcd() else j
def cda(front, nobj):
    # Crowding-distance assignment (Deb 2002): for each solution, sum over
    # the objectives the normalized gap between its two neighbors in the
    # objective-sorted order.  Boundary solutions get a huge distance so
    # they are always preferred.
    front = rmdupl(front)
    l = len(front)
    fdist = []
    indexes = []
    for i in range(l):
        fdist.append(0)
        # [position in front, objective 1 (bins), objective 2 (max height),
        #  objective 3 (avg weight)]
        indexes.append([i, front[i].getbins(), front[i].getmaxh(), front[i].getavgw()])
    for o in range(nobj):
        indexes.sort(key=lambda pair: pair[o+1])  # sort along objective o
        fdist[indexes[0][0]] = 1000000  # represent infinity
        fdist[indexes[l-1][0]] = 1000000
        # NOTE(review): divides by the objective's range -- this raises
        # ZeroDivisionError if every member shares the same value on an
        # objective; confirm that cannot happen after rmdupl.
        for i in range(1, l-1):
            fdist[indexes[i][0]] += (indexes[i+1][o+1]-indexes[i-1][o+1]) / \
                                    (indexes[l-1][o+1]-indexes[0][o+1])
    for p in range(l):
        idist = fdist[p]
        front[p].updatecd(idist)
    return front
def rmdupl(front):
    """Remove duplicate individuals from *front* in place and return it.

    While an element still has a duplicate later in the list, the earlier
    occurrence is deleted, so the *last* occurrence of each duplicate
    survives (matching the original scanning behavior).
    """
    idx = 0
    while idx < len(front):
        if front.count(front[idx]) > 1:
            del front[idx]
        else:
            idx += 1
    return front
def approx(archive):
    """Extract the final approximate (nondominated) set from the archive.

    A solution is kept when no distinct archive member dominates it;
    duplicates are removed from the result afterwards.
    """
    total = len(archive)
    ndset = []
    for p in range(total):
        dominators = 0  # number of solutions that dominate archive[p]
        for q in range(total):
            if archive[p] != archive[q]:
                if mop.dom(archive[q].getfits(), archive[p].getfits()):
                    dominators += 1
        if dominators == 0:
            ndset.append(archive[p])
    return rmdupl(ndset)
def see(ndset, folder):
    # Dump the x (item-location) and y (bins-used) matrices of every
    # solution in the nondominated set as text files under
    # <folder>/variables/.
    # To run this module, enable the following line:
    # import numpy as np
    pathname = folder + 'variables/'
    if not os.path.exists(pathname):
        os.mkdir(pathname)
    for m in ndset:
        x, y = m.getx(), m.gety()
        solid = str(m.getindex())  # solution index used as the file prefix
        np.savetxt(pathname + solid + '_x.txt', x, fmt='%i', header='Item Location Matrix x:')
        np.savetxt(pathname + solid + '_y.txt', y, fmt='%i', header='Bins Used Matrix y:')
def main():
    # Entry point: run the memetic algorithm on a fixed 500-item instance.
    # Uncomment the input() lines below for interactive use.
    # n = eval(input('Please enter the number of items to be sorted: \n'))
    # folder = input('Please enter the name of the folder where your input file is: \n')
    # file = input('Please enter the name of the input file: \n')
    # moma(n, folder, file)
    moma(500, '/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Static/MOMA/SBSBPP500/Experiment04/',
         'SBSBPP500_run4.txt')
if __name__ == '__main__':
    main()
| [
"os.mkdir",
"numpy.argmax",
"mop.MOproblem",
"numpy.argmin",
"glob.glob",
"constraints.xycheck",
"ga.mutat",
"outformat.startout",
"numpy.savetxt",
"os.path.exists",
"numpy.equal",
"items.makeitems",
"random.seed",
"ga.xover",
"datetime.datetime.now",
"csv.writer",
"ga.binsel",
"so... | [((577, 599), 'glob.glob', 'glob', (["(folder + '*.out')"], {}), "(folder + '*.out')\n", (581, 599), False, 'from glob import glob\n'), ((819, 864), 'outformat.startout', 'outf.startout', (['filename', 'n', 'end', 'data', '"""MOMA"""'], {}), "(filename, n, end, data, 'MOMA')\n", (832, 864), True, 'import outformat as outf\n'), ((878, 892), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (890, 892), False, 'from datetime import datetime\n'), ((1218, 1233), 'items.makeitems', 'makeitems', (['data'], {}), '(data)\n', (1227, 1233), False, 'from items import makeitems\n'), ((1244, 1272), 'binpacking.BPP', 'bp.BPP', (['n', 'binc', 'binh', 'items'], {}), '(n, binc, binh, items)\n', (1250, 1272), True, 'import binpacking as bp\n'), ((1284, 1309), 'mop.MOproblem', 'mop.MOproblem', (['pop', 'items'], {}), '(pop, items)\n', (1297, 1309), False, 'import mop\n'), ((2140, 2161), 'outformat.endout', 'outf.endout', (['filename'], {}), '(filename)\n', (2151, 2161), True, 'import outformat as outf\n'), ((1906, 1957), 'csv.writer', 'csv.writer', (['csvfile'], {'dialect': '"""excel"""', 'delimiter': '""","""'}), "(csvfile, dialect='excel', delimiter=',')\n", (1916, 1957), False, 'import csv\n'), ((2747, 2761), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2759, 2761), False, 'from datetime import datetime\n'), ((3183, 3214), 'ga.xover', 'ga.xover', (['self.q', 'self.pop', '(0.8)'], {}), '(self.q, self.pop, 0.8)\n', (3191, 3214), False, 'import ga\n'), ((3759, 3774), 'random.seed', 'random.seed', (['(52)'], {}), '(52)\n', (3770, 3774), False, 'import random\n'), ((5544, 5559), 'random.random', 'random.random', ([], {}), '()\n', (5557, 5559), False, 'import random\n'), ((6428, 6476), 'solutions.oldnew', 'sols.oldnew', (['self.archive', 'self.q', 'self.newgenes'], {}), '(self.archive, self.q, self.newgenes)\n', (6439, 6476), True, 'import solutions as sols\n'), ((7338, 7383), 'constraints.concheck', 'constraints.concheck', (['self.idnum', 'x', 
'self.bpp'], {}), '(self.idnum, x, self.bpp)\n', (7358, 7383), False, 'import constraints\n'), ((7392, 7429), 'constraints.xycheck', 'constraints.xycheck', (['self.idnum', 'x', 'y'], {}), '(self.idnum, x, y)\n', (7411, 7429), False, 'import constraints\n'), ((7510, 7574), 'solutions.MultiSol', 'sols.MultiSol', (['self.idnum', 'chromosome', 'x', 'y', 'self.t', 'fit', '(0)', '(0.0)'], {}), '(self.idnum, chromosome, x, y, self.t, fit, 0, 0.0)\n', (7523, 7574), True, 'import solutions as sols\n'), ((8344, 8385), 'solutions.reduce', 'sols.reduce', (['self.archive', 'self.p', 'self.q'], {}), '(self.archive, self.p, self.q)\n', (8355, 8385), True, 'import solutions as sols\n'), ((12477, 12539), 'solutions.MultiSol', 'sols.MultiSol', (['self.idnum', 'newgenes', 'x', 'y', 'self.t', 'fit', '(0)', '(0.0)'], {}), '(self.idnum, newgenes, x, y, self.t, fit, 0, 0.0)\n', (12490, 12539), True, 'import solutions as sols\n'), ((12864, 12879), 'random.random', 'random.random', ([], {}), '()\n', (12877, 12879), False, 'import random\n'), ((15554, 15577), 'random.choice', 'random.choice', (['openbins'], {}), '(openbins)\n', (15567, 15577), False, 'import random\n'), ((16138, 16177), 'constraints.bincheck', 'constraints.bincheck', (['newbin1', 'self.bpp'], {}), '(newbin1, self.bpp)\n', (16158, 16177), False, 'import constraints\n'), ((16196, 16235), 'constraints.bincheck', 'constraints.bincheck', (['newbin2', 'self.bpp'], {}), '(newbin2, self.bpp)\n', (16216, 16235), False, 'import constraints\n'), ((16880, 16918), 'constraints.bincheck', 'constraints.bincheck', (['newbin', 'self.bpp'], {}), '(newbin, self.bpp)\n', (16900, 16918), False, 'import constraints\n'), ((17625, 17640), 'random.random', 'random.random', ([], {}), '()\n', (17638, 17640), False, 'import random\n'), ((18818, 18833), 'random.random', 'random.random', ([], {}), '()\n', (18831, 18833), False, 'import random\n'), ((19202, 19223), 'numpy.argmax', 'np.argmax', (['binheights'], {}), '(binheights)\n', (19211, 19223), True, 
'import numpy as np\n'), ((19449, 19484), 'numpy.argmin', 'np.argmin', (['binheights[:lastopenbin]'], {}), '(binheights[:lastopenbin])\n', (19458, 19484), True, 'import numpy as np\n'), ((19707, 19728), 'numpy.zeros', 'np.zeros', (['lastopenbin'], {}), '(lastopenbin)\n', (19715, 19728), True, 'import numpy as np\n'), ((20092, 20115), 'random.choice', 'random.choice', (['openbins'], {}), '(openbins)\n', (20105, 20115), False, 'import random\n'), ((24965, 24989), 'os.path.exists', 'os.path.exists', (['pathname'], {}), '(pathname)\n', (24979, 24989), False, 'import os\n'), ((24999, 25017), 'os.mkdir', 'os.mkdir', (['pathname'], {}), '(pathname)\n', (25007, 25017), False, 'import os\n'), ((25114, 25205), 'numpy.savetxt', 'np.savetxt', (["(pathname + solid + '_x.txt')", 'x'], {'fmt': '"""%i"""', 'header': '"""Item Location Matrix x:"""'}), "(pathname + solid + '_x.txt', x, fmt='%i', header=\n 'Item Location Matrix x:')\n", (25124, 25205), True, 'import numpy as np\n'), ((25209, 25296), 'numpy.savetxt', 'np.savetxt', (["(pathname + solid + '_y.txt')", 'y'], {'fmt': '"""%i"""', 'header': '"""Bins Used Matrix y:"""'}), "(pathname + solid + '_y.txt', y, fmt='%i', header=\n 'Bins Used Matrix y:')\n", (25219, 25296), True, 'import numpy as np\n'), ((3006, 3044), 'ga.binsel', 'ga.binsel', (['self.p', 'self.pop', '"""elitism"""'], {}), "(self.p, self.pop, 'elitism')\n", (3015, 3044), False, 'import ga\n'), ((3080, 3114), 'ga.binsel', 'ga.binsel', (['self.p', 'self.pop', '"""cco"""'], {}), "(self.p, self.pop, 'cco')\n", (3089, 3114), False, 'import ga\n'), ((5644, 5685), 'ga.mutat', 'ga.mutat', (['self.n', 'self.newgenes', 'self.pop'], {}), '(self.n, self.newgenes, self.pop)\n', (5652, 5685), False, 'import ga\n'), ((5971, 6019), 'solutions.oldnew', 'sols.oldnew', (['self.archive', 'self.q', 'self.newgenes'], {}), '(self.archive, self.q, self.newgenes)\n', (5982, 6019), True, 'import solutions as sols\n'), ((6095, 6140), 'binpacking.ed', 'bp.ed', (['self.idnum', 
'self.newgenes[m]', 'self.bpp'], {}), '(self.idnum, self.newgenes[m], self.bpp)\n', (6100, 6140), True, 'import binpacking as bp\n'), ((6959, 7004), 'binpacking.ed', 'bp.ed', (['self.idnum', 'self.newgenes[m]', 'self.bpp'], {}), '(self.idnum, self.newgenes[m], self.bpp)\n', (6964, 7004), True, 'import binpacking as bp\n'), ((11432, 11447), 'random.random', 'random.random', ([], {}), '()\n', (11445, 11447), False, 'import random\n'), ((12927, 12942), 'random.random', 'random.random', ([], {}), '()\n', (12940, 12942), False, 'import random\n'), ((15677, 15700), 'random.choice', 'random.choice', (['openbins'], {}), '(openbins)\n', (15690, 15700), False, 'import random\n'), ((17685, 17708), 'random.choice', 'random.choice', (['openbins'], {}), '(openbins)\n', (17698, 17708), False, 'import random\n'), ((17929, 17952), 'random.choice', 'random.choice', (['openbins'], {}), '(openbins)\n', (17942, 17952), False, 'import random\n'), ((20227, 20250), 'random.choice', 'random.choice', (['openbins'], {}), '(openbins)\n', (20240, 20250), False, 'import random\n'), ((5238, 5254), 'operator.attrgetter', 'attrgetter', (['"""cd"""'], {}), "('cd')\n", (5248, 5254), False, 'from operator import attrgetter\n'), ((8721, 8737), 'operator.attrgetter', 'attrgetter', (['"""cd"""'], {}), "('cd')\n", (8731, 8737), False, 'from operator import attrgetter\n'), ((9035, 9054), 'operator.attrgetter', 'attrgetter', (['"""index"""'], {}), "('index')\n", (9045, 9054), False, 'from operator import attrgetter\n'), ((10304, 10322), 'operator.attrgetter', 'attrgetter', (['"""fit0"""'], {}), "('fit0')\n", (10314, 10322), False, 'from operator import attrgetter\n'), ((14885, 14924), 'constraints.bincheck', 'constraints.bincheck', (['newbin1', 'self.bpp'], {}), '(newbin1, self.bpp)\n', (14905, 14924), False, 'import constraints\n'), ((15070, 15109), 'constraints.bincheck', 'constraints.bincheck', (['newbin2', 'self.bpp'], {}), '(newbin2, self.bpp)\n', (15090, 15109), False, 'import constraints\n'), 
((19003, 19022), 'random.choice', 'random.choice', (['bins'], {}), '(bins)\n', (19016, 19022), False, 'import random\n'), ((6692, 6707), 'numpy.equal', 'np.equal', (['x', 'qx'], {}), '(x, qx)\n', (6700, 6707), True, 'import numpy as np\n'), ((7075, 7092), 'numpy.equal', 'np.equal', (['x', 'newx'], {}), '(x, newx)\n', (7083, 7092), True, 'import numpy as np\n'), ((14621, 14645), 'random.choice', 'random.choice', (['binitems1'], {}), '(binitems1)\n', (14634, 14645), False, 'import random\n'), ((14671, 14695), 'random.choice', 'random.choice', (['binitems2'], {}), '(binitems2)\n', (14684, 14695), False, 'import random\n'), ((4754, 4778), 'random.choice', 'random.choice', (['fronts[0]'], {}), '(fronts[0])\n', (4767, 4778), False, 'import random\n'), ((4833, 4857), 'random.choice', 'random.choice', (['fronts[1]'], {}), '(fronts[1])\n', (4846, 4857), False, 'import random\n'), ((5142, 5167), 'random.choice', 'random.choice', (['mneighbors'], {}), '(mneighbors)\n', (5155, 5167), False, 'import random\n'), ((10746, 10762), 'operator.attrgetter', 'attrgetter', (['"""cd"""'], {}), "('cd')\n", (10756, 10762), False, 'from operator import attrgetter\n'), ((13921, 13945), 'random.choice', 'random.choice', (['binitems1'], {}), '(binitems1)\n', (13934, 13945), False, 'import random\n')] |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Numerical Integration - The Gauss-Lobatto-Legendre approach</div>
# </div>
# </div>
# </div>
# <p style="width:20%;float:right;padding-left:50px">
# <img src=../../share/images/book.jpg>
# <span style="font-size:smaller">
# </span>
# </p>
#
#
# ---
#
# This notebook is part of the supplementary material
# to [Computational Seismology: A Practical Introduction](https://global.oup.com/academic/product/computational-seismology-9780198717416?cc=de&lang=en&#),
# Oxford University Press, 2016.
#
#
# ##### Authors:
# * <NAME> ([@heinerigel](https://github.com/heinerigel))
# * <NAME> ([@swollherr](https://github.com/swollherr))
# * <NAME> ([@flo-woelfl](https://github.com/flo-woelfl))
# ---
# The following notebook presents a basic integration scheme that we're going to use in the Spectral Element Code as well as in the Discontinuous Galerkin Code to calculate the entries of the mass and stiffness matrices.
# **Fundamental principle:**<br>
# Replace the function $f(x)$ that we want to integrate by a polynomial approximation that can be integrated analytically.
#
# As interpolating functions we use the Lagrange polynomials $l_i$ and obtain the following integration scheme for an arbitrary function $f(x)$ defined on the interval $[-1,1]$ :
# \begin{eqnarray*} \int_{-1}^1 f(x) \ dx \approx \int _{-1}^1 P_N(x) dx = \sum_{i=1}^{N+1} w_i f(x_i) \end{eqnarray*}
# with
# \begin{eqnarray*}
# P_N(x)= \sum_{i=1}^{N+1} f(x_i) \ l_i^{(N)}(x).
# \end{eqnarray*}
# As collocation points we use the Gauss-Lobatto-Legendre points $x_i$ and the corresponding weights that are needed to evaluate the integral are calculated as follows:
# \begin{eqnarray*} w_i= \int_{-1}^1 l_i^{(N)}(x) \ dx \end{eqnarray*}.
# ## Exercises
# We want to investigate the performance of
# the numerical integration scheme. You can use the `gll()` routine to
# obtain the integration weights $w_i$ for an
# arbitrary function f(x) and the relevant integration points $x_i$.
#
# ### 1. Numerical integration of an arbritrary function:
# Define a function $f(x)$
# of your choice and calculate analytically the
# integral $\int f(x) \ dx$ for the interval $[−1, 1]$. Perform the integration numerically and compare the results.
#
# ### 2. The order of integration
# Modify the function and
# the order of the numerical integration. Discuss the results.
#
# ### Have fun!
# + {"code_folding": [0]}
# This is a configuration step for the exercise. Please run it before the simulation code!
import numpy as np
import matplotlib
# Show Plot in The Notebook
matplotlib.use("nbagg")
import matplotlib.pyplot as plt
from gll import gll
from lagrange2 import lagrange2
# Prettier plots.
plt.style.use('ggplot')
# +
# Exercise for Gauss integration
n = 1000
x = np.linspace(-1, 1, n)
# MODIFY f and intf to test different functions!
f = np.sin(x * np.pi)
# Analytical value of the DEFINITE integral from -1 to 1
intf = 1.0 / np.pi * (-np.cos(1.0 * np.pi) + np.cos(-1.0 * np.pi))
# Choose order
N = 4
# Uncomment for interactivity.
# N =int(input('Give order of integration: '))
# Get integration points and weights from the gll routine
xi, w = gll(N)
# Initialize function at points xi
fi = np.interp(xi, x, f)
# Evaluate integral: quadrature sum of weights times function values
intfn = 0
for i in range(len(w)):
    intfn = intfn + w[i] * fi[i]
# Calculate Lagrange Interpolant for plotting purposes.
# lp[j+1, i] is the j-th Lagrange basis polynomial evaluated at x[i].
lp = np.zeros((N + 1, len(x)))
for i in range(0, len(x)):
    for j in range(-1, N):
        lp[j + 1, i] = lagrange2(N, j, x[i], xi)
# Interpolating polynomial: weighted sum of the Lagrange basis functions.
s = np.zeros_like(x)
for j in range(0, N + 1):
    s = s + lp[j, :] * fi[j]
print('Solution of the analytical integral: %g' % intf)
print('Solution of the numerical integral: %g' % intfn)
# Plot results.
plt.figure(figsize=(10, 5))
plt.plot(x, f, 'k-', label='Analytical Function')
plt.plot(xi, fi, 'bs', label='GLL points')
plt.plot(x, s, label='Lagrange Interpolant')
plt.fill_between(x, s, np.zeros_like(x), alpha=0.3)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Numerical vs. Analytical Function')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.style.use",
"numpy.sin",
"matplotlib.use",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.cos",
"numpy.interp",
"matplotlib.pyplot.ylabel",
... | [((3451, 3474), 'matplotlib.use', 'matplotlib.use', (['"""nbagg"""'], {}), "('nbagg')\n", (3465, 3474), False, 'import matplotlib\n'), ((3579, 3602), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3592, 3602), True, 'import matplotlib.pyplot as plt\n'), ((3655, 3676), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'n'], {}), '(-1, 1, n)\n', (3666, 3676), True, 'import numpy as np\n'), ((3731, 3748), 'numpy.sin', 'np.sin', (['(x * np.pi)'], {}), '(x * np.pi)\n', (3737, 3748), True, 'import numpy as np\n'), ((4041, 4047), 'gll.gll', 'gll', (['N'], {}), '(N)\n', (4044, 4047), False, 'from gll import gll\n'), ((4089, 4108), 'numpy.interp', 'np.interp', (['xi', 'x', 'f'], {}), '(xi, x, f)\n', (4098, 4108), True, 'import numpy as np\n'), ((4392, 4408), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (4405, 4408), True, 'import numpy as np\n'), ((4594, 4621), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4604, 4621), True, 'import matplotlib.pyplot as plt\n'), ((4622, 4671), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'f', '"""k-"""'], {'label': '"""Analytical Function"""'}), "(x, f, 'k-', label='Analytical Function')\n", (4630, 4671), True, 'import matplotlib.pyplot as plt\n'), ((4673, 4715), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'fi', '"""bs"""'], {'label': '"""GLL points"""'}), "(xi, fi, 'bs', label='GLL points')\n", (4681, 4715), True, 'import matplotlib.pyplot as plt\n'), ((4716, 4760), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 's'], {'label': '"""Lagrange Interpolant"""'}), "(x, s, label='Lagrange Interpolant')\n", (4724, 4760), True, 'import matplotlib.pyplot as plt\n'), ((4813, 4828), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4823, 4828), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4844), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (4839, 4844), True, 'import matplotlib.pyplot 
as plt\n'), ((4845, 4891), 'matplotlib.pyplot.title', 'plt.title', (['"""Numerical vs. Analytical Function"""'], {}), "('Numerical vs. Analytical Function')\n", (4854, 4891), True, 'import matplotlib.pyplot as plt\n'), ((4893, 4905), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4903, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4906, 4916), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4914, 4916), True, 'import matplotlib.pyplot as plt\n'), ((4784, 4800), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (4797, 4800), True, 'import numpy as np\n'), ((3852, 3872), 'numpy.cos', 'np.cos', (['(-1.0 * np.pi)'], {}), '(-1.0 * np.pi)\n', (3858, 3872), True, 'import numpy as np\n'), ((4362, 4387), 'lagrange2.lagrange2', 'lagrange2', (['N', 'j', 'x[i]', 'xi'], {}), '(N, j, x[i], xi)\n', (4371, 4387), False, 'from lagrange2 import lagrange2\n'), ((3830, 3849), 'numpy.cos', 'np.cos', (['(1.0 * np.pi)'], {}), '(1.0 * np.pi)\n', (3836, 3849), True, 'import numpy as np\n')] |
import numpy as np
from enum import Enum, auto
# XOR truth table used as the training set.
# Each row is one training case: the first two columns are the inputs,
# the last column is the expected output.
input_data_set = np.array(
    [[0, 0, 0],
     [0, 1, 1],
     [1, 0, 1],
     [1, 1, 0]]
)
def sigmoid(x, derivative=False):
    """Logistic sigmoid 1 / (1 + exp(-x)), or its derivative.

    Args:
        x: scalar or numpy array.
        derivative: when True, return sigmoid'(x) = s * (1 - s).

    The original recursed and evaluated sigmoid(x) twice on the derivative
    branch; the value is now computed once and reused.
    """
    s = 1 / (1 + np.exp(-x))
    if derivative:
        return s * (1 - s)
    return s
class Layer:
    """One network layer: weight matrix, shared scalar bias and activation.

    The input layer is special-cased with identity weights, zero bias and an
    identity activation, so it simply passes the data through.
    """
    class LayerType(Enum):
        IN = auto()
        HIDDEN = auto()
        OUT = auto()
    def __init__(self, layer_id, layer_type, input_size, neuron_count=1, act_func=None):
        self.layer_id = layer_id
        self.layer_type = layer_type
        # Buffers filled during compute(); np.empty contents are garbage
        # until the first forward pass.
        self.data_input = np.empty(input_size)
        self.weight_sum = np.empty(input_size)
        if layer_type == self.LayerType.OUT and neuron_count != 1:
            raise ValueError('Output layer must have only one neuron')
        if layer_type == self.LayerType.IN:
            if act_func is not None:
                raise ValueError('Activation function cannot be defined for the input layer')
            if input_size != neuron_count:
                raise ValueError('Input size must be equal to neuron count for the input layer')
            self.weight = np.identity(input_size)
            self.bias = 0
            self.act_func = lambda x: x  # identity pass-through
        else:
            # Random init in [0, 1); one scalar bias shared by all neurons.
            self.weight = np.random.random_sample((neuron_count, input_size))
            self.bias = np.random.random_sample()
            self.delta = np.empty(neuron_count)  # backprop error term
            self.act_func = act_func
        print('Initialized layer', layer_type.name, layer_id, 'with weight matrix\n', self.weight,
              '\nand bias %.4f' % self.bias)
    def compute(self, data_input):
        # Forward pass: remember the input, form the biased weighted sum and
        # apply the activation element-wise.
        self.data_input = data_input
        self.weight_sum = self.weight @ data_input
        self.weight_sum += np.full_like(self.weight_sum, self.bias)
        return np.vectorize(self.act_func)(self.weight_sum)
    def compute_layer_delta(self, delta):
        # Store this layer's error term and return the delta propagated to
        # the previous layer (delta times this layer's weights).
        if self.layer_type == self.LayerType.IN:
            raise ValueError('Input layer can not have deltas')
        self.delta = delta
        return delta @ self.weight
    def update_weights(self, learn_rate, verbose):
        # Gradient step: delta * act'(weight_sum) * input for the weights;
        # the shared bias moves by sum(grad) * sum(delta).
        grad = np.array([self.act_func(x=x, derivative=True) for x in self.weight_sum])
        d_weight = np.array([self.delta * grad]).T * self.data_input
        d_bias = np.sum(grad) * np.sum(self.delta)
        if verbose:
            print('---------------------')
            print('\t', self.layer_type.name, self.layer_id, 'input', self.data_input)
            print('\t', self.layer_type.name, self.layer_id, 'sum', self.weight_sum)
            print('\t', self.layer_type.name, self.layer_id, 'grad', grad)
            print('\t', self.layer_type.name, self.layer_id, 'delta', self.delta)
            print('\t', self.layer_type.name, self.layer_id, 'delta * grad * input\n', d_weight)
            print('\t', self.layer_type.name, self.layer_id, 'd_bias\n', d_bias)
        self.weight += learn_rate * d_weight
        self.bias += learn_rate * d_bias
        if verbose:
            print('\t', self.layer_type.name, self.layer_id, 'weight\n', self.weight)
class Neuronet:
    """A small fully-connected feed-forward network trained with backprop.

    ``hidden_layer_neuron_count`` is a list of neuron counts for each hidden
    layer; e.g. [3, 4] means two hidden layers with 3 and 4 neurons.
    """

    def __init__(self, input_size, hidden_layer_neuron_count, act_func):
        # BUG FIX: `layers` used to be a *class* attribute, so every
        # Neuronet instance shared -- and kept appending to -- the same
        # list.  It is now a per-instance attribute.
        self.layers = []
        self.epoch_count = 0
        self.add_layer(layer_id=0,
                       layer_type=Layer.LayerType.IN,
                       input_size=input_size,
                       neuron_count=input_size)
        next_layer_input_size = input_size
        for i in range(len(hidden_layer_neuron_count)):
            self.add_layer(layer_id=i + 1,
                           layer_type=Layer.LayerType.HIDDEN,
                           input_size=next_layer_input_size,
                           neuron_count=hidden_layer_neuron_count[i],
                           act_func=act_func)
            next_layer_input_size = hidden_layer_neuron_count[i]
        self.add_layer(layer_id=len(hidden_layer_neuron_count) + 1,
                       layer_type=Layer.LayerType.OUT,
                       input_size=next_layer_input_size,
                       act_func=act_func)
        print('Neuronet created!')

    def add_layer(self, layer_id, layer_type, input_size, neuron_count=1, act_func=None):
        """Append a new Layer with the given configuration."""
        self.layers.append(Layer(layer_id=layer_id,
                                 layer_type=layer_type,
                                 input_size=input_size,
                                 neuron_count=neuron_count,
                                 act_func=act_func))

    def iterate(self, input_row):
        """Forward pass: feed input_row through every layer; return the output."""
        for layer in self.layers:
            input_row = layer.compute(input_row)
        return input_row

    def backpropagate(self, delta_out):
        """Propagate the output error backwards, storing deltas per layer."""
        delta = np.array([delta_out])
        for idx in range(len(self.layers) - 1, 0, -1):
            delta = self.layers[idx].compute_layer_delta(delta)

    def update_weight(self, learn_rate, verbose):
        """Apply the stored deltas as weight/bias updates (input layer skipped)."""
        for layer in self.layers[1:]:
            layer.update_weights(learn_rate, verbose)

    def epoch(self, data_set, learn_rate, verbose):
        """Run one online-update pass over data_set; return the epoch MSE.

        Each row of data_set holds the inputs followed by the expected
        output in the last column.
        """
        if verbose:
            print('----------------------------------------')
        epoch_errors = np.empty(np.shape(data_set)[0])
        epoch_results = np.empty(np.shape(data_set)[0])
        for idx, row in enumerate(data_set):
            epoch_results[idx] = self.iterate(row[0:-1])
            epoch_errors[idx] = row[-1] - epoch_results[idx]
            self.backpropagate(epoch_errors[idx])
            self.update_weight(learn_rate, verbose)
        mse = np.square(epoch_errors).mean()
        self.epoch_count += 1
        print('<%u>' % self.epoch_count, 'MSE = %.3f,' % mse, epoch_results)
        return mse

    def learn(self, data_set, learn_rate, thresold=-1.0, max_epochs=-1, verbose=False):
        """Train until the MSE drops to `thresold` or `max_epochs` is reached.

        Negative values disable the corresponding stop condition.  (The
        `thresold` spelling is kept for backward compatibility with callers
        passing it by keyword.)
        """
        while True:
            mse = self.epoch(data_set=data_set, learn_rate=learn_rate, verbose=verbose)
            if (thresold > 0 and mse <= thresold) or (0 < max_epochs <= self.epoch_count):
                break

    def reset_epochs(self):
        """Reset the epoch counter to zero."""
        self.epoch_count = 0
if __name__ == "__main__":
    np.set_printoptions(precision=3)  # compact float printing for weight dumps
    # XOR network: 2 inputs, two hidden layers of 2 neurons each, sigmoid
    # activation throughout; train until the MSE drops below 0.005.
    net = Neuronet(input_size=2,
                   hidden_layer_neuron_count=[2, 2],
                   act_func=sigmoid)
    net.learn(data_set=input_data_set,
              learn_rate=0.1,
              thresold=0.005,
              verbose=False)
| [
"numpy.full_like",
"numpy.set_printoptions",
"numpy.vectorize",
"numpy.random.random_sample",
"numpy.sum",
"numpy.empty",
"numpy.square",
"numpy.identity",
"numpy.shape",
"numpy.array",
"numpy.exp",
"enum.auto"
] | [((166, 220), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (174, 220), True, 'import numpy as np\n'), ((6416, 6448), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (6435, 6448), True, 'import numpy as np\n'), ((445, 451), 'enum.auto', 'auto', ([], {}), '()\n', (449, 451), False, 'from enum import Enum, auto\n'), ((470, 476), 'enum.auto', 'auto', ([], {}), '()\n', (474, 476), False, 'from enum import Enum, auto\n'), ((492, 498), 'enum.auto', 'auto', ([], {}), '()\n', (496, 498), False, 'from enum import Enum, auto\n'), ((690, 710), 'numpy.empty', 'np.empty', (['input_size'], {}), '(input_size)\n', (698, 710), True, 'import numpy as np\n'), ((738, 758), 'numpy.empty', 'np.empty', (['input_size'], {}), '(input_size)\n', (746, 758), True, 'import numpy as np\n'), ((1880, 1920), 'numpy.full_like', 'np.full_like', (['self.weight_sum', 'self.bias'], {}), '(self.weight_sum, self.bias)\n', (1892, 1920), True, 'import numpy as np\n'), ((5017, 5038), 'numpy.array', 'np.array', (['[delta_out]'], {}), '([delta_out])\n', (5025, 5038), True, 'import numpy as np\n'), ((373, 383), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (379, 383), True, 'import numpy as np\n'), ((1246, 1269), 'numpy.identity', 'np.identity', (['input_size'], {}), '(input_size)\n', (1257, 1269), True, 'import numpy as np\n'), ((1380, 1431), 'numpy.random.random_sample', 'np.random.random_sample', (['(neuron_count, input_size)'], {}), '((neuron_count, input_size))\n', (1403, 1431), True, 'import numpy as np\n'), ((1457, 1482), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (1480, 1482), True, 'import numpy as np\n'), ((1509, 1531), 'numpy.empty', 'np.empty', (['neuron_count'], {}), '(neuron_count)\n', (1517, 1531), True, 'import numpy as np\n'), ((1937, 1964), 'numpy.vectorize', 'np.vectorize', (['self.act_func'], {}), '(self.act_func)\n', 
(1949, 1964), True, 'import numpy as np\n'), ((2437, 2449), 'numpy.sum', 'np.sum', (['grad'], {}), '(grad)\n', (2443, 2449), True, 'import numpy as np\n'), ((2452, 2470), 'numpy.sum', 'np.sum', (['self.delta'], {}), '(self.delta)\n', (2458, 2470), True, 'import numpy as np\n'), ((2369, 2398), 'numpy.array', 'np.array', (['[self.delta * grad]'], {}), '([self.delta * grad])\n', (2377, 2398), True, 'import numpy as np\n'), ((5477, 5495), 'numpy.shape', 'np.shape', (['data_set'], {}), '(data_set)\n', (5485, 5495), True, 'import numpy as np\n'), ((5534, 5552), 'numpy.shape', 'np.shape', (['data_set'], {}), '(data_set)\n', (5542, 5552), True, 'import numpy as np\n'), ((5842, 5865), 'numpy.square', 'np.square', (['epoch_errors'], {}), '(epoch_errors)\n', (5851, 5865), True, 'import numpy as np\n')] |
import numpy as np
from scipy import interpolate
# Tool A: Interpolation for velocity points
def interpolation(label_point, t_interval, v_interval=None):
    """Linearly interpolate labeled (t, v) points onto an integer time grid.

    label_point: iterable of (t, v) pairs; sorted by t before fitting.
    t_interval:  time points to evaluate at (cast to int).
    v_interval:  optional value range; results are clipped to
                 [v_interval[0], v_interval[-1]] when given.
    Returns an (N, 2) array whose rows are [t, interpolated v].
    """
    # sort the label points by time so interp1d sees increasing x values
    label_point = np.array(sorted(label_point, key=lambda t_v: t_v[0]))
    # ensure the input is int
    t0_vec = np.array(t_interval).astype(int)
    # get the ground truth curve using linear interpolation
    # (extrapolates linearly outside the labeled range)
    # NOTE(review): label_point is already an ndarray here, so this second
    # np.array() call is redundant — harmless, but could be simplified.
    peaks_selected = np.array(label_point)
    func = interpolate.interp1d(peaks_selected[:, 0], peaks_selected[:, 1], kind='linear', fill_value="extrapolate")
    y = func(t0_vec)
    if v_interval is not None:
        # clamp the interpolated values to the allowed range
        v_vec = np.array(v_interval).astype(int)
        y = np.clip(y, v_vec[0], v_vec[-1])
    return np.hstack((t0_vec.reshape((-1, 1)), y.reshape((-1, 1)))) | [
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.clip"
] | [((408, 429), 'numpy.array', 'np.array', (['label_point'], {}), '(label_point)\n', (416, 429), True, 'import numpy as np\n'), ((441, 551), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['peaks_selected[:, 0]', 'peaks_selected[:, 1]'], {'kind': '"""linear"""', 'fill_value': '"""extrapolate"""'}), "(peaks_selected[:, 0], peaks_selected[:, 1], kind=\n 'linear', fill_value='extrapolate')\n", (461, 551), False, 'from scipy import interpolate\n'), ((661, 692), 'numpy.clip', 'np.clip', (['y', 'v_vec[0]', 'v_vec[-1]'], {}), '(y, v_vec[0], v_vec[-1])\n', (668, 692), True, 'import numpy as np\n'), ((300, 320), 'numpy.array', 'np.array', (['t_interval'], {}), '(t_interval)\n', (308, 320), True, 'import numpy as np\n'), ((615, 635), 'numpy.array', 'np.array', (['v_interval'], {}), '(v_interval)\n', (623, 635), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Author : <NAME>
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from keras.models import Sequential, Model
from keras.layers import Convolution2D, MaxPooling2D, Conv3D, MaxPooling3D, ZeroPadding3D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization, Input
from keras.utils.np_utils import to_categorical
from scipy.io import savemat
from sklearn.decomposition import PCA
from keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam
import keras.callbacks as kcallbacks
from keras.regularizers import l2
import time
import collections
from sklearn import metrics, preprocessing
from keras import backend as K
from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ResneXt_IN_Dual_Network
def indexToAssignment(index_, Row, Col, pad_length):
    """Map flat pixel indices to (row, col) coordinates in the padded cube.

    Each flat index is converted to its 2-D position and shifted by
    pad_length so it addresses the zero-padded data directly.
    Row is accepted for signature symmetry with assignmentToIndex but is
    not needed by the computation. Returns {position: [row, col]}.
    """
    return {
        pos: [flat // Col + pad_length, flat % Col + pad_length]
        for pos, flat in enumerate(index_)
    }
def assignmentToIndex(assign_0, assign_1, Row, Col):
    """Flatten a (row, col) coordinate back to a row-major index.

    Inverse of indexToAssignment (ignoring padding); Row is unused and
    kept only for signature symmetry.
    """
    return assign_0 * Col + assign_1
def selectNeighboringPatch(matrix, pos_row, pos_col, ex_len):
    """Extract the square spatial patch centered at (pos_row, pos_col).

    The patch spans ex_len pixels on each side, i.e. it is
    (2*ex_len+1) x (2*ex_len+1) in the first two axes. Fancy indexing
    via np.ix_ returns a copy, matching the original behavior.
    """
    row_span = range(pos_row - ex_len, pos_row + ex_len + 1)
    col_span = range(pos_col - ex_len, pos_col + ex_len + 1)
    return matrix[np.ix_(row_span, col_span)]
def sampling(proptionVal, groundTruth):  # divide dataset into train and test datasets
    """Split labeled pixel indices into shuffled train/test index lists.

    For every class label 1..max(groundTruth), the matching flat indices
    are shuffled and the last proptionVal fraction goes to test, the rest
    to train. Note: when int(proptionVal * count) is 0 the [:-0] slice
    leaves the train split for that class empty — behavior preserved from
    the original. Both final lists are shuffled again before returning.
    """
    flat_labels = groundTruth.ravel().tolist()
    num_classes = max(groundTruth)
    labels_loc = {}
    train = {}
    test = {}
    for cls in range(num_classes):
        cls_indices = [pos for pos, lab in enumerate(flat_labels) if lab == cls + 1]
        np.random.shuffle(cls_indices)
        labels_loc[cls] = cls_indices
        nb_val = int(proptionVal * len(cls_indices))
        train[cls] = cls_indices[:-nb_val]
        test[cls] = cls_indices[-nb_val:]
    train_indices = []
    test_indices = []
    for cls in range(num_classes):
        train_indices += train[cls]
        test_indices += test[cls]
    np.random.shuffle(train_indices)
    np.random.shuffle(test_indices)
    return train_indices, test_indices
def model():
    """Build and compile the 3D-ResNeXt classifier.

    Uses the module-level input geometry (img_rows/img_cols/img_channels)
    with 16 output classes, RMSprop at lr=0.0003, and a label-smoothing
    style loss: a (1-e)/e mix of the usual cross-entropy and the
    cross-entropy against a uniform target distribution.
    """
    net = ResneXt_IN_Dual_Network.ResneXt_IN(
        (1, img_rows, img_cols, img_channels), cardinality=8, classes=16)
    optimizer = RMSprop(lr=0.0003)

    # Keras serializes the loss by function name, so keep it as-is.
    def mycrossentropy(y_true, y_pred, e=0.1):
        hard_loss = K.categorical_crossentropy(y_true, y_pred)
        uniform_loss = K.categorical_crossentropy(K.ones_like(y_pred) / nb_classes, y_pred)
        return (1 - e) * hard_loss + e * uniform_loss

    net.compile(loss=mycrossentropy, optimizer=optimizer, metrics=['accuracy'])
    return net
# Evaluation script: load the Indian Pines cube + ground truth, cut it into
# spatial patches, restore saved 3D-ResNeXt weights and report OA/AA/kappa.
mat_data = sio.loadmat('D:/3D-ResNeXt-master/Datasets/IN/Indian_pines_corrected.mat')
data_IN = mat_data['indian_pines_corrected']
mat_gt = sio.loadmat('D:/3D-ResNeXt-master/Datasets/IN/Indian_pines_gt.mat')
gt_IN = mat_gt['indian_pines_gt']
print(data_IN.shape)
new_gt_IN = gt_IN
# Hyper-parameters / dataset geometry.
batch_size = 16
nb_classes = 16
nb_epoch = 100
img_rows, img_cols = 11, 11  # spatial patch size fed to the network
patience = 100
INPUT_DIMENSION_CONV = 200  # number of spectral bands
INPUT_DIMENSION = 200
TOTAL_SIZE = 10249  # labeled pixels in Indian Pines
VAL_SIZE = 1025
TRAIN_SIZE = 5128  # alternatives: 1031 2055 3081 4106 5128 6153
TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE
VALIDATION_SPLIT = 0.5
img_channels = 200
PATCH_LENGTH = 5  # half patch size: patches are 2*PATCH_LENGTH+1 wide
# Flatten spatially, standardize each band, then restore the cube shape.
data = data_IN.reshape(np.prod(data_IN.shape[:2]), np.prod(data_IN.shape[2:]))
gt = new_gt_IN.reshape(np.prod(new_gt_IN.shape[:2]), )
data = preprocessing.scale(data)
data_ = data.reshape(data_IN.shape[0], data_IN.shape[1], data_IN.shape[2])
whole_data = data_
# Zero-pad the borders so edge pixels get full patches.
padded_data = zeroPadding.zeroPadding_3D(whole_data, PATCH_LENGTH)
ITER = 1
CATEGORY = 16
train_data = np.zeros((TRAIN_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
test_data = np.zeros((TEST_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
# Per-iteration metric accumulators.
KAPPA_3D_ResNeXt = []
OA_3D_ResNeXt = []
AA_3D_ResNeXt = []
TRAINING_TIME_3D_ResNeXt = []
TESTING_TIME_3D_ResNeXt = []
ELEMENT_ACC_3D_ResNeXt = np.zeros((ITER, CATEGORY))
seeds = [1334]
for index_iter in range(ITER):
    print("# %d Iteration" % (index_iter + 1))
    best_weights_ResNeXt_path = 'D:/3D-ResNeXt-master/models/Indian_best_3D_ResNeXt_5_1_4_60_' + str(
        index_iter + 1) + '.hdf5'
    np.random.seed(seeds[index_iter])
    # train_indices, test_indices = sampleFixNum.samplingFixedNum(TRAIN_NUM, gt)
    train_indices, test_indices = sampling(VALIDATION_SPLIT, gt)
    # Labels are 1-based in the ground truth; shift to 0-based for one-hot.
    y_train = gt[train_indices] - 1
    y_train = to_categorical(np.asarray(y_train))
    y_test = gt[test_indices] - 1
    y_test = to_categorical(np.asarray(y_test))
    # Gather the spatial patch around every selected pixel.
    train_assign = indexToAssignment(train_indices, whole_data.shape[0], whole_data.shape[1], PATCH_LENGTH)
    for i in range(len(train_assign)):
        train_data[i] = selectNeighboringPatch(padded_data, train_assign[i][0], train_assign[i][1], PATCH_LENGTH)
    test_assign = indexToAssignment(test_indices, whole_data.shape[0], whole_data.shape[1], PATCH_LENGTH)
    for i in range(len(test_assign)):
        test_data[i] = selectNeighboringPatch(padded_data, test_assign[i][0], test_assign[i][1], PATCH_LENGTH)
    x_train = train_data.reshape(train_data.shape[0], train_data.shape[1], train_data.shape[2], INPUT_DIMENSION_CONV)
    x_test_all = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], INPUT_DIMENSION_CONV)
    # Last VAL_SIZE test samples are held out as a validation split.
    x_val = x_test_all[-VAL_SIZE:]
    y_val = y_test[-VAL_SIZE:]
    x_test = x_test_all[:-VAL_SIZE]
    y_test = y_test[:-VAL_SIZE]
    # Restore trained weights and predict (extra trailing axis of size 1
    # makes the input 5-D for the 3-D convolutions).
    model_ResNeXt = model()
    model_ResNeXt.load_weights(best_weights_ResNeXt_path)
    pred_test = model_ResNeXt.predict(
        x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3], 1)).argmax(axis=1)
    counter = collections.Counter(pred_test)
    gt_test = gt[test_indices] - 1
    # Overall accuracy, per-class accuracy, average accuracy and kappa.
    overall_acc = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])
    confusion_matrix = metrics.confusion_matrix(pred_test, gt_test[:-VAL_SIZE])
    each_acc, average_acc = averageAccuracy.AA_andEachClassAccuracy(confusion_matrix)
    kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])
    KAPPA_3D_ResNeXt.append(kappa)
    OA_3D_ResNeXt.append(overall_acc)
    AA_3D_ResNeXt.append(average_acc)
    # TRAINING_TIME_3D_ResNeXt.append(toc6 - tic6)
    # TESTING_TIME_3D_ResNeXt.append(toc7 - tic7)
    ELEMENT_ACC_3D_ResNeXt[index_iter, :] = each_acc
    print("3D ResNeXt finished.")
    print("# %d Iteration" % (index_iter + 1))
    # print(counter)
# Write the aggregated statistics to the record files.
modelStatsRecord.outputStats_assess(KAPPA_3D_ResNeXt, OA_3D_ResNeXt, AA_3D_ResNeXt, ELEMENT_ACC_3D_ResNeXt,
                                    CATEGORY,
                                    'D:/3D-ResNeXt-master/records/IN_test_3D_ResNeXt_5_1_4_60_1.txt',
                                    'D:/3D-ResNeXt-master/records/IN_test_element_3D_ResNeXt_5_1_4_60_1.txt')
| [
"sklearn.metrics.confusion_matrix",
"Utils.modelStatsRecord.outputStats_assess",
"Utils.zeroPadding.zeroPadding_3D",
"Utils.ResneXt_IN_Dual_Network.ResneXt_IN",
"numpy.random.seed",
"sklearn.preprocessing.scale",
"scipy.io.loadmat",
"keras.backend.categorical_crossentropy",
"sklearn.metrics.accuracy... | [((2781, 2855), 'scipy.io.loadmat', 'sio.loadmat', (['"""D:/3D-ResNeXt-master/Datasets/IN/Indian_pines_corrected.mat"""'], {}), "('D:/3D-ResNeXt-master/Datasets/IN/Indian_pines_corrected.mat')\n", (2792, 2855), True, 'import scipy.io as sio\n'), ((2910, 2977), 'scipy.io.loadmat', 'sio.loadmat', (['"""D:/3D-ResNeXt-master/Datasets/IN/Indian_pines_gt.mat"""'], {}), "('D:/3D-ResNeXt-master/Datasets/IN/Indian_pines_gt.mat')\n", (2921, 2977), True, 'import scipy.io as sio\n'), ((3574, 3599), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['data'], {}), '(data)\n', (3593, 3599), False, 'from sklearn import metrics, preprocessing\n'), ((3710, 3762), 'Utils.zeroPadding.zeroPadding_3D', 'zeroPadding.zeroPadding_3D', (['whole_data', 'PATCH_LENGTH'], {}), '(whole_data, PATCH_LENGTH)\n', (3736, 3762), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ResneXt_IN_Dual_Network\n'), ((3807, 3899), 'numpy.zeros', 'np.zeros', (['(TRAIN_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, INPUT_DIMENSION_CONV)'], {}), '((TRAIN_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,\n INPUT_DIMENSION_CONV))\n', (3815, 3899), True, 'import numpy as np\n'), ((3908, 3999), 'numpy.zeros', 'np.zeros', (['(TEST_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1, INPUT_DIMENSION_CONV)'], {}), '((TEST_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,\n INPUT_DIMENSION_CONV))\n', (3916, 3999), True, 'import numpy as np\n'), ((4141, 4167), 'numpy.zeros', 'np.zeros', (['(ITER, CATEGORY)'], {}), '((ITER, CATEGORY))\n', (4149, 4167), True, 'import numpy as np\n'), ((6647, 6916), 'Utils.modelStatsRecord.outputStats_assess', 'modelStatsRecord.outputStats_assess', (['KAPPA_3D_ResNeXt', 'OA_3D_ResNeXt', 'AA_3D_ResNeXt', 'ELEMENT_ACC_3D_ResNeXt', 'CATEGORY', '"""D:/3D-ResNeXt-master/records/IN_test_3D_ResNeXt_5_1_4_60_1.txt"""', '"""D:/3D-ResNeXt-master/records/IN_test_element_3D_ResNeXt_5_1_4_60_1.txt"""'], {}), 
"(KAPPA_3D_ResNeXt, OA_3D_ResNeXt,\n AA_3D_ResNeXt, ELEMENT_ACC_3D_ResNeXt, CATEGORY,\n 'D:/3D-ResNeXt-master/records/IN_test_3D_ResNeXt_5_1_4_60_1.txt',\n 'D:/3D-ResNeXt-master/records/IN_test_element_3D_ResNeXt_5_1_4_60_1.txt')\n", (6682, 6916), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ResneXt_IN_Dual_Network\n'), ((2134, 2166), 'numpy.random.shuffle', 'np.random.shuffle', (['train_indices'], {}), '(train_indices)\n', (2151, 2166), True, 'import numpy as np\n'), ((2171, 2202), 'numpy.random.shuffle', 'np.random.shuffle', (['test_indices'], {}), '(test_indices)\n', (2188, 2202), True, 'import numpy as np\n'), ((2269, 2373), 'Utils.ResneXt_IN_Dual_Network.ResneXt_IN', 'ResneXt_IN_Dual_Network.ResneXt_IN', (['(1, img_rows, img_cols, img_channels)'], {'cardinality': '(8)', 'classes': '(16)'}), '((1, img_rows, img_cols, img_channels),\n cardinality=8, classes=16)\n', (2303, 2373), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ResneXt_IN_Dual_Network\n'), ((2381, 2399), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': '(0.0003)'}), '(lr=0.0003)\n', (2388, 2399), False, 'from keras.optimizers import Adam, SGD, Adadelta, RMSprop, Nadam\n'), ((3455, 3481), 'numpy.prod', 'np.prod', (['data_IN.shape[:2]'], {}), '(data_IN.shape[:2])\n', (3462, 3481), True, 'import numpy as np\n'), ((3483, 3509), 'numpy.prod', 'np.prod', (['data_IN.shape[2:]'], {}), '(data_IN.shape[2:])\n', (3490, 3509), True, 'import numpy as np\n'), ((3534, 3562), 'numpy.prod', 'np.prod', (['new_gt_IN.shape[:2]'], {}), '(new_gt_IN.shape[:2])\n', (3541, 3562), True, 'import numpy as np\n'), ((4405, 4438), 'numpy.random.seed', 'np.random.seed', (['seeds[index_iter]'], {}), '(seeds[index_iter])\n', (4419, 4438), True, 'import numpy as np\n'), ((5901, 5931), 'collections.Counter', 'collections.Counter', (['pred_test'], {}), '(pred_test)\n', (5920, 5931), False, 'import collections\n'), ((5986, 6040), 
'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['pred_test', 'gt_test[:-VAL_SIZE]'], {}), '(pred_test, gt_test[:-VAL_SIZE])\n', (6008, 6040), False, 'from sklearn import metrics, preprocessing\n'), ((6064, 6120), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['pred_test', 'gt_test[:-VAL_SIZE]'], {}), '(pred_test, gt_test[:-VAL_SIZE])\n', (6088, 6120), False, 'from sklearn import metrics, preprocessing\n'), ((6149, 6206), 'Utils.averageAccuracy.AA_andEachClassAccuracy', 'averageAccuracy.AA_andEachClassAccuracy', (['confusion_matrix'], {}), '(confusion_matrix)\n', (6188, 6206), False, 'from Utils import zeroPadding, normalization, doPCA, modelStatsRecord, averageAccuracy, ResneXt_IN_Dual_Network\n'), ((6219, 6276), 'sklearn.metrics.cohen_kappa_score', 'metrics.cohen_kappa_score', (['pred_test', 'gt_test[:-VAL_SIZE]'], {}), '(pred_test, gt_test[:-VAL_SIZE])\n', (6244, 6276), False, 'from sklearn import metrics, preprocessing\n'), ((1742, 1768), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1759, 1768), True, 'import numpy as np\n'), ((2464, 2506), 'keras.backend.categorical_crossentropy', 'K.categorical_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2490, 2506), True, 'from keras import backend as K\n'), ((4654, 4673), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (4664, 4673), True, 'import numpy as np\n'), ((4738, 4756), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (4748, 4756), True, 'import numpy as np\n'), ((2551, 2570), 'keras.backend.ones_like', 'K.ones_like', (['y_pred'], {}), '(y_pred)\n', (2562, 2570), True, 'from keras import backend as K\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 29 09:24:09 2016
@author: abhishek
"""
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
import xgboost as xgb
# Train an xgboost binary classifier on the preprocessed Santander data,
# using a stratified 70/30 split and AUC-based early stopping.
np.random.seed(44)
train = pd.read_csv('./data/train_processed_handle_na.csv')
test = pd.read_csv('./data/test_processed_handle_na.csv')
X = train[train.columns.drop('TARGET')]
y = train.TARGET
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=1279)
# evaluate xgboost model
param = dict([('max_depth', 3), ('learning_rate', 0.05), ('objective', 'binary:logistic'),
              ('eval_metric', 'auc'), ('seed', 1729), ('min_child_weight', 2),
              ('colsample_bytree', 0.95), ('subsample', 0.8)])
dtrain = xgb.DMatrix(X_train.values, label=y_train.values)
dtest = xgb.DMatrix(X_test.values, label=y_test.values)
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
# Effectively unbounded round count: early stopping (no eval-AUC improvement
# for 10 rounds) is what actually terminates training.
num_round = 1000000
xgb.train(param, dtrain, num_round, watchlist, early_stopping_rounds=10)
| [
"sklearn.cross_validation.train_test_split",
"numpy.random.seed",
"pandas.read_csv",
"xgboost.train",
"xgboost.DMatrix"
] | [((205, 223), 'numpy.random.seed', 'np.random.seed', (['(44)'], {}), '(44)\n', (219, 223), True, 'import numpy as np\n'), ((233, 284), 'pandas.read_csv', 'pd.read_csv', (['"""./data/train_processed_handle_na.csv"""'], {}), "('./data/train_processed_handle_na.csv')\n", (244, 284), True, 'import pandas as pd\n'), ((292, 342), 'pandas.read_csv', 'pd.read_csv', (['"""./data/test_processed_handle_na.csv"""'], {}), "('./data/test_processed_handle_na.csv')\n", (303, 342), True, 'import pandas as pd\n'), ((437, 505), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'stratify': 'y', 'random_state': '(1279)'}), '(X, y, test_size=0.3, stratify=y, random_state=1279)\n', (453, 505), False, 'from sklearn.cross_validation import train_test_split\n'), ((773, 822), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_train.values'], {'label': 'y_train.values'}), '(X_train.values, label=y_train.values)\n', (784, 822), True, 'import xgboost as xgb\n'), ((831, 878), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_test.values'], {'label': 'y_test.values'}), '(X_test.values, label=y_test.values)\n', (842, 878), True, 'import xgboost as xgb\n'), ((950, 1022), 'xgboost.train', 'xgb.train', (['param', 'dtrain', 'num_round', 'watchlist'], {'early_stopping_rounds': '(10)'}), '(param, dtrain, num_round, watchlist, early_stopping_rounds=10)\n', (959, 1022), True, 'import xgboost as xgb\n')] |
# Tools to load and save midi files for the rnn-gan-project.
#
# This file has been modified by <NAME> to support
# operations in c-rnn-gan.pytorch project.
#
# Written by <NAME>, http://mogren.one/
#
# This file has been modified by <NAME> to support
# c-rnn-gan.pytorch operations. Original file is available in:
#
# https://github.com/olofmogren/c-rnn-gan
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os, midi, math, random, re, sys
import numpy as np
from io import BytesIO
# Indices into the [genre, composer, song_data] triples stored in self.songs.
GENRE = 0
COMPOSER = 1
SONG_DATA = 2
# INDICES IN BATCHES (LENGTH,TONE,VELOCITY are repeated self.tones_per_cell times):
TICKS_FROM_PREV_START = 0
LENGTH = 1
TONE = 2
VELOCITY = 3
# INDICES IN SONG DATA (NOT YET BATCHED):
BEGIN_TICK = 0
# Features per tone slot in a batch cell: length, tone, velocity.
NUM_FEATURES_PER_TONE = 3
# Reference tempo (BPM) that song start ticks are rescaled to.
IDEAL_TEMPO = 120.0
# hand-picked values for normalization
# "NZ" = "normalizer": 'u'/'s' are mean/scale for standardization,
# 'min'/'max' the range for min-max scaling to [-1, 1].
NZ = {
    TICKS_FROM_PREV_START: {'u': 60.0, 's': 80.0},
    LENGTH: {'u': 64.0, 's': 64.0},
    TONE: {'min': 0, 'max': 127},
    VELOCITY: {'u': 64.0, 's': 128.0},
}
# Set to 'overfit' to shrink the dataset to a single repeated song.
debug = ''
#debug = 'overfit'
# Genre -> composer -> (source) mapping consumed by read_data().
sources = {}
sources['baseline'] = {}
# Filenames routed to the validation/test partitions instead of train.
file_list = {}
file_list['validation'] = [
    'Kingdom.mid',\
    'Super1.mid',\
    'Super2.mid',\
    'Super3.mid'
    ]
file_list['test'] = []
# normalization, de-normalization functions
def norm_std(batch_songs, ix):
  """Standardize feature `ix` of a batch in place: (x - u) / s from NZ."""
  params = NZ[ix]
  batch_songs[:, :, ix] = (batch_songs[:, :, ix] - params['u']) / params['s']
def norm_minmax(batch_songs, ix):
  """Min-max scale feature `ix` of a batch in place to the range [-1, 1]."""
  lo = NZ[ix]['min']
  hi = NZ[ix]['max']
  batch_songs[:, :, ix] = 2 * ((batch_songs[:, :, ix] - lo) / (hi - lo)) - 1
def de_norm_std(song_data, ix):
  """Invert norm_std for a single song in place: x * s + u from NZ."""
  params = NZ[ix]
  song_data[:, ix] = (song_data[:, ix] * params['s']) + params['u']
def de_norm_minmax(song_data, ix):
  """Invert norm_minmax for a single song in place: [-1, 1] -> [min, max]."""
  lo = NZ[ix]['min']
  hi = NZ[ix]['max']
  song_data[:, ix] = ((song_data[:, ix] + 1) / 2) * (hi - lo) + lo
class MusicDataLoader(object):
def __init__(self, datadir, pace_events=False, tones_per_cell=1, single_composer=None):
self.datadir = datadir
self.output_ticks_per_quarter_note = 384.0
self.tones_per_cell = tones_per_cell
self.single_composer = single_composer
self.pointer = {}
self.pointer['validation'] = 0
self.pointer['test'] = 0
self.pointer['train'] = 0
if not datadir is None:
print ('Data loader: datadir: {}'.format(datadir))
self.read_data(pace_events)
  def read_data(self, pace_events):
    """
    read_data takes a datadir with genre subdirs, and composer subsubdirs
    containing midi files, reads them into training data for an rnn-gan model.

    Each song becomes a [genre, composer, song_data] triple; the triples are
    stored in self.songs (dict with 'train'/'validation'/'test' lists) and
    also returned. Partition membership is decided by the module-level
    file_list; everything else goes to train. Time steps will be fractions
    of beat notes (32th notes).
    """
    self.genres = sorted(sources.keys())
    print (('num genres:{}'.format(len(self.genres))))
    if self.single_composer is not None:
      self.composers = [self.single_composer]
    else:
      # Composer vocabulary: union over all genres, deduplicated and sorted.
      self.composers = []
      for genre in self.genres:
        self.composers.extend(sources[genre].keys())
      if debug == 'overfit':
        self.composers = self.composers[0:1]
      self.composers = list(set(self.composers))
      self.composers.sort()
    print (('num composers: {}'.format(len(self.composers))))
    self.songs = {}
    self.songs['validation'] = []
    self.songs['test'] = []
    self.songs['train'] = []
    # OVERFIT: count caps the number of files read in overfit mode.
    count = 0
    for genre in self.genres:
      # OVERFIT
      if debug == 'overfit' and count > 20: break
      for composer in self.composers:
        # OVERFIT
        if debug == 'overfit' and composer not in self.composers: continue
        if debug == 'overfit' and count > 20: break
        current_path = os.path.join(self.datadir,os.path.join(genre, composer))
        if not os.path.exists(current_path):
          print ( 'Path does not exist: {}'.format(current_path))
          continue
        files = os.listdir(current_path)
        #composer_id += 1
        #if composer_id > max_composers:
        #  print (('Only using {} composers.'.format(max_composers))
        #  break
        for i,f in enumerate(files):
          # OVERFIT
          if debug == 'overfit' and count > 20: break
          count += 1
          # Progress output every 100 files and at the end of each directory.
          if i % 100 == 99 or i+1 == len(files):
            print ( 'Reading files {}/{}: {}'.format(genre, composer, (i+1)))
          if os.path.isfile(os.path.join(current_path,f)):
            song_data = self.read_one_file(current_path, f, pace_events)
            if song_data is None:
              continue
            # NOTE(review): os.path.join(f) is simply f — a longer relative
            # path was perhaps intended; confirm against file_list contents.
            if os.path.join(f) in file_list['validation']:
              self.songs['validation'].append([genre, composer, song_data])
            elif os.path.join(os.path.join(genre, composer), f) in file_list['test']:
              self.songs['test'].append([genre, composer, song_data])
            else:
              self.songs['train'].append([genre, composer, song_data])
    random.shuffle(self.songs['train'])
    self.pointer['validation'] = 0
    self.pointer['test'] = 0
    self.pointer['train'] = 0
    # DEBUG: OVERFIT. overfit.
    # Overfit mode: all three partitions become (copies of) one song.
    if debug == 'overfit':
      self.songs['train'] = self.songs['train'][0:1]
      #print (('DEBUG: trying to overfit on the following (repeating for train/validation/test):')
      for i in range(200):
        self.songs['train'].append(self.songs['train'][0])
      self.songs['validation'] = self.songs['train'][0:1]
      self.songs['test'] = self.songs['train'][0:1]
    #print (('lens: train: {}, val: {}, test: {}'.format(len(self.songs['train']), len(self.songs['validation']), len(self.songs['test'])))
    return self.songs
def read_one_file(self, path, filename, pace_events):
try:
if debug:
print (('Reading {}'.format(os.path.join(path,filename))))
midi_pattern = midi.read_midifile(os.path.join(path,filename))
except:
print ( 'Error reading {}'.format(os.path.join(path,filename)))
return None
#
# Interpreting the midi pattern.
# A pattern has a list of tracks
# (midi.Track()).
# Each track is a list of events:
# * midi.events.SetTempoEvent: tick, data([int, int, int])
# (The three ints are really three bytes representing one integer.)
# * midi.events.TimeSignatureEvent: tick, data([int, int, int, int])
# (ignored)
# * midi.events.KeySignatureEvent: tick, data([int, int])
# (ignored)
# * midi.events.MarkerEvent: tick, text, data
# * midi.events.PortEvent: tick(int), data
# * midi.events.TrackNameEvent: tick(int), text(string), data([ints])
# * midi.events.ProgramChangeEvent: tick, channel, data
# * midi.events.ControlChangeEvent: tick, channel, data
# * midi.events.PitchWheelEvent: tick, data(two bytes, 14 bits)
#
# * midi.events.NoteOnEvent: tick(int), channel(int), data([int,int]))
# - data[0] is the note (0-127)
# - data[1] is the velocity.
# - if velocity is 0, this is equivalent of a midi.NoteOffEvent
# * midi.events.NoteOffEvent: tick(int), channel(int), data([int,int]))
#
# * midi.events.EndOfTrackEvent: tick(int), data()
#
# Ticks are relative.
#
# Tempo are in microseconds/quarter note.
#
# This interpretation was done after reading
# http://electronicmusic.wikia.com/wiki/Velocity
# http://faydoc.tripod.com/formats/mid.htm
# http://www.lastrayofhope.co.uk/2009/12/23/midi-delta-time-ticks-to-seconds/2/
# and looking at some files. It will hopefully be enough
# for the use in this project.
#
# We'll save the data intermediately with a dict representing each tone.
# The dicts we put into a list. Times are microseconds.
# Keys: 'freq', 'velocity', 'begin-tick', 'tick-length'
#
# 'Output ticks resolution' are fixed at a 32th note,
# - so 8 ticks per quarter note.
#
# This approach means that we do not currently support
# tempo change events.
#
# TODO 1: Figure out pitch.
# TODO 2: Figure out different channels and instruments.
#
song_data = []
tempos = []
# Tempo:
ticks_per_quarter_note = float(midi_pattern.resolution)
#print (('Resoluton: {}'.format(ticks_per_quarter_note))
input_ticks_per_output_tick = ticks_per_quarter_note/self.output_ticks_per_quarter_note
#if debug == 'overfit': input_ticks_per_output_tick = 1.0
# Multiply with output_ticks_pr_input_tick for output ticks.
for track in midi_pattern:
last_event_input_tick=0
not_closed_notes = []
for event in track:
if type(event) == midi.events.SetTempoEvent:
td = event.data # tempo data
tempo = 60 * 1000000 / (td[0]*(256**2) + td[1]*256 + td[2])
tempos.append(tempo)
elif (type(event) == midi.events.NoteOffEvent) or \
(type(event) == midi.events.NoteOnEvent and \
event.velocity == 0):
retained_not_closed_notes = []
for e in not_closed_notes:
if event.data[0] == e[TONE]:
event_abs_tick = float(event.tick+last_event_input_tick)/input_ticks_per_output_tick
#current_note['length'] = float(ticks*microseconds_per_tick)
e[LENGTH] = event_abs_tick-e[BEGIN_TICK]
song_data.append(e)
else:
retained_not_closed_notes.append(e)
not_closed_notes = retained_not_closed_notes
elif type(event) == midi.events.NoteOnEvent:
begin_tick = float(event.tick+last_event_input_tick)/input_ticks_per_output_tick
note = [0.0]*(NUM_FEATURES_PER_TONE+1)
note[TONE] = event.data[0]
note[VELOCITY] = float(event.data[1])
note[BEGIN_TICK] = begin_tick
not_closed_notes.append(note)
last_event_input_tick += event.tick
for e in not_closed_notes:
#print (('Warning: found no NoteOffEvent for this note. Will close it. {}'.format(e))
e[LENGTH] = float(ticks_per_quarter_note)/input_ticks_per_output_tick
song_data.append(e)
song_data.sort(key=lambda e: e[BEGIN_TICK])
if (pace_events):
pace_event_list = []
pace_tick = 0.0
song_tick_length = song_data[-1][BEGIN_TICK]+song_data[-1][LENGTH]
while pace_tick < song_tick_length:
song_data.append([0.0, 440.0, 0.0, pace_tick, 0.0])
pace_tick += float(ticks_per_quarter_note)/input_ticks_per_output_tick
song_data.sort(key=lambda e: e[BEGIN_TICK])
# tick adjustment (based on tempo)
avg_tempo = sum(tempos) / len(tempos)
for frame in song_data:
frame[BEGIN_TICK] = frame[BEGIN_TICK] * IDEAL_TEMPO/avg_tempo
return song_data
  def rewind(self, part='train'):
    # Reset the read pointer for the given partition so the next
    # get_batch() call starts from the beginning of that partition.
    self.pointer[part] = 0
def get_batch(self, batchsize, songlength, part='train', normalize=True):
"""
get_batch() returns a batch from self.songs, as a
pair of tensors (genrecomposer, song_data).
The first tensor is a tensor of genres and composers
(as two one-hot vectors that are concatenated).
The second tensor contains song data.
Song data has dimensions [batchsize, songlength, num_song_features]
To have the sequence be the primary index is convention in
tensorflow's rnn api.
The tensors will have to be split later.
Songs are currently chopped off after songlength.
TODO: handle this in a better way.
Since self.songs was shuffled in read_data(), the batch is
a random selection without repetition.
songlength is related to internal sample frequency.
We fix this to be every 32th notes. # 50 milliseconds.
This means 8 samples per quarter note.
There is currently no notion of tempo in the representation.
composer and genre is concatenated to each event
in the sequence. There might be more clever ways
of doing this. It's not reasonable to change composer
or genre in the middle of a song.
A tone has a feature telling us the pause before it.
"""
#print (('get_batch(): pointer: {}, len: {}, batchsize: {}'.format(self.pointer[part], len(self.songs[part]), batchsize))
if self.pointer[part] > len(self.songs[part])-batchsize:
batchsize = len(self.songs[part]) - self.pointer[part]
if batchsize == 0:
return None, None
if self.songs[part]:
batch = self.songs[part][self.pointer[part]:self.pointer[part]+batchsize]
self.pointer[part] += batchsize
# subtract two for start-time and channel, which we don't include.
num_meta_features = len(self.genres)+len(self.composers)
# All features except timing are multiplied with tones_per_cell (default 1)
num_song_features = NUM_FEATURES_PER_TONE*self.tones_per_cell+1
batch_genrecomposer = np.ndarray(shape=[batchsize, num_meta_features])
batch_songs = np.ndarray(shape=[batchsize, songlength, num_song_features])
for s in range(len(batch)):
songmatrix = np.ndarray(shape=[songlength, num_song_features])
composeronehot = onehot(self.composers.index(batch[s][1]), len(self.composers))
genreonehot = onehot(self.genres.index(batch[s][0]), len(self.genres))
genrecomposer = np.concatenate([genreonehot, composeronehot])
#random position:
begin = 0
if len(batch[s][SONG_DATA]) > songlength*self.tones_per_cell:
begin = random.randint(0, len(batch[s][SONG_DATA])-songlength*self.tones_per_cell)
matrixrow = 0
n = begin
while matrixrow < songlength:
eventindex = 0
event = np.zeros(shape=[num_song_features])
if n < len(batch[s][SONG_DATA]):
event[LENGTH] = batch[s][SONG_DATA][n][LENGTH]
event[TONE] = batch[s][SONG_DATA][n][TONE]
event[VELOCITY] = batch[s][SONG_DATA][n][VELOCITY]
ticks_from_start_of_prev_tone = 0.0
if n>0:
# beginning of this tone, minus starting of previous
ticks_from_start_of_prev_tone = batch[s][SONG_DATA][n][BEGIN_TICK]-batch[s][SONG_DATA][n-1][BEGIN_TICK]
# we don't include start-time at index 0:
# and not channel at -1.
# tones are allowed to overlap. This is indicated with
# relative time zero in the midi spec.
event[TICKS_FROM_PREV_START] = ticks_from_start_of_prev_tone
tone_count = 1
for simultaneous in range(1,self.tones_per_cell):
if n+simultaneous >= len(batch[s][SONG_DATA]):
break
if batch[s][SONG_DATA][n+simultaneous][BEGIN_TICK]-batch[s][SONG_DATA][n][BEGIN_TICK] == 0:
offset = simultaneous*NUM_FEATURES_PER_TONE
event[offset+LENGTH] = batch[s][SONG_DATA][n+simultaneous][LENGTH]
event[offset+TONE] = batch[s][SONG_DATA][n+simultaneous][TONE]
event[offset+VELOCITY] = batch[s][SONG_DATA][n+simultaneous][VELOCITY]
tone_count += 1
else:
break
songmatrix[matrixrow,:] = event
matrixrow += 1
n += tone_count
#if s == 0 and self.pointer[part] == batchsize:
# print ( songmatrix[0:10,:]
batch_genrecomposer[s,:] = genrecomposer
batch_songs[s,:,:] = songmatrix
# input normalization
if normalize:
norm_std(batch_songs, TICKS_FROM_PREV_START)
norm_std(batch_songs, LENGTH)
norm_std(batch_songs, VELOCITY)
norm_minmax(batch_songs, TONE)
return batch_genrecomposer, batch_songs
else:
raise 'get_batch() called but self.songs is not initialized.'
def get_num_song_features(self):
return NUM_FEATURES_PER_TONE*self.tones_per_cell+1
def get_num_meta_features(self):
return len(self.genres)+len(self.composers)
def get_midi_pattern(self, song_data, bpm, normalized=True):
    """
    get_midi_pattern takes a song in internal representation
    (a tensor of dimensions [songlength, self.num_song_features]).
    the three values are length, frequency, velocity.
    if velocity of a frame is zero, no midi event will be
    triggered at that frame.
    returns the midi_pattern.
    Can be used with filename == None. Then nothing is saved, but only returned.
    """
    #
    # Interpreting the midi pattern.
    # A pattern has a list of tracks
    # (midi.Track()).
    # Each track is a list of events:
    #   * midi.events.SetTempoEvent: tick, data([int, int, int])
    #     (The three ints are really three bytes representing one integer.)
    #   * midi.events.TimeSignatureEvent: tick, data([int, int, int, int])
    #     (ignored)
    #   * midi.events.KeySignatureEvent: tick, data([int, int])
    #     (ignored)
    #   * midi.events.MarkerEvent: tick, text, data
    #   * midi.events.PortEvent: tick(int), data
    #   * midi.events.TrackNameEvent: tick(int), text(string), data([ints])
    #   * midi.events.ProgramChangeEvent: tick, channel, data
    #   * midi.events.ControlChangeEvent: tick, channel, data
    #   * midi.events.PitchWheelEvent: tick, data(two bytes, 14 bits)
    #
    #   * midi.events.NoteOnEvent: tick(int), channel(int), data([int,int]))
    #     - data[0] is the note (0-127)
    #     - data[1] is the velocity.
    #     - if velocity is 0, this is equivalent of a midi.NoteOffEvent
    #   * midi.events.NoteOffEvent: tick(int), channel(int), data([int,int]))
    #
    #   * midi.events.EndOfTrackEvent: tick(int), data()
    #
    # Ticks are relative.
    #
    # Tempo are in microseconds/quarter note.
    #
    # This interpretation was done after reading
    # http://electronicmusic.wikia.com/wiki/Velocity
    # http://faydoc.tripod.com/formats/mid.htm
    # http://www.lastrayofhope.co.uk/2009/12/23/midi-delta-time-ticks-to-seconds/2/
    # and looking at some files. It will hopefully be enough
    # for the use in this project.
    #
    # This approach means that we do not currently support
    # tempo change events.
    #
    # Tempo:
    # Multiply with output_ticks_pr_input_tick for output ticks.
    # NOTE(review): the bpm argument is never used below — the tempo event is
    # always written with IDEAL_TEMPO. Confirm whether bpm should be wired in.
    midi_pattern = midi.Pattern([], resolution=int(self.output_ticks_per_quarter_note))
    cur_track = midi.Track([])
    cur_track.append(midi.events.SetTempoEvent(tick=0, bpm=IDEAL_TEMPO))
    # NOTE(review): future_events, last_event_tick and ticks_to_this_tone are
    # never read again in this method — presumably leftovers from an earlier
    # implementation.
    future_events = {}
    last_event_tick = 0
    ticks_to_this_tone = 0.0
    # Events are first collected as (absolute_tick, event) pairs and converted
    # to the relative ticks required by the MIDI format after sorting.
    song_events_absolute_ticks = []
    abs_tick_note_beginning = 0.0
    if type(song_data) != np.ndarray:
        song_data = np.array(song_data)
    # de-normalize (undo the normalization applied in get_batch when requested)
    if normalized:
        de_norm_std(song_data, TICKS_FROM_PREV_START)
        de_norm_std(song_data, LENGTH)
        de_norm_std(song_data, VELOCITY)
        de_norm_minmax(song_data, TONE)
    for frame in song_data:
        # Frames store ticks relative to the previous tone start; accumulate
        # them to obtain the absolute start tick of this cell.
        abs_tick_note_beginning += int(round(frame[TICKS_FROM_PREV_START]))
        for subframe in range(self.tones_per_cell):
            # Each cell packs up to tones_per_cell simultaneous tones, laid
            # out as consecutive NUM_FEATURES_PER_TONE-wide feature groups.
            offset = subframe*NUM_FEATURES_PER_TONE
            tick_len = int(round(frame[offset+LENGTH]))
            tone = int(round(frame[offset+TONE]))
            # MIDI velocity is 7-bit; clamp to 127.
            velocity = min(int(round(frame[offset+VELOCITY])),127)
            # (tone is always an int at this point, so the `is not None`
            # check can never be False; kept for safety.)
            if tone is not None and velocity > 0 and tick_len > 0:
                # range-check with preserved tone, changed one octave:
                while tone < 0: tone += 12
                while tone > 127: tone -= 12
                song_events_absolute_ticks.append((abs_tick_note_beginning,
                                                   midi.events.NoteOnEvent(
                                                       tick=0,
                                                       velocity=velocity,
                                                       pitch=tone)))
                song_events_absolute_ticks.append((abs_tick_note_beginning+tick_len,
                                                   midi.events.NoteOffEvent(
                                                       tick=0,
                                                       velocity=0,
                                                       pitch=tone)))
    # Sort every on/off event by absolute tick, then rewrite each event's
    # tick as the delta from the previous event (relative ticks).
    song_events_absolute_ticks.sort(key=lambda e: e[0])
    abs_tick_note_beginning = 0.0
    for abs_tick,event in song_events_absolute_ticks:
        rel_tick = abs_tick-abs_tick_note_beginning
        event.tick = int(round(rel_tick))
        cur_track.append(event)
        abs_tick_note_beginning=abs_tick
    # Close the track one quarter note after the last event.
    cur_track.append(midi.EndOfTrackEvent(tick=int(self.output_ticks_per_quarter_note)))
    midi_pattern.append(cur_track)
    return midi_pattern
def save_midi_pattern(self, filename, midi_pattern):
    """Write midi_pattern to disk; passing filename=None skips saving."""
    if filename is None:
        return
    midi.write_midifile(filename, midi_pattern)
def save_data(self, filename, song_data, bpm=IDEAL_TEMPO):
    """
    Convert song_data (internal representation: a tensor of dimensions
    [songlength, 3] holding length, frequency and velocity) into a midi
    pattern, optionally write it to `filename`, and return the pattern.

    A frame with zero velocity triggers no midi event at that frame.
    Can be used with filename == None; then nothing is saved, but the
    pattern is still returned.
    """
    pattern = self.get_midi_pattern(song_data, bpm=bpm)
    self.save_midi_pattern(filename, pattern)
    return pattern
def tone_to_freq(tone):
    """
    Frequency in Hz of a midi tone number (A4 = tone 69 = 440 Hz).
    formulas from
    * https://en.wikipedia.org/wiki/MIDI_Tuning_Standard
    * https://en.wikipedia.org/wiki/Cent_(music)
    """
    octaves_above_a4 = (float(tone) - 69.0) / 12.0
    return 440.0 * math.pow(2, octaves_above_a4)
def freq_to_tone(freq):
    """
    Map a frequency in Hz onto the midi scale.

    Returns None for non-positive frequencies; otherwise a dict d where
    d['tone'] is the base tone in midi standard and
    d['cents'] is the cents to make the tone into the exact-ish frequency
    provided (multiply this with 8192 to get the midi pitch level).
    formulas from
    * https://en.wikipedia.org/wiki/MIDI_Tuning_Standard
    * https://en.wikipedia.org/wiki/Cent_(music)
    """
    if freq <= 0.0:
        return None
    exact_tone = 69.0 + 12 * math.log(float(freq) / 440.0, 2)
    base_tone = int(exact_tone)
    cent_offset = int(1200 * math.log(float(freq) / tone_to_freq(base_tone), 2))
    return {'tone': base_tone, 'cents': cent_offset}
def onehot(i, length):
    """Return a 1-D float array of the given length: all zeros except a one at index i."""
    encoded = np.zeros(length)
    encoded[i] = 1.0
    return encoded
| [
"os.path.join",
"midi.Track",
"random.shuffle",
"numpy.zeros",
"os.path.exists",
"midi.events.NoteOnEvent",
"numpy.array",
"midi.write_midifile",
"midi.events.SetTempoEvent",
"midi.events.NoteOffEvent",
"numpy.ndarray",
"os.listdir",
"numpy.concatenate"
] | [((23093, 23117), 'numpy.zeros', 'np.zeros', ([], {'shape': '[length]'}), '(shape=[length])\n', (23101, 23117), True, 'import numpy as np\n'), ((5830, 5865), 'random.shuffle', 'random.shuffle', (["self.songs['train']"], {}), "(self.songs['train'])\n", (5844, 5865), False, 'import os, midi, math, random, re, sys\n'), ((19196, 19210), 'midi.Track', 'midi.Track', (['[]'], {}), '([])\n', (19206, 19210), False, 'import os, midi, math, random, re, sys\n'), ((13746, 13794), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '[batchsize, num_meta_features]'}), '(shape=[batchsize, num_meta_features])\n', (13756, 13794), True, 'import numpy as np\n'), ((13815, 13875), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '[batchsize, songlength, num_song_features]'}), '(shape=[batchsize, songlength, num_song_features])\n', (13825, 13875), True, 'import numpy as np\n'), ((19232, 19282), 'midi.events.SetTempoEvent', 'midi.events.SetTempoEvent', ([], {'tick': '(0)', 'bpm': 'IDEAL_TEMPO'}), '(tick=0, bpm=IDEAL_TEMPO)\n', (19257, 19282), False, 'import os, midi, math, random, re, sys\n'), ((19492, 19511), 'numpy.array', 'np.array', (['song_data'], {}), '(song_data)\n', (19500, 19511), True, 'import numpy as np\n'), ((21551, 21594), 'midi.write_midifile', 'midi.write_midifile', (['filename', 'midi_pattern'], {}), '(filename, midi_pattern)\n', (21570, 21594), False, 'import os, midi, math, random, re, sys\n'), ((4808, 4832), 'os.listdir', 'os.listdir', (['current_path'], {}), '(current_path)\n', (4818, 4832), False, 'import os, midi, math, random, re, sys\n'), ((6717, 6745), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (6729, 6745), False, 'import os, midi, math, random, re, sys\n'), ((13932, 13981), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '[songlength, num_song_features]'}), '(shape=[songlength, num_song_features])\n', (13942, 13981), True, 'import numpy as np\n'), ((14173, 14218), 'numpy.concatenate', 'np.concatenate', (['[genreonehot, 
composeronehot]'], {}), '([genreonehot, composeronehot])\n', (14187, 14218), True, 'import numpy as np\n'), ((4631, 4660), 'os.path.join', 'os.path.join', (['genre', 'composer'], {}), '(genre, composer)\n', (4643, 4660), False, 'import os, midi, math, random, re, sys\n'), ((4677, 4705), 'os.path.exists', 'os.path.exists', (['current_path'], {}), '(current_path)\n', (4691, 4705), False, 'import os, midi, math, random, re, sys\n'), ((14556, 14591), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_song_features]'}), '(shape=[num_song_features])\n', (14564, 14591), True, 'import numpy as np\n'), ((5284, 5313), 'os.path.join', 'os.path.join', (['current_path', 'f'], {}), '(current_path, f)\n', (5296, 5313), False, 'import os, midi, math, random, re, sys\n'), ((6646, 6674), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (6658, 6674), False, 'import os, midi, math, random, re, sys\n'), ((6798, 6826), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (6810, 6826), False, 'import os, midi, math, random, re, sys\n'), ((5460, 5475), 'os.path.join', 'os.path.join', (['f'], {}), '(f)\n', (5472, 5475), False, 'import os, midi, math, random, re, sys\n'), ((20444, 20506), 'midi.events.NoteOnEvent', 'midi.events.NoteOnEvent', ([], {'tick': '(0)', 'velocity': 'velocity', 'pitch': 'tone'}), '(tick=0, velocity=velocity, pitch=tone)\n', (20467, 20506), False, 'import os, midi, math, random, re, sys\n'), ((20787, 20843), 'midi.events.NoteOffEvent', 'midi.events.NoteOffEvent', ([], {'tick': '(0)', 'velocity': '(0)', 'pitch': 'tone'}), '(tick=0, velocity=0, pitch=tone)\n', (20811, 20843), False, 'import os, midi, math, random, re, sys\n'), ((5610, 5639), 'os.path.join', 'os.path.join', (['genre', 'composer'], {}), '(genre, composer)\n', (5622, 5639), False, 'import os, midi, math, random, re, sys\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 3 05:34:07 2018
@author: <NAME>
"""
# In[1]: Import Packages
import numpy as np
import time
import math
# In[2]: Function Implementation
def Gererate_Distance_Matrix(Row, Column, Matrix):
    """
    Pairwise Euclidean distances between the columns of Matrix.

    Parameters:
        Row, Column -- how many rows / columns of Matrix to use
        Matrix      -- array-like of shape at least (Row, Column)

    Returns:
        (Distance_Mat, time_elapsed) where Distance_Mat is a symmetric
        (Column, Column) ndarray of distances and time_elapsed is the
        computation time in seconds.
    """
    t1 = time.time()
    points = np.asarray(Matrix, dtype=float)[:Row, :Column]
    # Broadcast (Row, Column, 1) against (Row, 1, Column) so that
    # diffs[:, j, k] holds the component-wise difference of columns j and k.
    diffs = points[:, :, None] - points[:, None, :]
    # Sum squared differences over the Row axis and take the root in one
    # vectorised pass. This replaces the original triple Python loop, which
    # was O(Column^2 * Row) in interpreted code and also called math.sqrt()
    # on a 1-element ndarray (an implicit conversion NumPy has deprecated).
    Distance_Mat = np.sqrt(np.sum(diffs * diffs, axis=0))
    t2 = time.time()
    time_elapsed = (t2 - t1)  # in seconds
    return (Distance_Mat, time_elapsed)
# In[3]: Enhanced Implementation of the Function
def Gererate_Enhanced_Distance_Matrix(Row, Column, Matrix):
    """
    Faster variant: fills a (Column, Column) matrix with *squared*
    Euclidean distances computed from the first two rows of Matrix only
    (2-D points). The Row argument is accepted for interface parity with
    Gererate_Distance_Matrix but is not used here.

    Returns (Distance_Mat, time_elapsed) with time_elapsed in seconds.
    """
    t1 = time.time()
    Distance_Mat = np.zeros((Column, Column))
    # Compute each pair once and mirror it; the diagonal stays zero.
    for a in range(Column):
        for b in range(a + 1, Column):
            dx = Matrix[0][a] - Matrix[0][b]
            dy = Matrix[1][a] - Matrix[1][b]
            squared = dx * dx + dy * dy
            Distance_Mat[a][b] = squared
            Distance_Mat[b][a] = squared
    t2 = time.time()
    time_elapsed = (t2 - t1)  # in seconds
    return (Distance_Mat, time_elapsed)
# In[4]:
#Distance = np.abs((Matrix[i][Co] - Matrix[i][Next])) + Distance #Absolute will Spend more much time
| [
"numpy.zeros",
"time.time"
] | [((253, 264), 'time.time', 'time.time', ([], {}), '()\n', (262, 264), False, 'import time\n'), ((302, 320), 'numpy.zeros', 'np.zeros', (['(Row, 1)'], {}), '((Row, 1))\n', (310, 320), True, 'import numpy as np\n'), ((372, 398), 'numpy.zeros', 'np.zeros', (['(Column, Column)'], {}), '((Column, Column))\n', (380, 398), True, 'import numpy as np\n'), ((738, 749), 'time.time', 'time.time', ([], {}), '()\n', (747, 749), False, 'import time\n'), ((985, 996), 'time.time', 'time.time', ([], {}), '()\n', (994, 996), False, 'import time\n'), ((1040, 1066), 'numpy.zeros', 'np.zeros', (['(Column, Column)'], {}), '((Column, Column))\n', (1048, 1066), True, 'import numpy as np\n'), ((1550, 1561), 'time.time', 'time.time', ([], {}), '()\n', (1559, 1561), False, 'import time\n')] |
from paddle.fluid import layers
from pytracking.features.featurebase import FeatureBase
from pytracking.libs.paddle_utils import PTensor
import numpy as np
class RGB(FeatureBase):
    """Raw RGB pixel values rescaled from [0, 255] into [-0.5, 0.5]."""

    def dim(self):
        # One output channel per colour channel.
        return 3

    def stride(self):
        # Spatial stride comes straight from the configured pooling stride.
        return self.pool_stride

    def extract(self, im: np.ndarray):
        # Normalise to [0, 1], then shift so the range is centred on zero.
        return im / 255 - 0.5
class Grayscale(FeatureBase):
    """Channel-averaged (grayscale) image feature normalized to [-0.5, 0.5]."""

    def dim(self):
        # Averaging collapses the colour channels into a single channel.
        return 1

    def stride(self):
        # Spatial stride comes straight from the configured pooling stride.
        return self.pool_stride

    def extract(self, im: np.ndarray):
        # Rescale to [-0.5, 0.5] first, then average over the channel axis.
        return np.mean(im / 255 - 0.5, axis=1, keepdims=True)
| [
"numpy.mean"
] | [((656, 697), 'numpy.mean', 'np.mean', (['(im / 255 - 0.5)', '(1)'], {'keepdims': '(True)'}), '(im / 255 - 0.5, 1, keepdims=True)\n', (663, 697), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Render a gallery of seaborn relplot examples into a multi-page stats.pdf.

Each figure is appended to the PDF with pp.savefig() and then cleared to keep
memory bounded; the order of the calls below determines the page order.
The tips/fmri/dots datasets are downloaded (and cached) by seaborn.
"""
import numpy as np
import pandas as pd
pd.set_option("display.max_rows", 5)
pd.set_option("display.max_columns", 6)
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
# Many figures are created in sequence; silence the open-figure warning.
plt.rcParams.update({'figure.max_open_warning': 0})
import seaborn as sns
sns.set_theme(style="darkgrid")
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('stats.pdf')
print("Setup Complete")
# --- Scatter plots on the tips dataset ---
tips = sns.load_dataset("tips")
plot = sns.relplot(x="total_bill", y="tip", data=tips);
pp.savefig()
plot.figure.clear()
# Colour points by smoker status.
plot = sns.relplot(x="total_bill", y="tip", hue="smoker", data=tips);
pp.savefig()
plot.figure.clear()
# Encode smoker status redundantly with both colour and marker style.
plot = sns.relplot(x="total_bill", y="tip", hue="smoker", style="smoker",
            data=tips);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="total_bill", y="tip", hue="smoker", style="time", data=tips);
pp.savefig()
plot.figure.clear()
# Numeric hue: party size on a sequential palette.
plot = sns.relplot(x="total_bill", y="tip", hue="size", data=tips);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="total_bill", y="tip", hue="size", palette="ch:r=-.5,l=.75", data=tips);
pp.savefig()
plot.figure.clear()
# Encode party size as marker area instead of colour.
plot = sns.relplot(x="total_bill", y="tip", size="size", data=tips);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="total_bill", y="tip", size="size", sizes=(15, 200), data=tips);
pp.savefig()
plot.figure.clear()
# --- Line plots on synthetic random-walk data ---
df = pd.DataFrame(dict(time=np.arange(500),
                  value=np.random.randn(500).cumsum()))
g = sns.relplot(x="time", y="value", kind="line", data=df)
g.figure.autofmt_xdate()
pp.savefig()
g.figure.clear()
# With sort=False the line follows the row order of the data.
df = pd.DataFrame(np.random.randn(500, 2).cumsum(axis=0), columns=["x", "y"])
plot = sns.relplot(x="x", y="y", sort=False, kind="line", data=df);
pp.savefig()
plot.figure.clear()
# --- Aggregated line plots on the fmri dataset ---
fmri = sns.load_dataset("fmri")
# Default: mean signal per timepoint with a bootstrap confidence band.
plot = sns.relplot(x="timepoint", y="signal", kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="timepoint", y="signal", ci=None, kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
# Show the standard deviation instead of a confidence interval.
plot = sns.relplot(x="timepoint", y="signal", kind="line", ci="sd", data=fmri);
pp.savefig()
plot.figure.clear()
# estimator=None draws every observation rather than an aggregate.
plot = sns.relplot(x="timepoint", y="signal", estimator=None, kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="timepoint", y="signal", hue="event", kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="timepoint", y="signal", hue="region", style="event",
            kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
# Use markers instead of dash patterns to distinguish styles.
plot = sns.relplot(x="timepoint", y="signal", hue="region", style="event",
            dashes=False, markers=True, kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="timepoint", y="signal", hue="event", style="event",
            kind="line", data=fmri);
pp.savefig()
plot.figure.clear()
# One line per subject (units=...) without aggregation.
plot = sns.relplot(x="timepoint", y="signal", hue="region",
            units="subject", estimator=None,
            kind="line", data=fmri.query("event == 'stim'"));
pp.savefig()
plot.figure.clear()
# --- Line plots on the dots dataset ---
dots = sns.load_dataset("dots").query("align == 'dots'")
plot = sns.relplot(x="time", y="firing_rate",
            hue="coherence", style="choice",
            kind="line", data=dots);
pp.savefig()
plot.figure.clear()
# A sequential palette suits the ordered "coherence" variable.
palette = sns.cubehelix_palette(light=.8, n_colors=6)
plot = sns.relplot(x="time", y="firing_rate",
            hue="coherence", style="choice",
            palette=palette,
            kind="line", data=dots);
pp.savefig()
plot.figure.clear()
from matplotlib.colors import LogNorm
palette = sns.cubehelix_palette(light=.7, n_colors=6)
# Log-scale the hue normalisation (zero coherence must be filtered out first).
plot = sns.relplot(x="time", y="firing_rate",
            hue="coherence", style="choice",
            hue_norm=LogNorm(),
            kind="line",
            data=dots.query("coherence > 0"));
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="time", y="firing_rate",
            size="coherence", style="choice",
            kind="line", data=dots);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="time", y="firing_rate",
            hue="coherence", size="choice",
            palette=palette,
            kind="line", data=dots);
pp.savefig()
plot.figure.clear()
# --- Dates on the x axis ---
df = pd.DataFrame(dict(time=pd.date_range("2017-1-1", periods=500),
                  value=np.random.randn(500).cumsum()))
g = sns.relplot(x="time", y="value", kind="line", data=df)
g.figure.autofmt_xdate()
pp.savefig()
g.figure.clear()
# --- Faceting: one subplot per category ---
plot = sns.relplot(x="total_bill", y="tip", hue="smoker",
            col="time", data=tips);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="timepoint", y="signal", hue="subject",
            col="region", row="event", height=3,
            kind="line", estimator=None, data=fmri);
pp.savefig()
plot.figure.clear()
plot = sns.relplot(x="timepoint", y="signal", hue="event", style="event",
            col="subject", col_wrap=5,
            height=3, aspect=.75, linewidth=2.5,
            kind="line", data=fmri.query("region == 'frontal'"));
pp.savefig()
plot.figure.clear()
pp.close()
| [
"matplotlib.backends.backend_pdf.PdfPages",
"pandas.date_range",
"numpy.random.randn",
"seaborn.load_dataset",
"pandas.plotting.register_matplotlib_converters",
"seaborn.cubehelix_palette",
"matplotlib.pyplot.rcParams.update",
"matplotlib.colors.LogNorm",
"numpy.arange",
"seaborn.relplot",
"pand... | [((86, 122), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(5)'], {}), "('display.max_rows', 5)\n", (99, 122), True, 'import pandas as pd\n'), ((123, 162), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(6)'], {}), "('display.max_columns', 6)\n", (136, 162), True, 'import pandas as pd\n'), ((163, 207), 'pandas.plotting.register_matplotlib_converters', 'pd.plotting.register_matplotlib_converters', ([], {}), '()\n', (205, 207), True, 'import pandas as pd\n'), ((240, 291), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (259, 291), True, 'import matplotlib.pyplot as plt\n'), ((314, 345), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (327, 345), True, 'import seaborn as sns\n'), ((404, 425), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['"""stats.pdf"""'], {}), "('stats.pdf')\n", (412, 425), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((458, 482), 'seaborn.load_dataset', 'sns.load_dataset', (['"""tips"""'], {}), "('tips')\n", (474, 482), True, 'import seaborn as sns\n'), ((490, 537), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'data': 'tips'}), "(x='total_bill', y='tip', data=tips)\n", (501, 537), True, 'import seaborn as sns\n'), ((580, 641), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""smoker"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='smoker', data=tips)\n", (591, 641), True, 'import seaborn as sns\n'), ((684, 761), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""smoker"""', 'style': '"""smoker"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='smoker', style='smoker', data=tips)\n", (695, 761), True, 'import seaborn as sns\n'), ((816, 891), 'seaborn.relplot', 'sns.relplot', ([], {'x': 
'"""total_bill"""', 'y': '"""tip"""', 'hue': '"""smoker"""', 'style': '"""time"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='smoker', style='time', data=tips)\n", (827, 891), True, 'import seaborn as sns\n'), ((934, 993), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""size"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='size', data=tips)\n", (945, 993), True, 'import seaborn as sns\n'), ((1036, 1125), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""size"""', 'palette': '"""ch:r=-.5,l=.75"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='size', palette='ch:r=-.5,l=.75',\n data=tips)\n", (1047, 1125), True, 'import seaborn as sns\n'), ((1164, 1224), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'size': '"""size"""', 'data': 'tips'}), "(x='total_bill', y='tip', size='size', data=tips)\n", (1175, 1224), True, 'import seaborn as sns\n'), ((1267, 1344), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'size': '"""size"""', 'sizes': '(15, 200)', 'data': 'tips'}), "(x='total_bill', y='tip', size='size', sizes=(15, 200), data=tips)\n", (1278, 1344), True, 'import seaborn as sns\n'), ((1489, 1543), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""value"""', 'kind': '"""line"""', 'data': 'df'}), "(x='time', y='value', kind='line', data=df)\n", (1500, 1543), True, 'import seaborn as sns\n'), ((1685, 1744), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""x"""', 'y': '"""y"""', 'sort': '(False)', 'kind': '"""line"""', 'data': 'df'}), "(x='x', y='y', sort=False, kind='line', data=df)\n", (1696, 1744), True, 'import seaborn as sns\n'), ((1787, 1811), 'seaborn.load_dataset', 'sns.load_dataset', (['"""fmri"""'], {}), "('fmri')\n", (1803, 1811), True, 'import seaborn as sns\n'), ((1819, 1881), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 
'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', kind='line', data=fmri)\n", (1830, 1881), True, 'import seaborn as sns\n'), ((1924, 1995), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'ci': 'None', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', ci=None, kind='line', data=fmri)\n", (1935, 1995), True, 'import seaborn as sns\n'), ((2038, 2109), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'kind': '"""line"""', 'ci': '"""sd"""', 'data': 'fmri'}), "(x='timepoint', y='signal', kind='line', ci='sd', data=fmri)\n", (2049, 2109), True, 'import seaborn as sns\n'), ((2152, 2230), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'estimator': 'None', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', estimator=None, kind='line', data=fmri)\n", (2163, 2230), True, 'import seaborn as sns\n'), ((2273, 2348), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""event"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='event', kind='line', data=fmri)\n", (2284, 2348), True, 'import seaborn as sns\n'), ((2391, 2487), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""region"""', 'style': '"""event"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='region', style='event', kind=\n 'line', data=fmri)\n", (2402, 2487), True, 'import seaborn as sns\n'), ((2537, 2661), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""region"""', 'style': '"""event"""', 'dashes': '(False)', 'markers': '(True)', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='region', style='event', dashes=\n False, markers=True, kind='line', data=fmri)\n", (2548, 2661), True, 'import seaborn as sns\n'), ((2711, 2806), 'seaborn.relplot', 
'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""event"""', 'style': '"""event"""', 'kind': '"""line"""', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='event', style='event', kind=\n 'line', data=fmri)\n", (2722, 2806), True, 'import seaborn as sns\n'), ((3114, 3213), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""firing_rate"""', 'hue': '"""coherence"""', 'style': '"""choice"""', 'kind': '"""line"""', 'data': 'dots'}), "(x='time', y='firing_rate', hue='coherence', style='choice',\n kind='line', data=dots)\n", (3125, 3213), True, 'import seaborn as sns\n'), ((3279, 3323), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'light': '(0.8)', 'n_colors': '(6)'}), '(light=0.8, n_colors=6)\n', (3300, 3323), True, 'import seaborn as sns\n'), ((3330, 3446), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""firing_rate"""', 'hue': '"""coherence"""', 'style': '"""choice"""', 'palette': 'palette', 'kind': '"""line"""', 'data': 'dots'}), "(x='time', y='firing_rate', hue='coherence', style='choice',\n palette=palette, kind='line', data=dots)\n", (3341, 3446), True, 'import seaborn as sns\n'), ((3562, 3606), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'light': '(0.7)', 'n_colors': '(6)'}), '(light=0.7, n_colors=6)\n', (3583, 3606), True, 'import seaborn as sns\n'), ((3842, 3942), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""firing_rate"""', 'size': '"""coherence"""', 'style': '"""choice"""', 'kind': '"""line"""', 'data': 'dots'}), "(x='time', y='firing_rate', size='coherence', style='choice',\n kind='line', data=dots)\n", (3853, 3942), True, 'import seaborn as sns\n'), ((4005, 4120), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""firing_rate"""', 'hue': '"""coherence"""', 'size': '"""choice"""', 'palette': 'palette', 'kind': '"""line"""', 'data': 'dots'}), "(x='time', y='firing_rate', hue='coherence', size='choice',\n palette=palette, 
kind='line', data=dots)\n", (4016, 4120), True, 'import seaborn as sns\n'), ((4318, 4372), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""time"""', 'y': '"""value"""', 'kind': '"""line"""', 'data': 'df'}), "(x='time', y='value', kind='line', data=df)\n", (4329, 4372), True, 'import seaborn as sns\n'), ((4436, 4509), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""total_bill"""', 'y': '"""tip"""', 'hue': '"""smoker"""', 'col': '"""time"""', 'data': 'tips'}), "(x='total_bill', y='tip', hue='smoker', col='time', data=tips)\n", (4447, 4509), True, 'import seaborn as sns\n'), ((4564, 4699), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""timepoint"""', 'y': '"""signal"""', 'hue': '"""subject"""', 'col': '"""region"""', 'row': '"""event"""', 'height': '(3)', 'kind': '"""line"""', 'estimator': 'None', 'data': 'fmri'}), "(x='timepoint', y='signal', hue='subject', col='region', row=\n 'event', height=3, kind='line', estimator=None, data=fmri)\n", (4575, 4699), True, 'import seaborn as sns\n'), ((3057, 3081), 'seaborn.load_dataset', 'sns.load_dataset', (['"""dots"""'], {}), "('dots')\n", (3073, 3081), True, 'import seaborn as sns\n'), ((3718, 3727), 'matplotlib.colors.LogNorm', 'LogNorm', ([], {}), '()\n', (3725, 3727), False, 'from matplotlib.colors import LogNorm\n'), ((1408, 1422), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (1417, 1422), True, 'import numpy as np\n'), ((1618, 1641), 'numpy.random.randn', 'np.random.randn', (['(500)', '(2)'], {}), '(500, 2)\n', (1633, 1641), True, 'import numpy as np\n'), ((4213, 4251), 'pandas.date_range', 'pd.date_range', (['"""2017-1-1"""'], {'periods': '(500)'}), "('2017-1-1', periods=500)\n", (4226, 4251), True, 'import pandas as pd\n'), ((1453, 1473), 'numpy.random.randn', 'np.random.randn', (['(500)'], {}), '(500)\n', (1468, 1473), True, 'import numpy as np\n'), ((4282, 4302), 'numpy.random.randn', 'np.random.randn', (['(500)'], {}), '(500)\n', (4297, 4302), True, 'import numpy as np\n')] |
import numpy as np
try:
import rospy
except:
pass
from visual_dynamics.spaces import BoxSpace
from visual_dynamics.policies import Pr2TargetPolicy
from visual_dynamics.pr2 import planning, berkeley_pr2, pr2_trajectories
class Pr2MovingArmTargetPolicy(Pr2TargetPolicy):
    def __init__(self, env, frame_id, offset, lr='r', min_gripper_displacement=0.05, gripper_state_space=None):
        """
        This policy points the camera to the offset in the target frame
        Args:
            env: Pr2Env
            frame_id: frame id of the target
            offset: offset relative to the target frame
            lr: 'l' for left arm and 'r' for right arm
            min_gripper_displacement: minimum distance to move when sampling for a new gripper position
            gripper_state_space: pan, tilt and distance of the target location for the gripper tool frame
        """
        super(Pr2MovingArmTargetPolicy, self).__init__(env, frame_id, offset)
        self.pr2 = self.env.pr2
        self.lr = lr
        self.min_gripper_displacement = min_gripper_displacement
        # Default gripper space: the env's state space extended with a
        # distance dimension spanning [0.6, 0.8].
        self.gripper_state_space = gripper_state_space or \
            BoxSpace(np.r_[self.env.state_space.low, .6],
                     np.r_[self.env.state_space.high, .8])
        # Drive the arm to the centre of the gripper space before the policy
        # starts acting, blocking until the trajectory completes.
        target_gripper_state = (self.gripper_state_space.low + self.gripper_state_space.high) / 2.0
        self.start_arm_trajectory(self.target_pos_from_gripper_state(target_gripper_state),
                                  wait=True, speed_factor=1)

    def act(self, obs):
        """Restart arm motion whenever the previous trajectory has finished, then act."""
        if not self.env.pr2.is_moving():
            self.start_arm_trajectory()
        return super(Pr2MovingArmTargetPolicy, self).act(obs)

    def reset(self):
        # Same idea as act(): keep the arm moving across resets.
        if not self.env.pr2.is_moving():
            self.start_arm_trajectory()
        return super(Pr2MovingArmTargetPolicy, self).reset()

    def start_arm_trajectory(self, target_pos=None, wait=False, speed_factor=.1):
        """Plan and follow an arm trajectory to target_pos.

        If target_pos is None, keep sampling gripper states until the
        resulting target is at least min_gripper_displacement away from the
        current gripper (frame_id link) position.
        """
        if target_pos is None:
            # First pass always enters the loop (target_pos is None short-circuits
            # before curr_pos is referenced).
            while target_pos is None or \
                    np.linalg.norm(target_pos - curr_pos) < self.min_gripper_displacement:
                self.env.pr2.update_rave()
                curr_pos = self.env.pr2.robot.GetLink(self.frame_id).GetTransform()[:3, 3]
                gripper_state = self.gripper_state_space.sample()
                target_pos = self.target_pos_from_gripper_state(gripper_state)
        if isinstance(target_pos, np.ndarray):
            target_pos = target_pos.tolist()
        self.pr2.update_rave()
        traj = planning.plan_up_trajectory(self.pr2.robot, self.lr, target_pos)
        bodypart2traj = {"%sarm" % self.lr: traj}
        pr2_trajectories.follow_body_traj(self.pr2, bodypart2traj, wait=wait, speed_factor=speed_factor)

    def target_pos_from_gripper_state(self, gripper_state):
        """Convert a (pan, tilt, distance) gripper state into a world-frame position."""
        pan, tilt, distance = gripper_state
        camera_T = berkeley_pr2.get_kinect_transform(self.env.pr2.robot)
        # Spherical (pan, tilt, distance) -> Cartesian offset in the camera frame.
        ax2 = -np.sin(tilt) * distance
        ax0 = -np.cos(pan) * ax2 / np.tan(tilt)
        ax1 = -np.sin(pan) * ax2 / np.tan(tilt)
        ax = np.array([ax0, ax1, ax2])
        # Translate by the camera's world position.
        target_pos = ax + camera_T[:3, 3]
        return target_pos

    def _get_config(self):
        config = super(Pr2MovingArmTargetPolicy, self)._get_config()
        config.update({'lr': self.lr})
        return config
| [
"visual_dynamics.pr2.berkeley_pr2.get_kinect_transform",
"visual_dynamics.spaces.BoxSpace",
"visual_dynamics.pr2.planning.plan_up_trajectory",
"numpy.tan",
"numpy.array",
"numpy.sin",
"visual_dynamics.pr2.pr2_trajectories.follow_body_traj",
"numpy.linalg.norm",
"numpy.cos"
] | [((2560, 2624), 'visual_dynamics.pr2.planning.plan_up_trajectory', 'planning.plan_up_trajectory', (['self.pr2.robot', 'self.lr', 'target_pos'], {}), '(self.pr2.robot, self.lr, target_pos)\n', (2587, 2624), False, 'from visual_dynamics.pr2 import planning, berkeley_pr2, pr2_trajectories\n'), ((2683, 2783), 'visual_dynamics.pr2.pr2_trajectories.follow_body_traj', 'pr2_trajectories.follow_body_traj', (['self.pr2', 'bodypart2traj'], {'wait': 'wait', 'speed_factor': 'speed_factor'}), '(self.pr2, bodypart2traj, wait=wait,\n speed_factor=speed_factor)\n', (2716, 2783), False, 'from visual_dynamics.pr2 import planning, berkeley_pr2, pr2_trajectories\n'), ((2904, 2957), 'visual_dynamics.pr2.berkeley_pr2.get_kinect_transform', 'berkeley_pr2.get_kinect_transform', (['self.env.pr2.robot'], {}), '(self.env.pr2.robot)\n', (2937, 2957), False, 'from visual_dynamics.pr2 import planning, berkeley_pr2, pr2_trajectories\n'), ((3106, 3131), 'numpy.array', 'np.array', (['[ax0, ax1, ax2]'], {}), '([ax0, ax1, ax2])\n', (3114, 3131), True, 'import numpy as np\n'), ((1183, 1273), 'visual_dynamics.spaces.BoxSpace', 'BoxSpace', (['np.r_[self.env.state_space.low, 0.6]', 'np.r_[self.env.state_space.high, 0.8]'], {}), '(np.r_[self.env.state_space.low, 0.6], np.r_[self.env.state_space.\n high, 0.8])\n', (1191, 1273), False, 'from visual_dynamics.spaces import BoxSpace\n'), ((3032, 3044), 'numpy.tan', 'np.tan', (['tilt'], {}), '(tilt)\n', (3038, 3044), True, 'import numpy as np\n'), ((3080, 3092), 'numpy.tan', 'np.tan', (['tilt'], {}), '(tilt)\n', (3086, 3092), True, 'import numpy as np\n'), ((2973, 2985), 'numpy.sin', 'np.sin', (['tilt'], {}), '(tilt)\n', (2979, 2985), True, 'import numpy as np\n'), ((2072, 2109), 'numpy.linalg.norm', 'np.linalg.norm', (['(target_pos - curr_pos)'], {}), '(target_pos - curr_pos)\n', (2086, 2109), True, 'import numpy as np\n'), ((3012, 3023), 'numpy.cos', 'np.cos', (['pan'], {}), '(pan)\n', (3018, 3023), True, 'import numpy as np\n'), ((3060, 3071), 
'numpy.sin', 'np.sin', (['pan'], {}), '(pan)\n', (3066, 3071), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def most_common(lst):
    """Return the element of *lst* that occurs most often."""
    unique_values = set(lst)
    return max(unique_values, key=lambda value: lst.count(value))
def euclidean(point, data):
    """
    Euclidean distance between point & data.
    Point has dimensions (m,), data has dimensions (n,m), and output will be of size (n,).
    """
    squared_diffs = np.square(point - data)
    return np.sqrt(squared_diffs.sum(axis=1))
class KNeighborsRegressor:
    """Minimal k-nearest-neighbours regressor: predicts the mean target of
    the k training points closest to each query point."""

    def __init__(self, k=5, dist_metric=euclidean):
        self.k = k
        self.dist_metric = dist_metric

    def fit(self, X_train, y_train):
        """Memorise the training set; knn has no real training phase."""
        self.X_train = X_train
        self.y_train = y_train

    def predict(self, X_test):
        """Return the mean of the k nearest training targets per query row."""
        neighbor_targets = []
        for query in X_test:
            dists = self.dist_metric(query, self.X_train)
            ranked = [target for _, target in sorted(zip(dists, self.y_train))]
            neighbor_targets.append(ranked[: self.k])
        return np.mean(neighbor_targets, axis=1)

    def evaluate(self, X_test, y_test):
        """Sum of squared residual errors (SSRE) on (X_test, y_test)."""
        residuals = self.predict(X_test) - y_test
        return sum(residuals ** 2)
# Unpack the California housing dataset, from StatLib repository
# (downloads/caches the data on first use; only the first 500 rows are kept).
housing = datasets.fetch_california_housing()
X = housing['data'][:500]
y = housing['target'][:500]
# Split data into train & test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Preprocess data: fit the scaler on the training split only, then apply to
# both splits, so no test-set statistics leak into training.
ss = StandardScaler().fit(X_train)
X_train, X_test = ss.transform(X_train), ss.transform(X_test)
# Test knn model across varying ks
# NOTE(review): "accuracies" actually holds SSRE values, so lower is better.
accuracies = []
ks = range(1, 30)
for k in ks:
    knn = KNeighborsRegressor(k=k)
    knn.fit(X_train, y_train)
    accuracy = knn.evaluate(X_test, y_test)
    accuracies.append(accuracy)
# Visualize accuracy vs. k
fig, ax = plt.subplots()
ax.plot(ks, accuracies)
ax.set(xlabel="k",
       ylabel="SSRE",
       title="Performance of knn")
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.sum",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.fetch_california_housing",
"numpy.mean",
"matplotlib.pyplot.subplots"
] | [((1341, 1376), 'sklearn.datasets.fetch_california_housing', 'datasets.fetch_california_housing', ([], {}), '()\n', (1374, 1376), False, 'from sklearn import datasets\n'), ((1508, 1545), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1524, 1545), False, 'from sklearn.model_selection import train_test_split\n'), ((1940, 1954), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1952, 1954), True, 'import matplotlib.pyplot as plt\n'), ((2060, 2070), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2068, 2070), True, 'import matplotlib.pyplot as plt\n'), ((517, 552), 'numpy.sum', 'np.sum', (['((point - data) ** 2)'], {'axis': '(1)'}), '((point - data) ** 2, axis=1)\n', (523, 552), True, 'import numpy as np\n'), ((1088, 1114), 'numpy.mean', 'np.mean', (['neighbors'], {'axis': '(1)'}), '(neighbors, axis=1)\n', (1095, 1114), True, 'import numpy as np\n'), ((1573, 1589), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1587, 1589), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import numpy as np
def solution(m):
    """Hand-rolled attempt at an absorbing-Markov-chain probability puzzle.

    Takes a 5x5 transition-count matrix ``m`` and prints a list of
    numerators over a common denominator for the terminal-state odds.

    NOTE(review): this is hard-wired to 5 rows (lvl0..lvl4) and to one
    specific branching shape; it prints the result instead of returning it;
    ``m[x].index(i)`` only finds the FIRST occurrence of a value, so rows
    containing duplicate non-zero counts are mis-indexed; and ``odds1`` is
    declared but never populated. The exact statement order (including the
    loop-variable shadowing of ``index``/``element`` and reassignment of
    ``x``) is load-bearing — do not reorder.
    """
    # Per-row scratch lists: alternating (count, column-index) pairs.
    lvl0 = []
    lvl1 = []
    lvl2 = []
    lvl3 = []
    lvl4 = []
    sums = []
    odds1 = []
    odds2 = []
    odds3 = []
    odds4 = []
    x = 0
    # First pass: record each row's non-zero entries and its row sum.
    while True:
        for i in m[x]:
            if i != 0:
                if x == 0:
                    lvl0.append(i)
                    lvl0.append(([m[x].index(i)])[0])
                if x == 1:
                    lvl1.append(i)
                    lvl1.append(([m[x].index(i)])[0])
                if x == 2:
                    lvl2.append(i)
                    lvl2.append(([m[x].index(i)])[0])
                if x == 3:
                    lvl3.append(i)
                    lvl3.append(([m[x].index(i)])[0])
                if x == 4:
                    lvl4.append(i)
                    lvl4.append(([m[x].index(i)])[0])
            #x = ([m[x].index(i)])[0]
        if x == 0:
            sums.append(sum(m[x]))
        if x == 1:
            sums.append(sum(m[x]))
        if x == 2:
            sums.append(sum(m[x]))
        if x == 3:
            sums.append(sum(m[x]))
        if x == 4:
            sums.append(sum(m[x]))
        x = x + 1
        if x >= len(m):
            break
    # Second pass: walk the recorded transitions out of row 0, accumulating
    # numerator/denominator products in ``l`` until a terminal (all-zero)
    # row is reached; each terminal path lands in odds2/odds3/odds4.
    l = []
    x = lvl0
    for index, element in enumerate(x):
        top = 1
        bottom = 1
        l.clear()
        if index % 2 == 0:
            if x[index + 1] == 1:
                x = lvl1
                print(x)
                l.append(element)
                l.append(sums[0])
                for index, element in enumerate(x):
                    if index % 2 == 0:
                        x = lvl1
                        if x[index + 1] == 3:
                            x = lvl3
                            l.append(element)
                            l.append(sums[1])
                            # sum(x) == 0 means the destination row is
                            # terminal (absorbing).
                            if sum(x) == 0:
                                top = 1
                                bottom = 1
                                for index, element in enumerate(l):
                                    if index % 2 == 0:
                                        top = top * element
                                    elif index % 2 != 0:
                                        bottom = bottom * element
                                odds3.append(top)
                                odds3.append(bottom)
                                print('run')
                                l.pop(len(l) - 1)
                                print(x)
                                l.pop(len(l) - 2)
                        elif x[index + 1] == 4:
                            x = lvl4
                            l.append(element)
                            l.append(sums[1])
                            if sum(x) == 0:
                                top = 1
                                bottom = 1
                                for index, element in enumerate(l):
                                    if index % 2 == 0:
                                        top = top * element
                                    elif index % 2 != 0:
                                        bottom = bottom * element
                                odds4.append(top)
                                odds4.append(bottom)
                                l.clear()
            elif x[index + 1] == 2:
                x = lvl2
                l.append(element)
                l.append(sums[0])
                if sum(x) == 0:
                    top = 1
                    bottom = 1
                    for index, element in enumerate(l):
                        if index % 2 == 0:
                            top = top * element
                        elif index % 2 != 0:
                            bottom = bottom * element
                    odds2.append(top)
                    odds2.append(bottom)
                    l.clear()
                for index, element in enumerate(x):
                    if index % 2 == 0:
                        if x[index + 1] == 4:
                            x = lvl4
                            l.append(element)
                            l.append(sums[1])
                            if sum(x) == 0:
                                for index, element in enumerate(l):
                                    if index % 2 == 0:
                                        top = top * element
                                    elif index % 2 != 0:
                                        bottom = bottom * element
                                odds3.append(top)
                                odds3.append(bottom)
                                l.clear()
    # Put all terminal odds over one common denominator (lcm of the three
    # recorded denominators) and scale odds2's numerator to match.
    denom = []
    denom.append(odds2[1])
    denom.append(odds3[1])
    denom.append(odds4[1])
    lcm = (np.lcm.reduce(denom))
    if int(odds2[1]) != lcm:
        odds2[0] = int(odds2[0] * (lcm / odds2[1]))
    final = []
    for d in odds2:
        if odds2.index(d) % 2 == 0:
            final.append(int(d))
    for d in odds3:
        if odds3.index(d) % 2 == 0:
            final.append(int(d))
    for d in odds4:
        if odds4.index(d) % 2 == 0:
            final.append(int(d))
    final.append(int(lcm))
    print(final)
solution([[0, 2, 1, 0, 0], [0, 0, 0, 3, 4], [0, 0, 0, 0, 0], [0, 0, 0, 0,0], [0, 0, 0, 0, 0]]) | [
"numpy.lcm.reduce"
] | [((5363, 5383), 'numpy.lcm.reduce', 'np.lcm.reduce', (['denom'], {}), '(denom)\n', (5376, 5383), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import logging
import os
from pathlib import Path
from shutil import copyfile
import click
import numpy as np
# Libraries for preprocessing
import torch
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument("input_filepath", type=click.Path(exists=True))
@click.argument("output_filepath", type=click.Path())
def main(input_filepath, output_filepath):
    """
    Runs data processing scripts to turn raw data from (../raw) into
    cleaned data ready to be analyzed (saved in ../processed).

    Steps: merge every ``*train*.npz`` in ``input_filepath`` into one
    ``train_data.npz``, standardize the image tensor, save image/label
    tensors as ``.pt`` files, and copy ``test.npz`` through untouched.
    """
    logger = logging.getLogger(__name__)
    logger.info("making final data set from raw data")
    def normalize(tensor):
        # Standardize to zero mean / unit std over the whole tensor.
        mu = torch.mean(tensor)
        std = torch.std(tensor)
        output = (tensor - mu) / std
        return output
    # Find training data (any .npz file with "train" in its name)
    files = os.listdir(input_filepath)
    data_all = [
        np.load(os.path.join(input_filepath, f))
        for f in files
        if f.endswith(".npz") and "train" in f
    ]
    # Combine .npz files by stacking each array key row-wise.
    merged_data = dict(data_all[0])
    for data in data_all[1:]:
        for k in data.keys():
            merged_data[k] = np.vstack((merged_data[k], dict(data)[k]))
    # Flatten labels back to 1-D (vstack above made them 2-D).
    merged_data["labels"] = np.reshape(
        merged_data["labels"], merged_data["labels"].size
    )
    # TODO: Clean up — the save-then-reload round trip below is unnecessary.
    np.savez(os.path.join(output_filepath, "train_data.npz"), **merged_data)
    # Load in the train file
    train = np.load(os.path.join(output_filepath, "train_data.npz"))
    # Organize into tensors and normalize the images.
    images_train = normalize(torch.Tensor(train.f.images))
    labels_train = torch.Tensor(train.f.labels).type(torch.LongTensor)
    # Save the individual tensors
    torch.save(images_train, os.path.join(output_filepath, "images_train.pt"))
    torch.save(labels_train, os.path.join(output_filepath, "labels_train.pt"))
    # Pass test data through to output
    copyfile(
        os.path.join(input_filepath, "test.npz"),
        os.path.join(output_filepath, "test.npz"),
    )
if __name__ == "__main__":
    log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    # Click parses the CLI arguments, so main() takes no explicit args here.
    main()
| [
"torch.mean",
"logging.basicConfig",
"dotenv.find_dotenv",
"click.command",
"pathlib.Path",
"torch.std",
"torch.Tensor",
"numpy.reshape",
"click.Path",
"os.path.join",
"os.listdir",
"logging.getLogger"
] | [((225, 240), 'click.command', 'click.command', ([], {}), '()\n', (238, 240), False, 'import click\n'), ((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((835, 861), 'os.listdir', 'os.listdir', (['input_filepath'], {}), '(input_filepath)\n', (845, 861), False, 'import os\n'), ((1226, 1287), 'numpy.reshape', 'np.reshape', (["merged_data['labels']", "merged_data['labels'].size"], {}), "(merged_data['labels'], merged_data['labels'].size)\n", (1236, 1287), True, 'import numpy as np\n'), ((2140, 2195), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (2159, 2195), False, 'import logging\n'), ((687, 705), 'torch.mean', 'torch.mean', (['tensor'], {}), '(tensor)\n', (697, 705), False, 'import torch\n'), ((720, 737), 'torch.std', 'torch.std', (['tensor'], {}), '(tensor)\n', (729, 737), False, 'import torch\n'), ((1337, 1384), 'os.path.join', 'os.path.join', (['output_filepath', '"""train_data.npz"""'], {}), "(output_filepath, 'train_data.npz')\n", (1349, 1384), False, 'import os\n'), ((1451, 1498), 'os.path.join', 'os.path.join', (['output_filepath', '"""train_data.npz"""'], {}), "(output_filepath, 'train_data.npz')\n", (1463, 1498), False, 'import os\n'), ((1583, 1611), 'torch.Tensor', 'torch.Tensor', (['train.f.images'], {}), '(train.f.images)\n', (1595, 1611), False, 'import torch\n'), ((1748, 1796), 'os.path.join', 'os.path.join', (['output_filepath', '"""images_train.pt"""'], {}), "(output_filepath, 'images_train.pt')\n", (1760, 1796), False, 'import os\n'), ((1827, 1875), 'os.path.join', 'os.path.join', (['output_filepath', '"""labels_train.pt"""'], {}), "(output_filepath, 'labels_train.pt')\n", (1839, 1875), False, 'import os\n'), ((1939, 1979), 'os.path.join', 'os.path.join', (['input_filepath', '"""test.npz"""'], {}), "(input_filepath, 'test.npz')\n", (1951, 1979), False, 'import os\n'), 
((1989, 2030), 'os.path.join', 'os.path.join', (['output_filepath', '"""test.npz"""'], {}), "(output_filepath, 'test.npz')\n", (2001, 2030), False, 'import os\n'), ((280, 303), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (290, 303), False, 'import click\n'), ((345, 357), 'click.Path', 'click.Path', ([], {}), '()\n', (355, 357), False, 'import click\n'), ((2474, 2487), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (2485, 2487), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((895, 926), 'os.path.join', 'os.path.join', (['input_filepath', 'f'], {}), '(input_filepath, f)\n', (907, 926), False, 'import os\n'), ((1632, 1660), 'torch.Tensor', 'torch.Tensor', (['train.f.labels'], {}), '(train.f.labels)\n', (1644, 1660), False, 'import torch\n'), ((2286, 2300), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2290, 2300), False, 'from pathlib import Path\n')] |
"""numpy基礎
ランダムな数値で配列を作成する方法
標準正規分布に従う数値の配列を作成する場合(randn関数)
[説明ページ]
https://tech.nkhn37.net/numpy-ndarray-create-basic/#randn
"""
import numpy as np
data_array = np.random.randn(10)
print(data_array)
print(type(data_array))
print(data_array.dtype)
| [
"numpy.random.randn"
] | [((164, 183), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (179, 183), True, 'import numpy as np\n')] |
import os
import glob
from datetime import datetime, timedelta
import json
import time
import pandas as pd
import numpy as np
import pickle
import lightgbm as lgbm
from google.cloud import bigquery
def load_strava_activity_data_from_bq(users=None):
    """Fetch raw Strava activity rows from BigQuery, one DataFrame per user.

    Args:
        users: Iterable of user names to fetch. Defaults to ["TyAndrews"].
            (Previously a mutable default list — replaced with the
            None-sentinel idiom to avoid the shared-mutable-default pitfall.)

    Returns:
        Dict mapping each user name to a pandas DataFrame of up to 5000
        raw activity rows from prod_dashboard.raw_strava_data.
    """
    if users is None:
        users = ["TyAndrews"]
    start_time = time.time()
    raw_files_dict = {}
    # One client serves every user; creating it inside the loop was wasteful.
    bqclient = bigquery.Client()
    for user in users:
        print(f"{user} Data Found")
        strava_data_query = """
            SELECT
                distance_km,
                type,
                start_date_local AS start_time,
                distance_km AS distance_raw_km,
                elapsed_time_hrs AS elapsed_time_raw_hrs,
                moving_time_hrs AS moving_time_raw_hrs,
                total_elevation_gain AS elevation_gain
            FROM `{0}.prod_dashboard.raw_strava_data` LIMIT 5000""".format(
            bqclient.project
        )
        raw_files_dict[user] = bqclient.query(strava_data_query).result().to_dataframe()
    print(
        f"load_strava_activity_data: Took {time.time() - start_time: .2f}s to get BQ data"
    )
    return raw_files_dict
def preprocess_strava_df(raw_df, min_act_length=1200, max_act_dist=400, export=False):
    """Filter and enrich a raw Strava activity DataFrame.

    Keeps activities longer than ``min_act_length`` seconds (default 1200 s
    = 20 min) and shorter than ``max_act_dist`` (presumably km — TODO
    confirm units), then derives numeric, datetime and ratio helper columns.

    NOTE(review): rows exactly equal to ``min_act_length`` are dropped by
    the ``>`` filter but not counted by the ``<`` report below, and the
    printed message hard-codes "20min" even though the threshold is a
    parameter.
    """
    # Drop too-short / too-long activities (default threshold is 20 minutes).
    processed_df = raw_df[
        (raw_df.elapsed_time_raw > min_act_length) & (raw_df.distance < max_act_dist)
    ]
    print(
        f"\t{len(raw_df[(raw_df.elapsed_time_raw < min_act_length) & (raw_df.distance < max_act_dist)])} Activities Under 20min in Length, Removed from Dataset"
    )
    processed_df = processed_df.convert_dtypes()
    processed_df[["distance", "distance_raw"]] = processed_df[
        ["distance", "distance_raw"]
    ].apply(pd.to_numeric)
    # start_date_local_raw is a Unix timestamp in seconds.
    processed_df[["start_date_local"]] = pd.to_datetime(
        processed_df["start_date_local_raw"], unit="s"
    )
    # Collapse every start time onto one dummy date (1990-01-01) so that
    # activities can be compared purely by time-of-day.
    processed_df["exer_start_time"] = pd.to_datetime(
        processed_df["start_date_local"].dt.strftime("1990:01:01:%H:%M:%S"),
        format="1990:01:01:%H:%M:%S",
    )
    # assumes the raw timestamps are UTC and activities happened in the UK
    # timezone — TODO confirm against the upstream Strava export.
    processed_df["exer_start_time"] = (
        processed_df["exer_start_time"]
        .dt.tz_localize("UTC")
        .dt.tz_convert("Europe/London")
    )
    # Fraction of total moving time contributed by each activity.
    processed_df["act_type_perc_time"] = processed_df["moving_time_raw"] / sum(
        processed_df["moving_time_raw"]
    )
    processed_df["elapsed_time_raw_hrs"] = processed_df["elapsed_time_raw"] / 3600
    processed_df["moving_time_raw_hrs"] = processed_df["moving_time_raw"] / 3600
    processed_df["distance_raw_km"] = processed_df["distance_raw"] / 1000
    if export == True:
        processed_df.to_csv(r"data\processed\ProcessedStravaData.csv")
    return processed_df
def load_employment_model_data():
    """Load the train/test employment CSVs from ``<cwd>/data/processed``.

    Returns:
        Tuple ``(train_df, test_df)`` of pandas DataFrames read from
        ``train_employment_data.csv`` and ``test_employment_data.csv``.
    """
    model_data_file_path = os.path.abspath(
        os.path.join(os.getcwd(), "data", "processed")
    )
    print("Loading Employment Model Data: " + model_data_file_path)
    # NOTE(review): an unused process-time stopwatch variable was removed.
    file_names = ["train_employment_data.csv", "test_employment_data.csv"]
    train_df, test_df = (
        pd.read_csv(os.path.join(model_data_file_path, name)) for name in file_names
    )
    return train_df, test_df
def preprocess_employment_model_data(input_data, work_hours):
    """Split an employment-data frame into features and labels.

    Args:
        input_data: DataFrame whose first 24 columns are hourly values and
            which carries a "label" column.
        work_hours: Pair of ``(start_hour, end_hour)`` tuples (1-indexed,
            inclusive) for the morning and afternoon work windows.

    Returns:
        ``(data, labels)``: the 24 hourly columns plus derived "morn"/"aft"
        window sums, and the label Series.
    """
    # .copy() so the added columns never mutate (or warn about mutating)
    # a view of the caller's DataFrame.
    data = input_data.iloc[:, 0:24].copy()
    labels = input_data["label"]
    morning = work_hours[0]
    afternoon = work_hours[1]
    # Hours are 1-indexed, columns 0-indexed — hence the -1 on the start.
    data["morn"] = data.iloc[:, morning[0] - 1 : morning[1]].sum(axis=1)
    data["aft"] = data.iloc[:, afternoon[0] - 1 : afternoon[1]].sum(axis=1)
    return data, labels
def load_week_start_times_data():
    """Load the yearly weekly-start-times summary JSON from data/processed.

    Returns:
        The parsed JSON object from ``yearly_week_start_times.json``.
    """
    print("Loading Weekly Employment Summary Data: ")
    week_data = os.path.join(
        os.getcwd(), "data", "processed", "yearly_week_start_times.json"
    )
    # Context manager closes the handle deterministically — the previous
    # json.load(open(...)) leaked the file object.
    with open(week_data, "r") as fh:
        yearly_week_summary_data = json.load(fh)
    return yearly_week_summary_data
def load_lgbm_model(model_file_name="lgbm_employment_classifier.txt"):
    """Load a trained LightGBM Booster from the ``<cwd>/models`` directory."""
    model_path = os.path.join(os.getcwd(), "models", model_file_name)
    return lgbm.Booster(model_file=model_path)
def load_logreg_model(model_file_name="logreg_employment_model.pkl"):
    """Load a pickled logistic-regression model from ``<cwd>/models``.

    NOTE(review): pickle deserialization executes arbitrary code — only load
    model files from trusted sources.

    Returns:
        The unpickled model object.
    """
    model_path = os.path.join(os.getcwd(), "models", model_file_name)
    # Context manager closes the handle — the previous pickle.load(open(...))
    # leaked the file object.
    with open(model_path, "rb") as fh:
        return pickle.load(fh)
def load_logreg_model_results(data_set):
    """Load logistic-regression result CSVs for the given split.

    Args:
        data_set: Either "test" or "train".

    Returns:
        DataFrame read from the first matching
        ``data/processed/<data_set>_logreg_model*.csv`` file.

    Raises:
        ValueError: If ``data_set`` is not "test" or "train" (the previous
            duplicated if/elif version died with an opaque
            UnboundLocalError instead).
    """
    print("Loading " + data_set + " LogReg Model Results: ")
    if data_set not in ("test", "train"):
        raise ValueError(f"data_set must be 'test' or 'train', got {data_set!r}")
    # The two branches differed only in the filename prefix — build it once.
    pattern = os.path.join(
        os.getcwd(), "data", "processed", data_set + "_logreg_model*.csv"
    )
    logreg_results = pd.read_csv(glob.glob(pattern)[0])
    return logreg_results
def load_lgbm_model_results(data_set):
    """Load LightGBM result CSVs for the given split.

    Args:
        data_set: Either "test" or "train".

    Returns:
        DataFrame read from the first matching
        ``data/processed/<data_set>_lgbm_model*.csv`` file.

    Raises:
        ValueError: If ``data_set`` is not "test" or "train".
    """
    # Fixed copy-pasted "LogReg" wording in the progress message.
    print("Loading " + data_set + " LGBM Model Results: ")
    if data_set not in ("test", "train"):
        raise ValueError(f"data_set must be 'test' or 'train', got {data_set!r}")
    # The two branches differed only in the filename prefix — build it once.
    pattern = os.path.join(
        os.getcwd(), "data", "processed", data_set + "_lgbm_model*.csv"
    )
    lgbm_results = pd.read_csv(glob.glob(pattern)[0])
    return lgbm_results
def load_lgbm_heatmap(data_set):
    """Read the first ``<data_set>_lgbm_model_heatmap*.csv`` from data/processed."""
    search_pattern = os.path.join(
        os.getcwd(), "data", "processed", data_set + "_lgbm_model_heatmap*.csv"
    )
    first_match = glob.glob(search_pattern)[0]
    return pd.read_csv(first_match)
def load_logreg_heatmap(data_set):
    """Read the first ``<data_set>_logreg_model_heatmap*.csv`` from data/processed."""
    search_pattern = os.path.join(
        os.getcwd(),
        "data",
        "processed",
        data_set + "_logreg_model_heatmap*.csv",
    )
    first_match = glob.glob(search_pattern)[0]
    return pd.read_csv(first_match)
def generate_employment_prediction_model_data(
    activity_df, start_year, end_year, start_month, end_month, label
):
    """Build nested (year -> month -> weekday -> hourly counts) summaries
    plus flat training rows for the employment-prediction model.

    Each training row is the Mon-Fri hourly start-time distribution for one
    month, followed by ``[label, year, month]``.

    NOTE(review): the boolean filter below re-evaluates
    ``pd.DatetimeIndex(activity_df.start_date_local)`` four times per hour
    bucket (672x per month); hoisting it would be a large speedup, but the
    code is documented as-is to keep behavior byte-identical.
    """
    summary_data = {}
    train_data = []
    for year in range(start_year, end_year + 1):
        summary_data[str(year)] = {}
        begin_month = 1
        stop_month = 12 + 1  # Add one to account for indexing up to but not including
        if year == start_year:
            begin_month = start_month
        if year == end_year:
            stop_month = (
                end_month + 1
            )  # Add one to account for indexing up to but not including
        print(f"{year} {begin_month} {stop_month}")
        for month in range(begin_month, stop_month):
            summary_data[str(year)][str(month)] = {}
            print(f"\t{month}")
            for day in range(0, 7):
                # Weekday index: 0 = Monday ... 6 = Sunday (pandas convention).
                summary_data[str(year)][str(month)][str(day)] = 24 * [0]
                for hour in range(0, 24):
                    # Count activities starting in this (year, month, weekday, hour) bucket.
                    hours_data = activity_df[
                        (pd.DatetimeIndex(activity_df.start_date_local).year == year)
                        & (
                            pd.DatetimeIndex(activity_df.start_date_local).month
                            == month
                        )
                        & (
                            pd.DatetimeIndex(activity_df.start_date_local).weekday
                            == day
                        )
                        & (pd.DatetimeIndex(activity_df.start_date_local).hour == hour)
                    ]
                    summary_data[str(year)][str(month)][str(day)][hour] = len(
                        hours_data
                    )
            week_days = np.array(24 * [0])
            # Calculate what percentage of workout start times occur at each
            # hour of the day, over weekdays only (days 0-4 = Mon-Fri).
            for day in range(0, 5):
                week_days += summary_data[str(year)][str(month)][str(day)]
            # NOTE(review): a month with zero weekday activities divides by 0
            # here and puts NaNs in the training row — confirm upstream data
            # always has at least one weekday activity per month.
            week_days_perc = week_days / sum(week_days)
            month_data = np.append(week_days_perc, [label, year, month])
            train_data.append(month_data)
    return summary_data, train_data
def generate_weekly_start_time_dict(activity_df, year):
week_summary_data = {}
# week_summary_data[str(year)] = {}
years_data = activity_df[
pd.DatetimeIndex(activity_df.start_date_local).year == year
]
for day in range(0, 7):
# print(f'\tProcessing Day {day}')
week_summary_data[str(day)] = 24 * [0]
days_data = years_data[
pd.DatetimeIndex(years_data.start_date_local).weekday == day
]
for hour in range(0, 24):
# print(f'\t\tAccumulating Hour {hour}')
hours_data = days_data.set_index("start_date_local_raw")[
pd.DatetimeIndex(days_data.start_date_local).hour == hour
]
week_summary_data[str(day)][hour] = len(hours_data)
return week_summary_data
| [
"pandas.read_csv",
"os.getcwd",
"time.process_time",
"google.cloud.bigquery.Client",
"time.time",
"pandas.DatetimeIndex",
"numpy.append",
"pandas.to_datetime",
"numpy.array",
"os.path.join"
] | [((279, 290), 'time.time', 'time.time', ([], {}), '()\n', (288, 290), False, 'import time\n'), ((1789, 1851), 'pandas.to_datetime', 'pd.to_datetime', (["processed_df['start_date_local_raw']"], {'unit': '"""s"""'}), "(processed_df['start_date_local_raw'], unit='s')\n", (1803, 1851), True, 'import pandas as pd\n'), ((3104, 3123), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3121, 3123), False, 'import time\n'), ((3142, 3205), 'os.path.join', 'os.path.join', (['model_data_file_path', '"""train_employment_data.csv"""'], {}), "(model_data_file_path, 'train_employment_data.csv')\n", (3154, 3205), False, 'import os\n'), ((3222, 3284), 'os.path.join', 'os.path.join', (['model_data_file_path', '"""test_employment_data.csv"""'], {}), "(model_data_file_path, 'test_employment_data.csv')\n", (3234, 3284), False, 'import os\n'), ((395, 412), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (410, 412), False, 'from google.cloud import bigquery\n'), ((3375, 3389), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (3386, 3389), True, 'import pandas as pd\n'), ((3956, 3967), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3965, 3967), False, 'import os\n'), ((2984, 2995), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2993, 2995), False, 'import os\n'), ((8380, 8398), 'numpy.array', 'np.array', (['(24 * [0])'], {}), '(24 * [0])\n', (8388, 8398), True, 'import numpy as np\n'), ((8686, 8733), 'numpy.append', 'np.append', (['week_days_perc', '[label, year, month]'], {}), '(week_days_perc, [label, year, month])\n', (8695, 8733), True, 'import numpy as np\n'), ((4265, 4276), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4274, 4276), False, 'import os\n'), ((4465, 4476), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4474, 4476), False, 'import os\n'), ((9073, 9119), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['activity_df.start_date_local'], {}), '(activity_df.start_date_local)\n', (9089, 9119), True, 'import pandas as pd\n'), ((5788, 5799), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5797, 5799), False, 'import os\n'), ((6051, 6062), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6060, 6062), False, 'import os\n'), ((9304, 9349), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['years_data.start_date_local'], {}), '(years_data.start_date_local)\n', (9320, 9349), True, 'import pandas as pd\n'), ((1050, 1061), 'time.time', 'time.time', ([], {}), '()\n', (1059, 1061), False, 'import time\n'), ((4766, 4777), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4775, 4777), False, 'import os\n'), ((5345, 5356), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5354, 5356), False, 'import os\n'), ((9551, 9595), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['days_data.start_date_local'], {}), '(days_data.start_date_local)\n', (9567, 9595), True, 'import pandas as pd\n'), ((4994, 5005), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5003, 5005), False, 'import os\n'), ((5548, 5559), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5557, 5559), False, 'import os\n'), ((8135, 8181), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['activity_df.start_date_local'], {}), '(activity_df.start_date_local)\n', (8151, 8181), True, 'import pandas as pd\n'), ((7992, 8038), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['activity_df.start_date_local'], {}), '(activity_df.start_date_local)\n', (8008, 8038), True, 'import pandas as pd\n'), ((7703, 7749), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['activity_df.start_date_local'], {}), '(activity_df.start_date_local)\n', (7719, 7749), True, 'import pandas as pd\n'), ((7820, 7866), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['activity_df.start_date_local'], {}), '(activity_df.start_date_local)\n', (7836, 7866), True, 'import pandas as pd\n')] |
# Copyright 2022 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for functions in model_utils.py."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
from scenic.model_lib.base_models import model_utils
class SimpleGatherTest(parameterized.TestCase):
  """Unit tests for model_utils.simple_gather()."""

  def test_simple_gather_ndarray(self):
    """Checks the gather output against a hand-built target for nd-array idx."""
    source = jnp.array(np.random.normal(size=(2, 3, 5)), dtype=jnp.float32)
    idx = jnp.array([[1, 0, 2], [2, 1, 0]], dtype=jnp.int32)
    gathered = model_utils.simple_gather(source, idx)
    expected_rows = [
        jnp.stack([source[0, 1], source[0, 0], source[0, 2]]),
        jnp.stack([source[1, 2], source[1, 1], source[1, 0]]),
    ]
    expected = jnp.stack(expected_rows)
    self.assertSequenceAlmostEqual(gathered.flatten(), expected.flatten())
class LossTest(parameterized.TestCase):
"""Test various loss functions in model_utils."""
def test_weighted_l1_loss(self):
"""Test weighted_l1_loss against a manually specified target."""
x = jnp.array([[0.1, 0.3], [-1.0, 0.2]], dtype=jnp.float32)
y = jnp.array([[0.5, -1.3], [0.9, 1.2]], dtype=jnp.float32)
out1 = model_utils.weighted_l1_loss(x, y, reduction=None)
out1_target = jnp.array([[0.4, 1.6], [1.9, 1.0]], dtype=jnp.float32)
self.assertSequenceAlmostEqual(
out1.flatten(), out1_target.flatten(), places=5)
out2 = model_utils.weighted_l1_loss(x, y, reduction='mean').item()
out2_target = 4.9 / 4
self.assertAlmostEqual(out2, out2_target, places=5)
def test_weighted_box_l1_loss(self):
"""Test weighted_box_l1_loss against manually specified targets."""
x1 = jnp.array([[0.1, 0.3, 0.9, 0.8]], dtype=jnp.float32)
y1 = jnp.array([[0.5, 0.1, 0.9, 0.7]], dtype=jnp.float32)
out1 = model_utils.weighted_box_l1_loss(x1, y1)
out1_target = jnp.array([[0.4, 0.2, 0, 0.1]], dtype=jnp.float32)
self.assertSequenceAlmostEqual(
out1.flatten(), out1_target.flatten(), places=5)
out2 = model_utils.weighted_box_l1_loss(x1, y1, reduction='mean').item()
out2_target = jnp.mean(out1_target).item()
self.assertAlmostEqual(out2, out2_target, places=5)
out3 = model_utils.weighted_box_l1_loss(x1, y1, tight=False)
out3_target = jnp.array([[0.4, 0.0, 0.0, 0.1]], dtype=jnp.float32)
self.assertSequenceAlmostEqual(
out3.flatten(), out3_target.flatten(), places=5)
def test_weighted_sigmoid_cross_entropy(self):
"""Tests weighted_sigmoid_cross_entropy."""
logits = jnp.array([[1, 2, 3], [4, 5, 6]], dtype=jnp.float32)
labels = jnp.array([[0, 1, 1], [1, 0, 1]], dtype=jnp.float32)
sigmoid = jax.nn.sigmoid
log = jnp.log
loss = model_utils.weighted_sigmoid_cross_entropy(logits, labels)
gt_loss = jnp.array([[
-log(1 - sigmoid(1.)), -log(sigmoid(2.)), -log(sigmoid(3.))
], [-log(sigmoid(4.)), -log(1 - sigmoid(5.)), -log(sigmoid(6.))]
]) / np.prod(labels.shape[:-1])
self.assertSequenceAlmostEqual(
loss.flatten(), gt_loss.sum().flatten(), places=3)
example_weights = jnp.array([1., 0.])
loss = model_utils.weighted_sigmoid_cross_entropy(
logits, labels, weights=example_weights)
gt_loss = jnp.array([[
-log(1 - sigmoid(1.)), -log(sigmoid(2.)), -log(sigmoid(3.))
], [0., 0., 0.]]) / example_weights.sum() + 1e-9
self.assertSequenceAlmostEqual(
loss.flatten(), gt_loss.sum().flatten(), places=3)
label_weights = jnp.array([1., 2., 3.])
loss = model_utils.weighted_sigmoid_cross_entropy(
logits, labels, label_weights=label_weights)
gt_loss = jnp.array([[
-log(1 - sigmoid(1.)), -2 * log(sigmoid(2.)), -3 * log(sigmoid(3.))
], [-log(sigmoid(4.)), -2 * log(1 - sigmoid(5.)), -3 * log(sigmoid(6.))]
]) / np.prod(labels.shape[:-1])
self.assertSequenceAlmostEqual(
loss.flatten(), gt_loss.sum().flatten(), places=3)
loss = model_utils.weighted_sigmoid_cross_entropy(
logits, labels, weights=example_weights, label_weights=label_weights)
gt_loss = jnp.array([[
-log(1 - sigmoid(1.)), -2 * log(sigmoid(2.)), -3 * log(sigmoid(3.))
], [0., 0., 0.]]) / example_weights.sum() + 1e-9
self.assertSequenceAlmostEqual(
loss.flatten(), gt_loss.sum().flatten(), places=3)
# Label weights can actually be any shape that is broadcastable to the
# shape of logits.
label_weights = jnp.array([[1., 2., 3.], [4., 5., 6.]])
loss = model_utils.weighted_sigmoid_cross_entropy(
logits, labels, weights=example_weights, label_weights=label_weights)
gt_loss = jnp.array([[
-log(1 - sigmoid(1.)), -2 * log(sigmoid(2.)), -3 * log(sigmoid(3.))
], [0., 0., 0.]]) / example_weights.sum() + 1e-9
self.assertSequenceAlmostEqual(
loss.flatten(), gt_loss.sum().flatten(), places=3)
with self.assertRaises(ValueError):
label_weights = jnp.array([1., 2., 3., 4.])
loss = model_utils.weighted_sigmoid_cross_entropy(
logits, labels, label_weights=label_weights)
def test_focal_sigmoid_cross_entropy(self):
"""Tests focal_sigmoid_cross_entropy."""
logits = jnp.array([[1, 2, 3], [4, 5, 6]], dtype=jnp.float32)
labels = jnp.array([[0, 1, 1], [1, 0, 1]], dtype=jnp.float32)
sigmoid = jax.nn.sigmoid
log = jnp.log
a = 0.25
g = 2.
loss = model_utils.focal_sigmoid_cross_entropy(
logits, labels, alpha=a, gamma=g)
gt_loss = jnp.array(
[[-log(1 - sigmoid(1.)), -log(sigmoid(2.)), -log(sigmoid(3.))],
[-log(sigmoid(4.)), -log(1 - sigmoid(5.)), -log(sigmoid(6.))]])
focal_factor = jnp.array([[
(1 - a) * sigmoid(1.)**g, a * sigmoid(-2.)**g, a * sigmoid(-3.)**g
], [a * sigmoid(-4.)**g, (1 - a) * sigmoid(5.)**g, a * sigmoid(-6.)**g]])
self.assertSequenceAlmostEqual(
loss.flatten(), (gt_loss * focal_factor).flatten(), places=3)
def test_dice_loss(self):
"""Tests the correctness of the segmentation dice loss."""
# Create test targets:
batch, num_objects, h, w = 1, 2, 128, 128
stride = 2
targets = np.zeros((batch, num_objects, h, w), dtype=np.float32)
targets[0, 0, :64, :64] = 1.0 # Add object in top left of image.
targets[0, 1, 64:, 64:] = 1.0 # Add object in bottom right of image.
input_shape = batch, num_objects, h // stride, w // stride
# Test perfect predictions:
inputs = np.zeros(input_shape, dtype=np.float32)
inputs[0, 0, :64 // stride, :64 // stride] = 1.0
inputs[0, 1, 64 // stride:, 64 // stride:] = 1.0
inputs = (inputs - 0.5) * 1e6 # Inputs will be passed through sigmoid.
loss = model_utils.dice_loss(
jnp.array(inputs), jnp.array(targets), interpolation='nearest')
np.testing.assert_array_almost_equal(loss, [[0.0, 0.0]], decimal=3)
# Test one half-overlapping prediction:
inputs = np.zeros(input_shape, dtype=np.float32)
inputs[0, 0, 32 // stride:(32 + 64) // stride, :64 // stride] = 1.0
inputs[0, 1, 64 // stride:, 64 // stride:] = 1.0
inputs = (inputs - 0.5) * 1e6 # Inputs will be passed through sigmoid.
loss = model_utils.dice_loss(
jnp.array(inputs), jnp.array(targets), interpolation='nearest')
np.testing.assert_array_almost_equal(loss, [[0.5, 0.0]], decimal=3)
# Test one non-overlapping prediction:
inputs = np.zeros(input_shape, dtype=np.float32)
inputs[0, 0, 64 // stride:, 64 // stride:] = 1.0
inputs[0, 1, 64 // stride:, 64 // stride:] = 1.0
inputs = (inputs - 0.5) * 1e6 # Inputs will be passed through sigmoid.
loss = model_utils.dice_loss(
jnp.array(inputs), jnp.array(targets), interpolation='nearest')
np.testing.assert_array_almost_equal(loss, [[1.0, 0.0]], decimal=3)
# Test all-pairs with different instance numbers:
inputs = np.zeros((batch, 3, h // stride, w // stride), dtype=np.float32)
inputs[0, 0, :64 // stride, :64 // stride] = 1.0
inputs[0, 1, 32 // stride:(32 + 64) // stride, :64 // stride] = 1.0
inputs[0, 2, 64 // stride:, 64 // stride:] = 1.0
inputs = (inputs - 0.5) * 1e6 # Inputs will be passed through sigmoid.
loss = model_utils.dice_loss(
jnp.array(inputs), jnp.array(targets), interpolation='nearest',
all_pairs=True)
self.assertTupleEqual(loss.shape, (1, 3, 2)) # [b, n_pred, n_true]
np.testing.assert_array_almost_equal(loss, [[[0.0, 1.0],
[0.5, 1.0],
[1.0, 0.0]]], decimal=3)
def test_weighted_square_error(self):
"""Tests implementation of squared error."""
# Hand-crafted predictions against targets 1..32 reshaped to (2, 4, 4).
# Per-row squared-error sums are [[9, 25, 0, 4], [8, 12, 38, 12]], giving
# per-example totals of 38 and 70; every expectation below derives from
# these numbers.
predictions = jnp.array([
[
[1.0, 3.0, 5.0, 6.0],
[3.0, 5.0, 11.0, 10.0],
[9.0, 10.0, 11.0, 12.0],
[14.0, 13.0, 14.0, 17.0],
],
[
[17.0, 18.0, 21.0, 22.0],
[20.0, 19.0, 24.0, 25.0],
[27.0, 29.0, 30.0, 32.0],
[27.0, 28.0, 33.0, 32.0],
],
])
targets = jnp.arange(1, 33).reshape(2, 4, 4)
# Without specifying axis, this will be over the last two dimensions.
loss = model_utils.weighted_mean_squared_error(predictions, targets)
expected_loss = jnp.mean(jnp.array([38.0, 70.0]))
self.assertAlmostEqual(loss, expected_loss, places=5)
# Test by specifying axes as a tuple. The following are all equivalent to
# the previous test.
loss = model_utils.weighted_mean_squared_error(predictions, targets,
axis=(1, 2))
self.assertAlmostEqual(loss, expected_loss, places=5)
# Negative axes must resolve to the same dimensions.
loss = model_utils.weighted_mean_squared_error(predictions, targets,
axis=(-1, -2))
self.assertAlmostEqual(loss, expected_loss, places=5)
# Axis order within the tuple must not matter.
loss = model_utils.weighted_mean_squared_error(predictions, targets,
axis=(2, 1))
self.assertAlmostEqual(loss, expected_loss, places=5)
# Test by computing loss over a single axis only.
loss = model_utils.weighted_mean_squared_error(predictions, targets,
axis=-1)
expected_loss = jnp.mean(jnp.array([[9, 25, 0, 4],
[8, 12, 38, 12]]))
self.assertAlmostEqual(loss, expected_loss, places=5)
# axis=2 is the same axis as axis=-1 for rank-3 inputs.
loss = model_utils.weighted_mean_squared_error(predictions, targets,
axis=2)
self.assertAlmostEqual(loss, expected_loss, places=5)
# Reducing over axis=1 sums squared errors down the columns instead.
loss = model_utils.weighted_mean_squared_error(predictions, targets,
axis=1)
expected_loss = jnp.mean(jnp.array([[5, 3, 21, 9],
[9, 22, 18, 21]]))
self.assertAlmostEqual(loss, expected_loss, places=5)
# Test with loss weights.
# Per-row weights: zero-weighted rows drop out of the mean entirely, so
# the expected value is the mean over the five surviving row sums.
weights = jnp.array([[1, 1, 1, 0], [0, 1, 1, 0]])
loss = model_utils.weighted_mean_squared_error(predictions, targets,
weights, axis=-1)
expected_loss = jnp.mean(jnp.array([9, 25, 12, 38, 0]))
self.assertAlmostEqual(loss, expected_loss, places=5)
# Per-example weights: the second example is masked out completely.
weights = jnp.array([1, 0])
loss = model_utils.weighted_mean_squared_error(predictions, targets,
weights, axis=-1)
expected_loss = jnp.mean(jnp.array([9, 25, 0, 4]))
self.assertAlmostEqual(loss, expected_loss, places=5)
class MetricTest(parameterized.TestCase):
"""Tests the metric computation related utilities."""
def is_valid(self, t, value_name):
"""Helper function to assert that tensor `t` does not have `nan`, `inf`."""
self.assertFalse(
jnp.isnan(t).any(), msg=f'Found nan\'s in {t} for {value_name}')
self.assertFalse(
jnp.isinf(t).any(), msg=f'Found inf\'s in {t} for {value_name}')
def test_weighted_topk_correctly_classified(self):
"""Tests the topk accuracy computation."""
batch_size = 512
num_of_classes = 100
# Random logits plus random integer labels; no weights are supplied, so
# every example contributes to the accuracy.
logits = jnp.array(
np.random.normal(size=(batch_size, num_of_classes)), dtype=jnp.float32)
labels = jnp.array(np.random.randint(num_of_classes, size=(batch_size,)))
one_hot_targets = common_utils.onehot(labels, logits.shape[-1])
# k=1 must agree exactly with the plain correctly-classified metric.
classification_accuracy = model_utils.weighted_correctly_classified(
logits, one_hot_targets)
top_one_accuracy = model_utils.weighted_topk_correctly_classified(
logits, one_hot_targets, k=1)
self.assertSequenceAlmostEqual(
classification_accuracy.flatten(), top_one_accuracy.flatten())
# With k == num_of_classes every prediction is a hit, so mean accuracy
# must be exactly 1.
top_n_accuracy = model_utils.weighted_topk_correctly_classified(
logits, one_hot_targets, k=num_of_classes)
self.assertEqual(jnp.mean(top_n_accuracy), 1)
# computes using numpy
# Reference top-5 computation: argsort each row, keep the 5 classes with
# the highest logits, and test membership of the true label.
top_5_accuracy = model_utils.weighted_topk_correctly_classified(
logits, one_hot_targets, k=5)
top5_pred = np.argsort(
np.reshape(logits, [-1, num_of_classes]), axis=1)[:, -5:]
y_true = np.array(labels)
top5_pred = np.reshape(top5_pred, [-1, 5])
y_true = np.reshape(y_true, [-1])
np_top_accuracy = np.array(
[y_true[i] in top5_pred[i, :] for i in range(len(y_true))])
self.assertSequenceAlmostEqual(top_5_accuracy.flatten(),
np_top_accuracy.flatten())
def test_weighted_recall(self):
"""Tests the topk recall computation."""
# Five rows of 3-class logits with multi-hot labels. The expectations
# below are consistent with taking, per row, the top-k predictions where
# k is that row's number of positive labels — confirm against
# weighted_recall's contract.
logits = np.array([[[2, 3, 4],
[4, 3, 2],
[4, 2, 3],
[3, 2, 4],
[4, 2, 3],
]])
labels = np.array([[[1, 1, 0],
[1, 1, 0],
[1, 0, 0],
[1, 0, 0],
[0, 0, 0]
]])
batch_size = 8
# Tile the same example across the batch; every batch entry must yield
# the identical recall vector.
logits = jnp.tile(logits, [batch_size, 1, 1])
labels = jnp.tile(labels, [batch_size, 1, 1])
recall = model_utils.weighted_recall(logits, labels)
recall_expected = np.array([[1/2, 1., 1., 0., 0.]] * batch_size)
self.assertSequenceAlmostEqual(
recall.flatten(), recall_expected.flatten())
@parameterized.parameters(itertools.product([1., 0.], [1., 0.]))
def test_weighted_top_one_correctly_classified(self, label_multiplier,
weight_multiplier):
"""Tests the top1 correct computation."""
# Runs over all (label, weight) multiplier combinations in {1, 0},
# including the degenerate all-zero labels/weights cases, and checks the
# result is finite and matches the top-k implementation at k=1.
batch_size = 512
num_of_classes = 100
logits = jnp.array(np.random.normal(
size=(batch_size, 50, num_of_classes)), dtype=jnp.float32)
labels = jnp.array(np.random.randint(
0, 2, size=(batch_size, 50, num_of_classes)))
labels *= label_multiplier
weights = jnp.ones(shape=(batch_size,), dtype=jnp.float32)
weights *= weight_multiplier
is_correct_array = model_utils.weighted_top_one_correctly_classified(
logits, labels, weights=weights)
num_correct = jnp.sum(is_correct_array)
# The dedicated top-1 metric must agree elementwise with topk at k=1.
is_correct_array_ref = model_utils.weighted_topk_correctly_classified(
logits, labels, weights, k=1)
np.testing.assert_array_almost_equal(
is_correct_array, is_correct_array_ref)
np.testing.assert_equal(np.sum(is_correct_array),
np.sum(is_correct_array_ref))
self.is_valid(num_correct, 'Number of correctly classified')
@parameterized.parameters(itertools.product([1., 0.], [1., 0.]))
def test_weighted_unnormalized_sigmoid_cross_entropy(self, label_multiplier,
weight_multiplier):
"""Tests the unnormalized sigmoid cross entropy computation."""
# Only checks the summed loss stays finite (no nan/inf) across all
# multiplier combinations, including all-zero labels/weights.
batch_size = 512
num_of_classes = 100
logits = jnp.array(
np.random.normal(size=(batch_size, num_of_classes)), dtype=jnp.float32)
labels = jnp.array(np.random.randint(0, 2,
size=(batch_size, num_of_classes)))
labels *= label_multiplier
weights = jnp.ones(shape=(batch_size,), dtype=jnp.float32)
weights *= weight_multiplier
loss_array = model_utils.weighted_unnormalized_sigmoid_cross_entropy(
logits, labels, weights=weights)
loss_sum = jnp.sum(loss_array)
self.is_valid(loss_sum, 'Loss value')
@parameterized.parameters(itertools.product([1., 0.], [1., 0.]))
def test_weighted_unnormalized_softmax_cross_entropy(self, label_multiplier,
weight_multiplier):
"""Tests the unnormalized softmax cross entropy computation."""
# Mirrors the sigmoid test above for the softmax variant: assert the
# summed loss is finite for every multiplier combination.
batch_size = 512
num_of_classes = 100
logits = jnp.array(
np.random.normal(size=(batch_size, num_of_classes)), dtype=jnp.float32)
labels = jnp.array(
np.random.randint(0, 2, size=(batch_size, num_of_classes)))
labels *= label_multiplier
weights = jnp.ones(shape=(batch_size,), dtype=jnp.float32)
weights *= weight_multiplier
loss_array = model_utils.weighted_unnormalized_softmax_cross_entropy(
logits, labels, weights=weights)
loss_sum = jnp.sum(loss_array)
self.is_valid(loss_sum, 'Loss value')
if __name__ == '__main__':
# Allow running this test module directly (e.g. `python <module>.py`).
absltest.main()
| [
"scenic.model_lib.base_models.model_utils.weighted_correctly_classified",
"absl.testing.absltest.main",
"numpy.sum",
"scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy",
"numpy.random.randint",
"numpy.random.normal",
"numpy.testing.assert_array_almost_equal",
"scenic.model_lib.ba... | [((17739, 17754), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (17752, 17754), False, 'from absl.testing import absltest\n'), ((1146, 1196), 'jax.numpy.array', 'jnp.array', (['[[1, 0, 2], [2, 1, 0]]'], {'dtype': 'jnp.int32'}), '([[1, 0, 2], [2, 1, 0]], dtype=jnp.int32)\n', (1155, 1196), True, 'import jax.numpy as jnp\n'), ((1205, 1238), 'scenic.model_lib.base_models.model_utils.simple_gather', 'model_utils.simple_gather', (['x', 'idx'], {}), '(x, idx)\n', (1230, 1238), False, 'from scenic.model_lib.base_models import model_utils\n'), ((1639, 1694), 'jax.numpy.array', 'jnp.array', (['[[0.1, 0.3], [-1.0, 0.2]]'], {'dtype': 'jnp.float32'}), '([[0.1, 0.3], [-1.0, 0.2]], dtype=jnp.float32)\n', (1648, 1694), True, 'import jax.numpy as jnp\n'), ((1703, 1758), 'jax.numpy.array', 'jnp.array', (['[[0.5, -1.3], [0.9, 1.2]]'], {'dtype': 'jnp.float32'}), '([[0.5, -1.3], [0.9, 1.2]], dtype=jnp.float32)\n', (1712, 1758), True, 'import jax.numpy as jnp\n'), ((1771, 1821), 'scenic.model_lib.base_models.model_utils.weighted_l1_loss', 'model_utils.weighted_l1_loss', (['x', 'y'], {'reduction': 'None'}), '(x, y, reduction=None)\n', (1799, 1821), False, 'from scenic.model_lib.base_models import model_utils\n'), ((1840, 1894), 'jax.numpy.array', 'jnp.array', (['[[0.4, 1.6], [1.9, 1.0]]'], {'dtype': 'jnp.float32'}), '([[0.4, 1.6], [1.9, 1.0]], dtype=jnp.float32)\n', (1849, 1894), True, 'import jax.numpy as jnp\n'), ((2263, 2315), 'jax.numpy.array', 'jnp.array', (['[[0.1, 0.3, 0.9, 0.8]]'], {'dtype': 'jnp.float32'}), '([[0.1, 0.3, 0.9, 0.8]], dtype=jnp.float32)\n', (2272, 2315), True, 'import jax.numpy as jnp\n'), ((2325, 2377), 'jax.numpy.array', 'jnp.array', (['[[0.5, 0.1, 0.9, 0.7]]'], {'dtype': 'jnp.float32'}), '([[0.5, 0.1, 0.9, 0.7]], dtype=jnp.float32)\n', (2334, 2377), True, 'import jax.numpy as jnp\n'), ((2390, 2430), 'scenic.model_lib.base_models.model_utils.weighted_box_l1_loss', 'model_utils.weighted_box_l1_loss', (['x1', 'y1'], 
{}), '(x1, y1)\n', (2422, 2430), False, 'from scenic.model_lib.base_models import model_utils\n'), ((2449, 2499), 'jax.numpy.array', 'jnp.array', (['[[0.4, 0.2, 0, 0.1]]'], {'dtype': 'jnp.float32'}), '([[0.4, 0.2, 0, 0.1]], dtype=jnp.float32)\n', (2458, 2499), True, 'import jax.numpy as jnp\n'), ((2786, 2839), 'scenic.model_lib.base_models.model_utils.weighted_box_l1_loss', 'model_utils.weighted_box_l1_loss', (['x1', 'y1'], {'tight': '(False)'}), '(x1, y1, tight=False)\n', (2818, 2839), False, 'from scenic.model_lib.base_models import model_utils\n'), ((2858, 2910), 'jax.numpy.array', 'jnp.array', (['[[0.4, 0.0, 0.0, 0.1]]'], {'dtype': 'jnp.float32'}), '([[0.4, 0.0, 0.0, 0.1]], dtype=jnp.float32)\n', (2867, 2910), True, 'import jax.numpy as jnp\n'), ((3116, 3168), 'jax.numpy.array', 'jnp.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'jnp.float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=jnp.float32)\n', (3125, 3168), True, 'import jax.numpy as jnp\n'), ((3182, 3234), 'jax.numpy.array', 'jnp.array', (['[[0, 1, 1], [1, 0, 1]]'], {'dtype': 'jnp.float32'}), '([[0, 1, 1], [1, 0, 1]], dtype=jnp.float32)\n', (3191, 3234), True, 'import jax.numpy as jnp\n'), ((3294, 3352), 'scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy', 'model_utils.weighted_sigmoid_cross_entropy', (['logits', 'labels'], {}), '(logits, labels)\n', (3336, 3352), False, 'from scenic.model_lib.base_models import model_utils\n'), ((3691, 3712), 'jax.numpy.array', 'jnp.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (3700, 3712), True, 'import jax.numpy as jnp\n'), ((3722, 3810), 'scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy', 'model_utils.weighted_sigmoid_cross_entropy', (['logits', 'labels'], {'weights': 'example_weights'}), '(logits, labels, weights=\n example_weights)\n', (3764, 3810), False, 'from scenic.model_lib.base_models import model_utils\n'), ((4079, 4105), 'jax.numpy.array', 'jnp.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (4088, 4105), 
True, 'import jax.numpy as jnp\n'), ((4114, 4206), 'scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy', 'model_utils.weighted_sigmoid_cross_entropy', (['logits', 'labels'], {'label_weights': 'label_weights'}), '(logits, labels, label_weights=\n label_weights)\n', (4156, 4206), False, 'from scenic.model_lib.base_models import model_utils\n'), ((4554, 4671), 'scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy', 'model_utils.weighted_sigmoid_cross_entropy', (['logits', 'labels'], {'weights': 'example_weights', 'label_weights': 'label_weights'}), '(logits, labels, weights=\n example_weights, label_weights=label_weights)\n', (4596, 4671), False, 'from scenic.model_lib.base_models import model_utils\n'), ((5046, 5091), 'jax.numpy.array', 'jnp.array', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n', (5055, 5091), True, 'import jax.numpy as jnp\n'), ((5097, 5214), 'scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy', 'model_utils.weighted_sigmoid_cross_entropy', (['logits', 'labels'], {'weights': 'example_weights', 'label_weights': 'label_weights'}), '(logits, labels, weights=\n example_weights, label_weights=label_weights)\n', (5139, 5214), False, 'from scenic.model_lib.base_models import model_utils\n'), ((5778, 5830), 'jax.numpy.array', 'jnp.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'jnp.float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=jnp.float32)\n', (5787, 5830), True, 'import jax.numpy as jnp\n'), ((5844, 5896), 'jax.numpy.array', 'jnp.array', (['[[0, 1, 1], [1, 0, 1]]'], {'dtype': 'jnp.float32'}), '([[0, 1, 1], [1, 0, 1]], dtype=jnp.float32)\n', (5853, 5896), True, 'import jax.numpy as jnp\n'), ((5980, 6053), 'scenic.model_lib.base_models.model_utils.focal_sigmoid_cross_entropy', 'model_utils.focal_sigmoid_cross_entropy', (['logits', 'labels'], {'alpha': 'a', 'gamma': 'g'}), '(logits, labels, alpha=a, gamma=g)\n', (6019, 6053), False, 'from 
scenic.model_lib.base_models import model_utils\n'), ((6719, 6773), 'numpy.zeros', 'np.zeros', (['(batch, num_objects, h, w)'], {'dtype': 'np.float32'}), '((batch, num_objects, h, w), dtype=np.float32)\n', (6727, 6773), True, 'import numpy as np\n'), ((7027, 7066), 'numpy.zeros', 'np.zeros', (['input_shape'], {'dtype': 'np.float32'}), '(input_shape, dtype=np.float32)\n', (7035, 7066), True, 'import numpy as np\n'), ((7359, 7426), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loss', '[[0.0, 0.0]]'], {'decimal': '(3)'}), '(loss, [[0.0, 0.0]], decimal=3)\n', (7395, 7426), True, 'import numpy as np\n'), ((7485, 7524), 'numpy.zeros', 'np.zeros', (['input_shape'], {'dtype': 'np.float32'}), '(input_shape, dtype=np.float32)\n', (7493, 7524), True, 'import numpy as np\n'), ((7836, 7903), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loss', '[[0.5, 0.0]]'], {'decimal': '(3)'}), '(loss, [[0.5, 0.0]], decimal=3)\n', (7872, 7903), True, 'import numpy as np\n'), ((7961, 8000), 'numpy.zeros', 'np.zeros', (['input_shape'], {'dtype': 'np.float32'}), '(input_shape, dtype=np.float32)\n', (7969, 8000), True, 'import numpy as np\n'), ((8293, 8360), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loss', '[[1.0, 0.0]]'], {'decimal': '(3)'}), '(loss, [[1.0, 0.0]], decimal=3)\n', (8329, 8360), True, 'import numpy as np\n'), ((8429, 8493), 'numpy.zeros', 'np.zeros', (['(batch, 3, h // stride, w // stride)'], {'dtype': 'np.float32'}), '((batch, 3, h // stride, w // stride), dtype=np.float32)\n', (8437, 8493), True, 'import numpy as np\n'), ((8954, 9052), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['loss', '[[[0.0, 1.0], [0.5, 1.0], [1.0, 0.0]]]'], {'decimal': '(3)'}), '(loss, [[[0.0, 1.0], [0.5, 1.0], [1.0, \n 0.0]]], decimal=3)\n', (8990, 9052), True, 'import numpy as np\n'), ((9255, 9480), 'jax.numpy.array', 'jnp.array', (['[[[1.0, 
3.0, 5.0, 6.0], [3.0, 5.0, 11.0, 10.0], [9.0, 10.0, 11.0, 12.0], [\n 14.0, 13.0, 14.0, 17.0]], [[17.0, 18.0, 21.0, 22.0], [20.0, 19.0, 24.0,\n 25.0], [27.0, 29.0, 30.0, 32.0], [27.0, 28.0, 33.0, 32.0]]]'], {}), '([[[1.0, 3.0, 5.0, 6.0], [3.0, 5.0, 11.0, 10.0], [9.0, 10.0, 11.0,\n 12.0], [14.0, 13.0, 14.0, 17.0]], [[17.0, 18.0, 21.0, 22.0], [20.0, \n 19.0, 24.0, 25.0], [27.0, 29.0, 30.0, 32.0], [27.0, 28.0, 33.0, 32.0]]])\n', (9264, 9480), True, 'import jax.numpy as jnp\n'), ((9748, 9809), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {}), '(predictions, targets)\n', (9787, 9809), False, 'from scenic.model_lib.base_models import model_utils\n'), ((10037, 10111), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {'axis': '(1, 2)'}), '(predictions, targets, axis=(1, 2))\n', (10076, 10111), False, 'from scenic.model_lib.base_models import model_utils\n'), ((10233, 10309), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {'axis': '(-1, -2)'}), '(predictions, targets, axis=(-1, -2))\n', (10272, 10309), False, 'from scenic.model_lib.base_models import model_utils\n'), ((10431, 10505), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {'axis': '(2, 1)'}), '(predictions, targets, axis=(2, 1))\n', (10470, 10505), False, 'from scenic.model_lib.base_models import model_utils\n'), ((10681, 10751), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {'axis': '(-1)'}), '(predictions, targets, axis=-1)\n', (10720, 10751), False, 'from scenic.model_lib.base_models import model_utils\n'), ((10987, 11056), 
'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {'axis': '(2)'}), '(predictions, targets, axis=2)\n', (11026, 11056), False, 'from scenic.model_lib.base_models import model_utils\n'), ((11178, 11247), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets'], {'axis': '(1)'}), '(predictions, targets, axis=1)\n', (11217, 11247), False, 'from scenic.model_lib.base_models import model_utils\n'), ((11516, 11555), 'jax.numpy.array', 'jnp.array', (['[[1, 1, 1, 0], [0, 1, 1, 0]]'], {}), '([[1, 1, 1, 0], [0, 1, 1, 0]])\n', (11525, 11555), True, 'import jax.numpy as jnp\n'), ((11567, 11646), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets', 'weights'], {'axis': '(-1)'}), '(predictions, targets, weights, axis=-1)\n', (11606, 11646), False, 'from scenic.model_lib.base_models import model_utils\n'), ((11831, 11848), 'jax.numpy.array', 'jnp.array', (['[1, 0]'], {}), '([1, 0])\n', (11840, 11848), True, 'import jax.numpy as jnp\n'), ((11860, 11939), 'scenic.model_lib.base_models.model_utils.weighted_mean_squared_error', 'model_utils.weighted_mean_squared_error', (['predictions', 'targets', 'weights'], {'axis': '(-1)'}), '(predictions, targets, weights, axis=-1)\n', (11899, 11939), False, 'from scenic.model_lib.base_models import model_utils\n'), ((12864, 12909), 'flax.training.common_utils.onehot', 'common_utils.onehot', (['labels', 'logits.shape[-1]'], {}), '(labels, logits.shape[-1])\n', (12883, 12909), False, 'from flax.training import common_utils\n'), ((12940, 13006), 'scenic.model_lib.base_models.model_utils.weighted_correctly_classified', 'model_utils.weighted_correctly_classified', (['logits', 'one_hot_targets'], {}), '(logits, one_hot_targets)\n', (12981, 13006), False, 'from 
scenic.model_lib.base_models import model_utils\n'), ((13039, 13115), 'scenic.model_lib.base_models.model_utils.weighted_topk_correctly_classified', 'model_utils.weighted_topk_correctly_classified', (['logits', 'one_hot_targets'], {'k': '(1)'}), '(logits, one_hot_targets, k=1)\n', (13085, 13115), False, 'from scenic.model_lib.base_models import model_utils\n'), ((13254, 13348), 'scenic.model_lib.base_models.model_utils.weighted_topk_correctly_classified', 'model_utils.weighted_topk_correctly_classified', (['logits', 'one_hot_targets'], {'k': 'num_of_classes'}), '(logits, one_hot_targets, k=\n num_of_classes)\n', (13300, 13348), False, 'from scenic.model_lib.base_models import model_utils\n'), ((13452, 13528), 'scenic.model_lib.base_models.model_utils.weighted_topk_correctly_classified', 'model_utils.weighted_topk_correctly_classified', (['logits', 'one_hot_targets'], {'k': '(5)'}), '(logits, one_hot_targets, k=5)\n', (13498, 13528), False, 'from scenic.model_lib.base_models import model_utils\n'), ((13645, 13661), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (13653, 13661), True, 'import numpy as np\n'), ((13678, 13708), 'numpy.reshape', 'np.reshape', (['top5_pred', '[-1, 5]'], {}), '(top5_pred, [-1, 5])\n', (13688, 13708), True, 'import numpy as np\n'), ((13722, 13746), 'numpy.reshape', 'np.reshape', (['y_true', '[-1]'], {}), '(y_true, [-1])\n', (13732, 13746), True, 'import numpy as np\n'), ((14064, 14131), 'numpy.array', 'np.array', (['[[[2, 3, 4], [4, 3, 2], [4, 2, 3], [3, 2, 4], [4, 2, 3]]]'], {}), '([[[2, 3, 4], [4, 3, 2], [4, 2, 3], [3, 2, 4], [4, 2, 3]]])\n', (14072, 14131), True, 'import numpy as np\n'), ((14267, 14334), 'numpy.array', 'np.array', (['[[[1, 1, 0], [1, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 0]]]'], {}), '([[[1, 1, 0], [1, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 0]]])\n', (14275, 14334), True, 'import numpy as np\n'), ((14489, 14525), 'jax.numpy.tile', 'jnp.tile', (['logits', '[batch_size, 1, 1]'], {}), '(logits, [batch_size, 1, 
1])\n', (14497, 14525), True, 'import jax.numpy as jnp\n'), ((14539, 14575), 'jax.numpy.tile', 'jnp.tile', (['labels', '[batch_size, 1, 1]'], {}), '(labels, [batch_size, 1, 1])\n', (14547, 14575), True, 'import jax.numpy as jnp\n'), ((14590, 14633), 'scenic.model_lib.base_models.model_utils.weighted_recall', 'model_utils.weighted_recall', (['logits', 'labels'], {}), '(logits, labels)\n', (14617, 14633), False, 'from scenic.model_lib.base_models import model_utils\n'), ((14656, 14708), 'numpy.array', 'np.array', (['([[1 / 2, 1.0, 1.0, 0.0, 0.0]] * batch_size)'], {}), '([[1 / 2, 1.0, 1.0, 0.0, 0.0]] * batch_size)\n', (14664, 14708), True, 'import numpy as np\n'), ((15344, 15392), 'jax.numpy.ones', 'jnp.ones', ([], {'shape': '(batch_size,)', 'dtype': 'jnp.float32'}), '(shape=(batch_size,), dtype=jnp.float32)\n', (15352, 15392), True, 'import jax.numpy as jnp\n'), ((15450, 15537), 'scenic.model_lib.base_models.model_utils.weighted_top_one_correctly_classified', 'model_utils.weighted_top_one_correctly_classified', (['logits', 'labels'], {'weights': 'weights'}), '(logits, labels, weights=\n weights)\n', (15499, 15537), False, 'from scenic.model_lib.base_models import model_utils\n'), ((15560, 15585), 'jax.numpy.sum', 'jnp.sum', (['is_correct_array'], {}), '(is_correct_array)\n', (15567, 15585), True, 'import jax.numpy as jnp\n'), ((15613, 15689), 'scenic.model_lib.base_models.model_utils.weighted_topk_correctly_classified', 'model_utils.weighted_topk_correctly_classified', (['logits', 'labels', 'weights'], {'k': '(1)'}), '(logits, labels, weights, k=1)\n', (15659, 15689), False, 'from scenic.model_lib.base_models import model_utils\n'), ((15704, 15780), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['is_correct_array', 'is_correct_array_ref'], {}), '(is_correct_array, is_correct_array_ref)\n', (15740, 15780), True, 'import numpy as np\n'), ((14821, 14862), 'itertools.product', 'itertools.product', (['[1.0, 0.0]', '[1.0, 0.0]'], {}), 
'([1.0, 0.0], [1.0, 0.0])\n', (14838, 14862), False, 'import itertools\n'), ((16578, 16626), 'jax.numpy.ones', 'jnp.ones', ([], {'shape': '(batch_size,)', 'dtype': 'jnp.float32'}), '(shape=(batch_size,), dtype=jnp.float32)\n', (16586, 16626), True, 'import jax.numpy as jnp\n'), ((16678, 16770), 'scenic.model_lib.base_models.model_utils.weighted_unnormalized_sigmoid_cross_entropy', 'model_utils.weighted_unnormalized_sigmoid_cross_entropy', (['logits', 'labels'], {'weights': 'weights'}), '(logits, labels,\n weights=weights)\n', (16733, 16770), False, 'from scenic.model_lib.base_models import model_utils\n'), ((16791, 16810), 'jax.numpy.sum', 'jnp.sum', (['loss_array'], {}), '(loss_array)\n', (16798, 16810), True, 'import jax.numpy as jnp\n'), ((15997, 16038), 'itertools.product', 'itertools.product', (['[1.0, 0.0]', '[1.0, 0.0]'], {}), '([1.0, 0.0], [1.0, 0.0])\n', (16014, 16038), False, 'import itertools\n'), ((17432, 17480), 'jax.numpy.ones', 'jnp.ones', ([], {'shape': '(batch_size,)', 'dtype': 'jnp.float32'}), '(shape=(batch_size,), dtype=jnp.float32)\n', (17440, 17480), True, 'import jax.numpy as jnp\n'), ((17532, 17624), 'scenic.model_lib.base_models.model_utils.weighted_unnormalized_softmax_cross_entropy', 'model_utils.weighted_unnormalized_softmax_cross_entropy', (['logits', 'labels'], {'weights': 'weights'}), '(logits, labels,\n weights=weights)\n', (17587, 17624), False, 'from scenic.model_lib.base_models import model_utils\n'), ((17645, 17664), 'jax.numpy.sum', 'jnp.sum', (['loss_array'], {}), '(loss_array)\n', (17652, 17664), True, 'import jax.numpy as jnp\n'), ((16883, 16924), 'itertools.product', 'itertools.product', (['[1.0, 0.0]', '[1.0, 0.0]'], {}), '([1.0, 0.0], [1.0, 0.0])\n', (16900, 16924), False, 'import itertools\n'), ((1083, 1115), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(2, 3, 5)'}), '(size=(2, 3, 5))\n', (1099, 1115), True, 'import numpy as np\n'), ((3546, 3572), 'numpy.prod', 'np.prod', (['labels.shape[:-1]'], {}), 
'(labels.shape[:-1])\n', (3553, 3572), True, 'import numpy as np\n'), ((4420, 4446), 'numpy.prod', 'np.prod', (['labels.shape[:-1]'], {}), '(labels.shape[:-1])\n', (4427, 4446), True, 'import numpy as np\n'), ((5533, 5564), 'jax.numpy.array', 'jnp.array', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (5542, 5564), True, 'import jax.numpy as jnp\n'), ((5574, 5666), 'scenic.model_lib.base_models.model_utils.weighted_sigmoid_cross_entropy', 'model_utils.weighted_sigmoid_cross_entropy', (['logits', 'labels'], {'label_weights': 'label_weights'}), '(logits, labels, label_weights=\n label_weights)\n', (5616, 5666), False, 'from scenic.model_lib.base_models import model_utils\n'), ((7291, 7308), 'jax.numpy.array', 'jnp.array', (['inputs'], {}), '(inputs)\n', (7300, 7308), True, 'import jax.numpy as jnp\n'), ((7310, 7328), 'jax.numpy.array', 'jnp.array', (['targets'], {}), '(targets)\n', (7319, 7328), True, 'import jax.numpy as jnp\n'), ((7768, 7785), 'jax.numpy.array', 'jnp.array', (['inputs'], {}), '(inputs)\n', (7777, 7785), True, 'import jax.numpy as jnp\n'), ((7787, 7805), 'jax.numpy.array', 'jnp.array', (['targets'], {}), '(targets)\n', (7796, 7805), True, 'import jax.numpy as jnp\n'), ((8225, 8242), 'jax.numpy.array', 'jnp.array', (['inputs'], {}), '(inputs)\n', (8234, 8242), True, 'import jax.numpy as jnp\n'), ((8244, 8262), 'jax.numpy.array', 'jnp.array', (['targets'], {}), '(targets)\n', (8253, 8262), True, 'import jax.numpy as jnp\n'), ((8790, 8807), 'jax.numpy.array', 'jnp.array', (['inputs'], {}), '(inputs)\n', (8799, 8807), True, 'import jax.numpy as jnp\n'), ((8809, 8827), 'jax.numpy.array', 'jnp.array', (['targets'], {}), '(targets)\n', (8818, 8827), True, 'import jax.numpy as jnp\n'), ((9839, 9862), 'jax.numpy.array', 'jnp.array', (['[38.0, 70.0]'], {}), '([38.0, 70.0])\n', (9848, 9862), True, 'import jax.numpy as jnp\n'), ((10832, 10875), 'jax.numpy.array', 'jnp.array', (['[[9, 25, 0, 4], [8, 12, 38, 12]]'], {}), '([[9, 25, 0, 4], [8, 12, 38, 
12]])\n', (10841, 10875), True, 'import jax.numpy as jnp\n'), ((11328, 11371), 'jax.numpy.array', 'jnp.array', (['[[5, 3, 21, 9], [9, 22, 18, 21]]'], {}), '([[5, 3, 21, 9], [9, 22, 18, 21]])\n', (11337, 11371), True, 'import jax.numpy as jnp\n'), ((11727, 11756), 'jax.numpy.array', 'jnp.array', (['[9, 25, 12, 38, 0]'], {}), '([9, 25, 12, 38, 0])\n', (11736, 11756), True, 'import jax.numpy as jnp\n'), ((12020, 12044), 'jax.numpy.array', 'jnp.array', (['[9, 25, 0, 4]'], {}), '([9, 25, 0, 4])\n', (12029, 12044), True, 'import jax.numpy as jnp\n'), ((12691, 12742), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, num_of_classes)'}), '(size=(batch_size, num_of_classes))\n', (12707, 12742), True, 'import numpy as np\n'), ((12786, 12839), 'numpy.random.randint', 'np.random.randint', (['num_of_classes'], {'size': '(batch_size,)'}), '(num_of_classes, size=(batch_size,))\n', (12803, 12839), True, 'import numpy as np\n'), ((13374, 13398), 'jax.numpy.mean', 'jnp.mean', (['top_n_accuracy'], {}), '(top_n_accuracy)\n', (13382, 13398), True, 'import jax.numpy as jnp\n'), ((15117, 15172), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, 50, num_of_classes)'}), '(size=(batch_size, 50, num_of_classes))\n', (15133, 15172), True, 'import numpy as np\n'), ((15225, 15287), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(batch_size, 50, num_of_classes)'}), '(0, 2, size=(batch_size, 50, num_of_classes))\n', (15242, 15287), True, 'import numpy as np\n'), ((15818, 15842), 'numpy.sum', 'np.sum', (['is_correct_array'], {}), '(is_correct_array)\n', (15824, 15842), True, 'import numpy as np\n'), ((15872, 15900), 'numpy.sum', 'np.sum', (['is_correct_array_ref'], {}), '(is_correct_array_ref)\n', (15878, 15900), True, 'import numpy as np\n'), ((16336, 16387), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, num_of_classes)'}), '(size=(batch_size, num_of_classes))\n', (16352, 16387), True, 'import numpy as 
np\n'), ((16431, 16489), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(batch_size, num_of_classes)'}), '(0, 2, size=(batch_size, num_of_classes))\n', (16448, 16489), True, 'import numpy as np\n'), ((17222, 17273), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, num_of_classes)'}), '(size=(batch_size, num_of_classes))\n', (17238, 17273), True, 'import numpy as np\n'), ((17326, 17384), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(batch_size, num_of_classes)'}), '(0, 2, size=(batch_size, num_of_classes))\n', (17343, 17384), True, 'import numpy as np\n'), ((1274, 1312), 'jax.numpy.stack', 'jnp.stack', (['[x[0, 1], x[0, 0], x[0, 2]]'], {}), '([x[0, 1], x[0, 0], x[0, 2]])\n', (1283, 1312), True, 'import jax.numpy as jnp\n'), ((1322, 1360), 'jax.numpy.stack', 'jnp.stack', (['[x[1, 2], x[1, 1], x[1, 0]]'], {}), '([x[1, 2], x[1, 1], x[1, 0]])\n', (1331, 1360), True, 'import jax.numpy as jnp\n'), ((2000, 2052), 'scenic.model_lib.base_models.model_utils.weighted_l1_loss', 'model_utils.weighted_l1_loss', (['x', 'y'], {'reduction': '"""mean"""'}), "(x, y, reduction='mean')\n", (2028, 2052), False, 'from scenic.model_lib.base_models import model_utils\n'), ((2605, 2663), 'scenic.model_lib.base_models.model_utils.weighted_box_l1_loss', 'model_utils.weighted_box_l1_loss', (['x1', 'y1'], {'reduction': '"""mean"""'}), "(x1, y1, reduction='mean')\n", (2637, 2663), False, 'from scenic.model_lib.base_models import model_utils\n'), ((2689, 2710), 'jax.numpy.mean', 'jnp.mean', (['out1_target'], {}), '(out1_target)\n', (2697, 2710), True, 'import jax.numpy as jnp\n'), ((9627, 9644), 'jax.numpy.arange', 'jnp.arange', (['(1)', '(33)'], {}), '(1, 33)\n', (9637, 9644), True, 'import jax.numpy as jnp\n'), ((13574, 13614), 'numpy.reshape', 'np.reshape', (['logits', '[-1, num_of_classes]'], {}), '(logits, [-1, num_of_classes])\n', (13584, 13614), True, 'import numpy as np\n'), ((12352, 12364), 'jax.numpy.isnan', 
'jnp.isnan', (['t'], {}), '(t)\n', (12361, 12364), True, 'import jax.numpy as jnp\n'), ((12447, 12459), 'jax.numpy.isinf', 'jnp.isinf', (['t'], {}), '(t)\n', (12456, 12459), True, 'import jax.numpy as jnp\n')] |
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import datetime as dt
import numpy as np
class PlotUtils:
    """Stateless helper for plotting a single metric curve (e.g. loss vs. epoch)."""

    def __init__(self):
        # Nothing to initialize; kept so existing callers that instantiate
        # PlotUtils() keep working.
        pass

    # Axes API: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes
    # Fix: the original defined plot() without `self` and without
    # @staticmethod, so calling it on an instance passed the instance as `x`.
    @staticmethod
    def plot(x, _xlabel, y, _ylabel):
        """Draw ``y`` vs ``x`` as a blue line and display the figure.

        Args:
            x: Sequence of x values (e.g. epoch numbers).
            _xlabel: Label for the x axis.
            y: Sequence of y values, same length as ``x``.
            _ylabel: Label for the y axis.

        A single point is not a curve, so nothing is drawn when both
        sequences have length one (convergence in the first epoch).
        """
        if len(x) == 1 and len(y) == 1:
            print('Only one training epoch was performed; no plot to show.')
            return
        ax = plt.gca()
        ax.plot(x, y, color='blue', linewidth=1.5)
        # Epochs are integers, so force integer tick locations on x.
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
        # Fit the axes tightly to the data range.
        ax.set_xlim([np.min(x), np.max(x)])
        ax.set_ylim([np.min(y), np.max(y)])
        ax.set_xlabel(_xlabel)
        ax.set_ylabel(_ylabel)
        ax.set_title('{} vs {}'.format(_xlabel, _ylabel))
        ax.grid()
        plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.ticker.MaxNLocator",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.gca"
] | [((479, 488), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (486, 488), True, 'import matplotlib.pyplot as plt\n'), ((913, 923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (921, 923), True, 'import matplotlib.pyplot as plt\n'), ((575, 600), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (586, 600), False, 'from matplotlib.ticker import MaxNLocator\n'), ((649, 658), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (655, 658), True, 'import numpy as np\n'), ((660, 669), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (666, 669), True, 'import numpy as np\n'), ((693, 702), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (699, 702), True, 'import numpy as np\n'), ((704, 713), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (710, 713), True, 'import numpy as np\n')] |
from typing import NamedTuple
import numpy as np
from numpy import linspace
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy.stats as stats
from arch.bootstrap import (
CircularBlockBootstrap,
MovingBlockBootstrap,
StationaryBootstrap,
)
from arch.bootstrap.multiple_comparison import MCS, SPA, StepM
class SPAData(NamedTuple):
    """Immutable bundle of fixture data shared by the SPA tests.

    Holds the same losses in ndarray and pandas form so tests can check
    that both input types produce identical results.
    """

    rng: RandomState  # seeded RandomState carried along for tests that need it
    k: int  # number of candidate models (columns of ``models``)
    t: int  # number of time-series observations (rows)
    benchmark: np.ndarray  # benchmark losses, shape (t,)
    models: np.ndarray  # model losses, shape (t, k)
    index: pd.DatetimeIndex  # daily dates aligning the pandas containers
    benchmark_series: pd.Series  # ``benchmark`` wrapped as a Series on ``index``
    benchmark_df: pd.DataFrame  # ``benchmark`` wrapped as a one-column DataFrame
    models_df: pd.DataFrame  # ``models`` wrapped as a DataFrame on ``index``
@pytest.fixture()
def spa_data():
    """Create the shared SPA test panel: chi2(10) losses for a benchmark
    and 500 candidate models over 1000 daily observations."""
    n_obs, n_models = 1000, 500
    loss_dist = stats.chi2(10)
    # Draw the benchmark first, then the model panel, so the global
    # NumPy random stream is consumed in the same order as before.
    bench = loss_dist.rvs(n_obs)
    model_losses = loss_dist.rvs((n_obs, n_models))
    dates = pd.date_range("2000-01-01", periods=n_obs)
    return SPAData(
        rng=RandomState(23456),
        k=n_models,
        t=n_obs,
        benchmark=bench,
        models=model_losses,
        index=dates,
        benchmark_series=pd.Series(bench, index=dates),
        benchmark_df=pd.DataFrame(bench, index=dates),
        models_df=pd.DataFrame(model_losses, index=dates),
    )
def test_equivalence(spa_data):
    """SPA must produce identical pvalues for ndarray and DataFrame inputs."""

    def _seeded_pvalues(bench, models):
        # Same SPA configuration and seed for both input types.
        spa = SPA(bench, models, block_size=10, reps=100)
        spa.seed(23456)
        spa.compute()
        return spa.pvalues

    from_numpy = _seeded_pvalues(spa_data.benchmark, spa_data.models)
    from_pandas = _seeded_pvalues(spa_data.benchmark_df, spa_data.models_df)
    assert_series_equal(from_numpy, from_pandas)
def test_variances_and_selection(spa_data):
    """Validate SPA's internal variance estimator and model pre-selection.

    First recomputes the kernel-weighted long-run variances directly and
    compares them with ``spa._loss_diff_var``; then checks the column
    selection rule; finally checks the nested-bootstrap variance path.
    """
    # Shift model losses so some models beat the benchmark and some do not.
    adj_models = spa_data.models + linspace(-2, 0.5, spa_data.k)
    spa = SPA(spa_data.benchmark, adj_models, block_size=10, reps=10)
    spa.seed(23456)
    spa.compute()
    variances = spa._loss_diff_var
    loss_diffs = spa._loss_diff
    demeaned = spa._loss_diff - loss_diffs.mean(0)
    t = loss_diffs.shape[0]
    kernel_weights = np.zeros(t)
    # p = 1/block_size; these look like the stationary-bootstrap kernel
    # weights for expected block length 10 — matches block_size above.
    p = 1 / 10.0
    for i in range(1, t):
        kernel_weights[i] = ((1.0 - (i / t)) * ((1 - p) ** i)) + (
            (i / t) * ((1 - p) ** (t - i))
        )
    # Long-run variance: own variance plus kernel-weighted autocovariances.
    direct_vars = (demeaned ** 2).sum(0) / t
    for i in range(1, t):
        direct_vars += (
            2 * kernel_weights[i] * (demeaned[: t - i, :] * demeaned[i:, :]).sum(0) / t
        )
    assert_allclose(direct_vars, variances)
    # Columns whose mean loss differential clears this threshold are kept.
    selection_criteria = -1.0 * np.sqrt((direct_vars / t) * 2 * np.log(np.log(t)))
    valid = loss_diffs.mean(0) >= selection_criteria
    assert_equal(valid, spa._valid_columns)
    # Bootstrap variances
    spa = SPA(spa_data.benchmark, spa_data.models, block_size=10, reps=100, nested=True)
    spa.seed(23456)
    spa.compute()
    spa.reset()
    bs = spa.bootstrap.clone(demeaned)
    variances = spa._loss_diff_var
    # Nested path: variances should match t * bootstrap variance of the mean.
    bootstrap_variances = t * bs.var(lambda x: x.mean(0), reps=100, recenter=True)
    assert_allclose(bootstrap_variances, variances)
def test_pvalues_and_critvals(spa_data):
    """pvalues and critical_values must match direct computation from the
    simulated max statistics."""
    spa = SPA(spa_data.benchmark, spa_data.models, reps=100)
    # Seed BEFORE compute so the bootstrap draws are reproducible.  The
    # original seeded after compute(), which had no effect on the results —
    # every sibling test uses the seed -> compute order.
    spa.seed(23456)
    spa.compute()
    simulated_vals = spa._simulated_vals
    # Max over models for each (rep, variant) pair of simulated statistics.
    max_stats = np.max(simulated_vals, 0)
    max_loss_diff = np.max(spa._loss_diff.mean(0), 0)
    # pvalue = fraction of simulated maxima at least as large as the observed.
    pvalues = np.mean(max_loss_diff <= max_stats, 0)
    pvalues = pd.Series(pvalues, index=["lower", "consistent", "upper"])
    assert_series_equal(pvalues, spa.pvalues)
    # critical_values(0.10) should be the 90th percentile of the simulated maxima.
    crit_vals = np.percentile(max_stats, 90.0, axis=0)
    crit_vals = pd.Series(crit_vals, index=["lower", "consistent", "upper"])
    assert_series_equal(spa.critical_values(0.10), crit_vals)
def test_errors(spa_data):
    """Result accessors raise before compute(); invalid options raise ValueError."""
    spa = SPA(spa_data.benchmark, spa_data.models, reps=100)
    # No results exist yet, so every accessor must raise RuntimeError.
    for premature in (
        lambda: spa.pvalues,
        lambda: spa.critical_values(),
        lambda: spa.better_models(),
    ):
        with pytest.raises(RuntimeError):
            premature()
    # Unknown bootstrap name is rejected at construction time.
    with pytest.raises(ValueError):
        SPA(spa_data.benchmark, spa_data.models, bootstrap="unknown")
    spa.compute()
    # After compute, bad keyword values still raise ValueError.
    for bad_call in (
        lambda: spa.better_models(pvalue_type="unknown"),
        lambda: spa.critical_values(pvalue=1.0),
    ):
        with pytest.raises(ValueError):
            bad_call()
def test_str_repr(spa_data):
    """str/repr/_repr_html_ reflect the studentization and bootstrap choices."""
    spa = SPA(spa_data.benchmark, spa_data.models)
    base = f"SPA(studentization: asymptotic, bootstrap: {spa.bootstrap})"
    assert_equal(str(spa), base)
    # repr appends the object id before the closing parenthesis.
    assert_equal(spa.__repr__(), f"{base[:-1]}, ID: {hex(id(spa))})")
    html = (
        f"<strong>SPA</strong>(<strong>studentization</strong>: asymptotic, "
        f"<strong>bootstrap</strong>: {spa.bootstrap}, "
        f"<strong>ID</strong>: {hex(id(spa))})"
    )
    assert_equal(spa._repr_html_(), html)
    spa = SPA(spa_data.benchmark, spa_data.models, studentize=False, bootstrap="cbb")
    assert_equal(str(spa), f"SPA(studentization: none, bootstrap: {spa.bootstrap})")
    spa = SPA(
        spa_data.benchmark, spa_data.models, nested=True, bootstrap="moving_block"
    )
    assert_equal(
        str(spa), f"SPA(studentization: bootstrap, bootstrap: {spa.bootstrap})"
    )
def test_seed_reset(spa_data):
    """reset() clears computed pvalues and restores the seeded RNG state."""
    spa = SPA(spa_data.benchmark, spa_data.models, reps=10)
    spa.seed(23456)
    assert_equal(spa.bootstrap._seed, 23456)
    # Capture the state right after seeding; reset() must restore it.
    saved_state = spa.bootstrap.random_state
    spa.compute()
    spa.reset()
    assert spa._pvalues == {}
    assert_equal(spa.bootstrap.random_state, saved_state)
def test_spa_nested(spa_data):
    """Smoke test: nested-bootstrap studentization completes without error."""
    nested_spa = SPA(spa_data.benchmark, spa_data.models, nested=True, reps=100)
    nested_spa.compute()
def test_bootstrap_selection(spa_data):
    """Each bootstrap alias string selects the expected bootstrap class."""
    alias_to_class = {
        "sb": StationaryBootstrap,
        "cbb": CircularBlockBootstrap,
        "circular": CircularBlockBootstrap,
        "mbb": MovingBlockBootstrap,
        "moving block": MovingBlockBootstrap,
    }
    for alias, expected_cls in alias_to_class.items():
        spa = SPA(spa_data.benchmark, spa_data.models, bootstrap=alias)
        assert isinstance(spa.bootstrap, expected_cls)
def test_single_model(spa_data):
    """SPA accepts a single model given as a 1-d ndarray or a Series."""
    single_model_inputs = (
        (spa_data.benchmark, spa_data.models[:, 0]),
        (spa_data.benchmark_series, spa_data.models_df.iloc[:, 0]),
    )
    for bench, lone_model in single_model_inputs:
        spa = SPA(bench, lone_model)
        spa.compute()
class TestStepM(object):
    """Tests for StepM on a fixed chi2(10) loss panel (t=1000, k=500)."""

    @classmethod
    def setup_class(cls):
        """Build the shared benchmark/model losses once for the whole class."""
        # NOTE(review): cls.rng is stored but the draws below use the global
        # NumPy random stream via the frozen chi2 distribution.
        cls.rng = RandomState(23456)
        fixed_rng = stats.chi2(10)
        cls.t = t = 1000
        cls.k = k = 500
        cls.benchmark = fixed_rng.rvs(t)
        cls.models = fixed_rng.rvs((t, k))
        index = pd.date_range("2000-01-01", periods=t)
        cls.benchmark_series = pd.Series(cls.benchmark, index=index)
        cls.benchmark_df = pd.DataFrame(cls.benchmark, index=index)
        cols = ["col_" + str(i) for i in range(cls.k)]
        cls.models_df = pd.DataFrame(cls.models, index=index, columns=cols)

    def test_equivalence(self):
        """ndarray and DataFrame inputs must select the same superior models."""
        # Shift losses so a subset of models dominates the benchmark.
        adj_models = self.models - linspace(-2.0, 2.0, self.k)
        stepm = StepM(self.benchmark, adj_models, size=0.20, reps=200)
        stepm.seed(23456)
        stepm.compute()
        adj_models = self.models_df - linspace(-2.0, 2.0, self.k)
        stepm_pandas = StepM(self.benchmark_series, adj_models, size=0.20, reps=200)
        stepm_pandas.seed(23456)
        stepm_pandas.compute()
        assert isinstance(stepm_pandas.superior_models, list)
        # Map the pandas column labels back to integer positions for comparison.
        members = adj_models.columns.isin(stepm_pandas.superior_models)
        numeric_locs = np.argwhere(members).squeeze()
        numeric_locs.sort()
        assert_equal(np.array(stepm.superior_models), numeric_locs)

    def test_superior_models(self):
        """superior_models must be non-empty when some models clearly dominate."""
        adj_models = self.models - linspace(-1.0, 1.0, self.k)
        stepm = StepM(self.benchmark, adj_models, reps=120)
        stepm.compute()
        superior_models = stepm.superior_models
        assert len(superior_models) > 0
        # The underlying SPA machinery should also run on the same data.
        spa = SPA(self.benchmark, adj_models, reps=120)
        spa.compute()
        assert isinstance(spa.pvalues, pd.Series)
        spa.critical_values(0.05)
        spa.better_models(0.05)
        adj_models = self.models_df - linspace(-3.0, 3.0, self.k)
        stepm = StepM(self.benchmark_series, adj_models, reps=120)
        stepm.compute()
        superior_models = stepm.superior_models
        assert len(superior_models) > 0

    def test_str_repr(self):
        """str/repr/_repr_html_ reflect size, studentization and bootstrap."""
        stepm = StepM(self.benchmark_series, self.models, size=0.10)
        expected = (
            "StepM(FWER (size): 0.10, studentization: "
            "asymptotic, bootstrap: " + str(stepm.spa.bootstrap) + ")"
        )
        assert_equal(str(stepm), expected)
        # repr appends the object id before the closing parenthesis.
        expected = expected[:-1] + ", ID: " + hex(id(stepm)) + ")"
        assert_equal(stepm.__repr__(), expected)
        expected = (
            "<strong>StepM</strong>("
            "<strong>FWER (size)</strong>: 0.10, "
            "<strong>studentization</strong>: asymptotic, "
            "<strong>bootstrap</strong>: "
            + str(stepm.spa.bootstrap)
            + ", "
            + "<strong>ID</strong>: "
            + hex(id(stepm))
            + ")"
        )
        assert_equal(stepm._repr_html_(), expected)
        stepm = StepM(self.benchmark_series, self.models, size=0.05, studentize=False)
        expected = (
            "StepM(FWER (size): 0.05, studentization: none, "
            "bootstrap: " + str(stepm.spa.bootstrap) + ")"
        )
        assert_equal(expected, str(stepm))

    def test_single_model(self):
        """StepM accepts a single model as a 1-d ndarray or a Series."""
        stepm = StepM(self.benchmark, self.models[:, 0], size=0.10)
        stepm.compute()
        stepm = StepM(self.benchmark_series, self.models_df.iloc[:, 0])
        stepm.compute()

    def test_all_superior(self):
        """When every model dominates, all k models must be selected."""
        adj_models = self.models - 100.0
        stepm = StepM(self.benchmark, adj_models, size=0.10)
        stepm.compute()
        assert_equal(len(stepm.superior_models), self.models.shape[1])

    def test_errors(self):
        """Accessing superior_models before compute() must raise."""
        stepm = StepM(self.benchmark, self.models, size=0.10)
        with pytest.raises(RuntimeError):
            stepm.superior_models

    def test_exact_ties(self):
        """Models whose mean loss exactly ties the benchmark are not selected."""
        adj_models: pd.DataFrame = self.models_df - 100.0
        # Force the first two columns to have mean loss equal to the benchmark.
        adj_models.iloc[:, :2] -= adj_models.iloc[:, :2].mean()
        adj_models.iloc[:, :2] += self.benchmark_df.mean().iloc[0]
        stepm = StepM(self.benchmark_df, adj_models, size=0.10)
        stepm.compute()
        assert_equal(len(stepm.superior_models), self.models.shape[1] - 2)
class TestMCS(object):
    """Tests for the Model Confidence Set on a chi2(10) panel (t=1000, k=50)."""

    @classmethod
    def setup_class(cls):
        """Build the shared loss panel once for the whole class."""
        # NOTE(review): cls.rng is stored but the draws below use the global
        # NumPy random stream via the frozen chi2 distribution.
        cls.rng = RandomState(23456)
        fixed_rng = stats.chi2(10)
        cls.t = t = 1000
        cls.k = k = 50
        cls.losses = fixed_rng.rvs((t, k))
        index = pd.date_range("2000-01-01", periods=t)
        cls.losses_df = pd.DataFrame(cls.losses, index=index)

    def test_r_method(self):
        """MCS pvalues with method='r' must match a direct reference implementation."""

        def r_step(losses, indices):
            # A basic but direct implementation of the r method
            k = losses.shape[1]
            b = len(indices)
            mean_diffs = losses.mean(0)
            loss_diffs = np.zeros((k, k))
            variances = np.zeros((k, k))
            bs_diffs = np.zeros(b)
            stat_candidates = []
            # Pairwise loss differentials with bootstrapped studentization.
            for i in range(k):
                for j in range(i, k):
                    if i == j:
                        variances[i, i] = 1.0
                        loss_diffs[i, j] = 0.0
                        continue
                    loss_diffs_vec = losses[:, i] - losses[:, j]
                    loss_diffs_vec = loss_diffs_vec - loss_diffs_vec.mean()
                    loss_diffs[i, j] = mean_diffs[i] - mean_diffs[j]
                    loss_diffs[j, i] = mean_diffs[j] - mean_diffs[i]
                    for n in range(b):
                        # Compute bootstrapped versions
                        bs_diffs[n] = loss_diffs_vec[indices[n]].mean()
                    variances[j, i] = variances[i, j] = (bs_diffs ** 2).mean()
                    std_diffs = np.abs(bs_diffs) / np.sqrt(variances[i, j])
                    stat_candidates.append(std_diffs)
            stat_candidates = np.array(stat_candidates).T
            stat_distn = np.max(stat_candidates, 1)
            std_loss_diffs = loss_diffs / np.sqrt(variances)
            stat = np.max(std_loss_diffs)
            pval = np.mean(stat <= stat_distn)
            # flat[0] picks the (first) location of the max standardized diff.
            loc = np.argwhere(std_loss_diffs == stat)
            drop_index = loc.flat[0]
            return pval, drop_index

        losses = self.losses[:, :10]  # Limit size
        mcs = MCS(losses, 0.05, reps=200)
        mcs.seed(23456)
        mcs.compute()
        m = 5  # Number of direct
        pvals = np.zeros(m) * np.nan
        indices = np.zeros(m) * np.nan
        # Replay the sequential elimination for the first m steps.
        for i in range(m):
            removed = list(indices[np.isfinite(indices)])
            include = list(set(list(range(10))).difference(removed))
            include.sort()
            pval, drop_index = r_step(
                losses[:, np.array(include)], mcs._bootstrap_indices
            )
            # MCS pvalues are monotone non-decreasing across steps.
            pvals[i] = pval if i == 0 else np.max([pvals[i - 1], pval])
            indices[i] = include[drop_index]
        direct = pd.DataFrame(
            pvals, index=np.array(indices, dtype=np.int64), columns=["Pvalue"]
        )
        direct.index.name = "Model index"
        assert_frame_equal(mcs.pvalues.iloc[:m], direct)

    def test_max_method(self):
        """MCS pvalues with method='max' must match a direct reference implementation."""

        def max_step(losses, indices):
            # A basic but direct implementation of the max method
            k = losses.shape[1]
            b = len(indices)
            loss_errors = losses - losses.mean(0)
            stats = np.zeros((b, k))
            for n in range(b):
                # Compute bootstrapped versions
                bs_loss_errors = loss_errors[indices[n]]
                stats[n] = bs_loss_errors.mean(0) - bs_loss_errors.mean()
            variances = (stats ** 2).mean(0)
            std_devs = np.sqrt(variances)
            stat_dist = np.max(stats / std_devs, 1)
            test_stat = losses.mean(0) - losses.mean()
            std_test_stat = test_stat / std_devs
            test_stat = np.max(std_test_stat)
            pval = (test_stat < stat_dist).mean()
            drop_index = np.argwhere(std_test_stat == test_stat).squeeze()
            return pval, drop_index, std_devs

        losses = self.losses[:, :10]  # Limit size
        mcs = MCS(losses, 0.05, reps=200, method="max")
        mcs.seed(23456)
        mcs.compute()
        m = 8  # Number of direct
        pvals = np.zeros(m) * np.nan
        indices = np.zeros(m) * np.nan
        # Replay the sequential elimination for the first m steps.
        for i in range(m):
            removed = list(indices[np.isfinite(indices)])
            include = list(set(list(range(10))).difference(removed))
            include.sort()
            pval, drop_index, _ = max_step(
                losses[:, np.array(include)], mcs._bootstrap_indices
            )
            # MCS pvalues are monotone non-decreasing across steps.
            pvals[i] = pval if i == 0 else np.max([pvals[i - 1], pval])
            indices[i] = include[drop_index]
        direct = pd.DataFrame(
            pvals, index=np.array(indices, dtype=np.int64), columns=["Pvalue"]
        )
        direct.index.name = "Model index"
        assert_frame_equal(mcs.pvalues.iloc[:m], direct)

    def test_output_types(self):
        """included/excluded are lists; pvalues is a DataFrame."""
        mcs = MCS(self.losses_df, 0.05, reps=100, block_size=10, method="r")
        mcs.compute()
        assert isinstance(mcs.included, list)
        assert isinstance(mcs.excluded, list)
        assert isinstance(mcs.pvalues, pd.DataFrame)

    def test_mcs_error(self):
        """Accessing results before compute() must raise RuntimeError."""
        mcs = MCS(self.losses_df, 0.05, reps=100, block_size=10, method="r")
        with pytest.raises(RuntimeError):
            mcs.included

    def test_errors(self):
        """1-d losses and unknown bootstrap names raise; valid aliases run."""
        with pytest.raises(ValueError):
            MCS(self.losses[:, 1], 0.05)
        mcs = MCS(
            self.losses,
            0.05,
            reps=100,
            block_size=10,
            method="max",
            bootstrap="circular",
        )
        mcs.compute()
        mcs = MCS(
            self.losses,
            0.05,
            reps=100,
            block_size=10,
            method="max",
            bootstrap="moving block",
        )
        mcs.compute()
        with pytest.raises(ValueError):
            MCS(self.losses, 0.05, bootstrap="unknown")

    def test_str_repr(self):
        """str/repr/_repr_html_ reflect the size and bootstrap choices."""
        mcs = MCS(self.losses, 0.05)
        expected = "MCS(size: 0.05, bootstrap: " + str(mcs.bootstrap) + ")"
        assert_equal(str(mcs), expected)
        # repr appends the object id before the closing parenthesis.
        expected = expected[:-1] + ", ID: " + hex(id(mcs)) + ")"
        assert_equal(mcs.__repr__(), expected)
        expected = (
            "<strong>MCS</strong>("
            + "<strong>size</strong>: 0.05, "
            + "<strong>bootstrap</strong>: "
            + str(mcs.bootstrap)
            + ", "
            + "<strong>ID</strong>: "
            + hex(id(mcs))
            + ")"
        )
        assert_equal(mcs._repr_html_(), expected)

    def test_all_models_have_pval(self):
        """Every model must receive a pvalue (no NaNs in the first column)."""
        losses = self.losses_df.iloc[:, :20]
        mcs = MCS(losses, 0.05, reps=200)
        mcs.seed(23456)
        mcs.compute()
        nan_locs = np.isnan(mcs.pvalues.iloc[:, 0])
        assert not nan_locs.any()

    def test_exact_ties(self):
        """MCS must run when half the models have exactly tied mean losses."""
        losses = self.losses_df.iloc[:, :20].copy()
        tied_mean = losses.mean().median()
        # Force columns 10+ to share an identical mean loss.
        losses.iloc[:, 10:] -= losses.iloc[:, 10:].mean()
        losses.iloc[:, 10:] += tied_mean
        mcs = MCS(losses, 0.05, reps=200)
        mcs.seed(23456)
        mcs.compute()

    def test_missing_included_max(self):
        """With strongly separated losses, method='max' still includes some models
        and included/excluded partition all 20 columns."""
        losses = self.losses_df.iloc[:, :20].copy()
        # Add a large, column-specific offset to separate the models.
        losses = losses.values + 5 * np.arange(20)[None, :]
        mcs = MCS(losses, 0.05, reps=200, method="max")
        mcs.seed(23456)
        mcs.compute()
        assert len(mcs.included) > 0
        assert (len(mcs.included) + len(mcs.excluded)) == 20
| [
"numpy.abs",
"numpy.isnan",
"numpy.mean",
"numpy.arange",
"pandas.DataFrame",
"arch.bootstrap.multiple_comparison.SPA",
"arch.bootstrap.multiple_comparison.StepM",
"arch.bootstrap.multiple_comparison.MCS",
"numpy.isfinite",
"numpy.random.RandomState",
"numpy.max",
"pytest.raises",
"numpy.tes... | [((716, 732), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (730, 732), False, 'import pytest\n'), ((759, 777), 'numpy.random.RandomState', 'RandomState', (['(23456)'], {}), '(23456)\n', (770, 777), False, 'from numpy.random import RandomState\n'), ((794, 808), 'scipy.stats.chi2', 'stats.chi2', (['(10)'], {}), '(10)\n', (804, 808), True, 'import scipy.stats as stats\n'), ((914, 952), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'periods': 't'}), "('2000-01-01', periods=t)\n", (927, 952), True, 'import pandas as pd\n'), ((976, 1009), 'pandas.Series', 'pd.Series', (['benchmark'], {'index': 'index'}), '(benchmark, index=index)\n', (985, 1009), True, 'import pandas as pd\n'), ((1029, 1065), 'pandas.DataFrame', 'pd.DataFrame', (['benchmark'], {'index': 'index'}), '(benchmark, index=index)\n', (1041, 1065), True, 'import pandas as pd\n'), ((1082, 1115), 'pandas.DataFrame', 'pd.DataFrame', (['models'], {'index': 'index'}), '(models, index=index)\n', (1094, 1115), True, 'import pandas as pd\n'), ((1273, 1338), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'block_size': '(10)', 'reps': '(100)'}), '(spa_data.benchmark, spa_data.models, block_size=10, reps=100)\n', (1276, 1338), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((1419, 1490), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark_df', 'spa_data.models_df'], {'block_size': '(10)', 'reps': '(100)'}), '(spa_data.benchmark_df, spa_data.models_df, block_size=10, reps=100)\n', (1422, 1490), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((1566, 1616), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['numpy_pvalues', 'pandas_pvalues'], {}), '(numpy_pvalues, pandas_pvalues)\n', (1585, 1616), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((1738, 1797), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', 
(['spa_data.benchmark', 'adj_models'], {'block_size': '(10)', 'reps': '(10)'}), '(spa_data.benchmark, adj_models, block_size=10, reps=10)\n', (1741, 1797), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((2003, 2014), 'numpy.zeros', 'np.zeros', (['t'], {}), '(t)\n', (2011, 2014), True, 'import numpy as np\n'), ((2376, 2415), 'numpy.testing.assert_allclose', 'assert_allclose', (['direct_vars', 'variances'], {}), '(direct_vars, variances)\n', (2391, 2415), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2557, 2596), 'numpy.testing.assert_equal', 'assert_equal', (['valid', 'spa._valid_columns'], {}), '(valid, spa._valid_columns)\n', (2569, 2596), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2634, 2712), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'block_size': '(10)', 'reps': '(100)', 'nested': '(True)'}), '(spa_data.benchmark, spa_data.models, block_size=10, reps=100, nested=True)\n', (2637, 2712), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((2928, 2975), 'numpy.testing.assert_allclose', 'assert_allclose', (['bootstrap_variances', 'variances'], {}), '(bootstrap_variances, variances)\n', (2943, 2975), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3029, 3079), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'reps': '(100)'}), '(spa_data.benchmark, spa_data.models, reps=100)\n', (3032, 3079), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((3175, 3200), 'numpy.max', 'np.max', (['simulated_vals', '(0)'], {}), '(simulated_vals, 0)\n', (3181, 3200), True, 'import numpy as np\n'), ((3269, 3307), 'numpy.mean', 'np.mean', (['(max_loss_diff <= max_stats)', '(0)'], {}), '(max_loss_diff <= max_stats, 0)\n', (3276, 3307), True, 'import numpy as np\n'), ((3322, 3380), 'pandas.Series', 'pd.Series', (['pvalues'], {'index': 
"['lower', 'consistent', 'upper']"}), "(pvalues, index=['lower', 'consistent', 'upper'])\n", (3331, 3380), True, 'import pandas as pd\n'), ((3385, 3426), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['pvalues', 'spa.pvalues'], {}), '(pvalues, spa.pvalues)\n', (3404, 3426), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((3444, 3482), 'numpy.percentile', 'np.percentile', (['max_stats', '(90.0)'], {'axis': '(0)'}), '(max_stats, 90.0, axis=0)\n', (3457, 3482), True, 'import numpy as np\n'), ((3499, 3559), 'pandas.Series', 'pd.Series', (['crit_vals'], {'index': "['lower', 'consistent', 'upper']"}), "(crit_vals, index=['lower', 'consistent', 'upper'])\n", (3508, 3559), True, 'import pandas as pd\n'), ((3661, 3711), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'reps': '(100)'}), '(spa_data.benchmark, spa_data.models, reps=100)\n', (3664, 3711), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((4233, 4273), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {}), '(spa_data.benchmark, spa_data.models)\n', (4236, 4273), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((4817, 4892), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'studentize': '(False)', 'bootstrap': '"""cbb"""'}), "(spa_data.benchmark, spa_data.models, studentize=False, bootstrap='cbb')\n", (4820, 4892), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((5023, 5102), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'nested': '(True)', 'bootstrap': '"""moving_block"""'}), "(spa_data.benchmark, spa_data.models, nested=True, bootstrap='moving_block')\n", (5026, 5102), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((5284, 5333), 'arch.bootstrap.multiple_comparison.SPA', 
'SPA', (['spa_data.benchmark', 'spa_data.models'], {'reps': '(10)'}), '(spa_data.benchmark, spa_data.models, reps=10)\n', (5287, 5333), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((5405, 5445), 'numpy.testing.assert_equal', 'assert_equal', (['spa.bootstrap._seed', '(23456)'], {}), '(spa.bootstrap._seed, 23456)\n', (5417, 5445), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5514, 5569), 'numpy.testing.assert_equal', 'assert_equal', (['spa.bootstrap.random_state', 'initial_state'], {}), '(spa.bootstrap.random_state, initial_state)\n', (5526, 5569), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5613, 5676), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'nested': '(True)', 'reps': '(100)'}), '(spa_data.benchmark, spa_data.models, nested=True, reps=100)\n', (5616, 5676), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((5747, 5803), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'bootstrap': '"""sb"""'}), "(spa_data.benchmark, spa_data.models, bootstrap='sb')\n", (5750, 5803), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((5872, 5929), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'bootstrap': '"""cbb"""'}), "(spa_data.benchmark, spa_data.models, bootstrap='cbb')\n", (5875, 5929), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((6001, 6063), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'bootstrap': '"""circular"""'}), "(spa_data.benchmark, spa_data.models, bootstrap='circular')\n", (6004, 6063), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((6135, 6192), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'bootstrap': '"""mbb"""'}), 
"(spa_data.benchmark, spa_data.models, bootstrap='mbb')\n", (6138, 6192), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((6262, 6328), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'bootstrap': '"""moving block"""'}), "(spa_data.benchmark, spa_data.models, bootstrap='moving block')\n", (6265, 6328), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((6433, 6479), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models[:, 0]'], {}), '(spa_data.benchmark, spa_data.models[:, 0])\n', (6436, 6479), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((6509, 6570), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark_series', 'spa_data.models_df.iloc[:, 0]'], {}), '(spa_data.benchmark_series, spa_data.models_df.iloc[:, 0])\n', (6512, 6570), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((1698, 1727), 'numpy.linspace', 'linspace', (['(-2)', '(0.5)', 'spa_data.k'], {}), '(-2, 0.5, spa_data.k)\n', (1706, 1727), False, 'from numpy import linspace\n'), ((3722, 3749), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3735, 3749), False, 'import pytest\n'), ((3781, 3808), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3794, 3808), False, 'import pytest\n'), ((3849, 3876), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3862, 3876), False, 'import pytest\n'), ((3916, 3941), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3929, 3941), False, 'import pytest\n'), ((3951, 4012), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['spa_data.benchmark', 'spa_data.models'], {'bootstrap': '"""unknown"""'}), "(spa_data.benchmark, spa_data.models, bootstrap='unknown')\n", (3954, 4012), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), 
((4040, 4065), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4053, 4065), False, 'import pytest\n'), ((4125, 4150), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4138, 4150), False, 'import pytest\n'), ((6677, 6695), 'numpy.random.RandomState', 'RandomState', (['(23456)'], {}), '(23456)\n', (6688, 6695), False, 'from numpy.random import RandomState\n'), ((6716, 6730), 'scipy.stats.chi2', 'stats.chi2', (['(10)'], {}), '(10)\n', (6726, 6730), True, 'import scipy.stats as stats\n'), ((6880, 6918), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'periods': 't'}), "('2000-01-01', periods=t)\n", (6893, 6918), True, 'import pandas as pd\n'), ((6950, 6987), 'pandas.Series', 'pd.Series', (['cls.benchmark'], {'index': 'index'}), '(cls.benchmark, index=index)\n', (6959, 6987), True, 'import pandas as pd\n'), ((7015, 7055), 'pandas.DataFrame', 'pd.DataFrame', (['cls.benchmark'], {'index': 'index'}), '(cls.benchmark, index=index)\n', (7027, 7055), True, 'import pandas as pd\n'), ((7135, 7186), 'pandas.DataFrame', 'pd.DataFrame', (['cls.models'], {'index': 'index', 'columns': 'cols'}), '(cls.models, index=index, columns=cols)\n', (7147, 7186), True, 'import pandas as pd\n'), ((7299, 7352), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark', 'adj_models'], {'size': '(0.2)', 'reps': '(200)'}), '(self.benchmark, adj_models, size=0.2, reps=200)\n', (7304, 7352), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((7494, 7554), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark_series', 'adj_models'], {'size': '(0.2)', 'reps': '(200)'}), '(self.benchmark_series, adj_models, size=0.2, reps=200)\n', (7499, 7554), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((8020, 8063), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark', 'adj_models'], {'reps': '(120)'}), '(self.benchmark, adj_models, 
reps=120)\n', (8025, 8063), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((8190, 8231), 'arch.bootstrap.multiple_comparison.SPA', 'SPA', (['self.benchmark', 'adj_models'], {'reps': '(120)'}), '(self.benchmark, adj_models, reps=120)\n', (8193, 8231), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((8452, 8502), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark_series', 'adj_models'], {'reps': '(120)'}), '(self.benchmark_series, adj_models, reps=120)\n', (8457, 8502), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((8661, 8712), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark_series', 'self.models'], {'size': '(0.1)'}), '(self.benchmark_series, self.models, size=0.1)\n', (8666, 8712), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((9468, 9538), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark_series', 'self.models'], {'size': '(0.05)', 'studentize': '(False)'}), '(self.benchmark_series, self.models, size=0.05, studentize=False)\n', (9473, 9538), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((9784, 9834), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark', 'self.models[:, 0]'], {'size': '(0.1)'}), '(self.benchmark, self.models[:, 0], size=0.1)\n', (9789, 9834), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((9877, 9932), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark_series', 'self.models_df.iloc[:, 0]'], {}), '(self.benchmark_series, self.models_df.iloc[:, 0])\n', (9882, 9932), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((10048, 10091), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark', 'adj_models'], {'size': '(0.1)'}), '(self.benchmark, adj_models, size=0.1)\n', (10053, 10091), False, 'from arch.bootstrap.multiple_comparison 
import MCS, SPA, StepM\n'), ((10232, 10276), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark', 'self.models'], {'size': '(0.1)'}), '(self.benchmark, self.models, size=0.1)\n', (10237, 10276), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((10591, 10637), 'arch.bootstrap.multiple_comparison.StepM', 'StepM', (['self.benchmark_df', 'adj_models'], {'size': '(0.1)'}), '(self.benchmark_df, adj_models, size=0.1)\n', (10596, 10637), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((10824, 10842), 'numpy.random.RandomState', 'RandomState', (['(23456)'], {}), '(23456)\n', (10835, 10842), False, 'from numpy.random import RandomState\n'), ((10863, 10877), 'scipy.stats.chi2', 'stats.chi2', (['(10)'], {}), '(10)\n', (10873, 10877), True, 'import scipy.stats as stats\n'), ((10985, 11023), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'periods': 't'}), "('2000-01-01', periods=t)\n", (10998, 11023), True, 'import pandas as pd\n'), ((11048, 11085), 'pandas.DataFrame', 'pd.DataFrame', (['cls.losses'], {'index': 'index'}), '(cls.losses, index=index)\n', (11060, 11085), True, 'import pandas as pd\n'), ((12803, 12830), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['losses', '(0.05)'], {'reps': '(200)'}), '(losses, 0.05, reps=200)\n', (12806, 12830), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((13577, 13625), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['mcs.pvalues.iloc[:m]', 'direct'], {}), '(mcs.pvalues.iloc[:m], direct)\n', (13595, 13625), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((14648, 14689), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['losses', '(0.05)'], {'reps': '(200)', 'method': '"""max"""'}), "(losses, 0.05, reps=200, method='max')\n", (14651, 14689), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((15441, 15489), 'pandas.testing.assert_frame_equal', 
'assert_frame_equal', (['mcs.pvalues.iloc[:m]', 'direct'], {}), '(mcs.pvalues.iloc[:m], direct)\n', (15459, 15489), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((15538, 15600), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses_df', '(0.05)'], {'reps': '(100)', 'block_size': '(10)', 'method': '"""r"""'}), "(self.losses_df, 0.05, reps=100, block_size=10, method='r')\n", (15541, 15600), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((15813, 15875), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses_df', '(0.05)'], {'reps': '(100)', 'block_size': '(10)', 'method': '"""r"""'}), "(self.losses_df, 0.05, reps=100, block_size=10, method='r')\n", (15816, 15875), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((16066, 16154), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses', '(0.05)'], {'reps': '(100)', 'block_size': '(10)', 'method': '"""max"""', 'bootstrap': '"""circular"""'}), "(self.losses, 0.05, reps=100, block_size=10, method='max', bootstrap=\n 'circular')\n", (16069, 16154), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((16269, 16361), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses', '(0.05)'], {'reps': '(100)', 'block_size': '(10)', 'method': '"""max"""', 'bootstrap': '"""moving block"""'}), "(self.losses, 0.05, reps=100, block_size=10, method='max', bootstrap=\n 'moving block')\n", (16272, 16361), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((16602, 16624), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses', '(0.05)'], {}), '(self.losses, 0.05)\n', (16605, 16624), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((17298, 17325), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['losses', '(0.05)'], {'reps': '(200)'}), '(losses, 0.05, reps=200)\n', (17301, 17325), False, 'from arch.bootstrap.multiple_comparison 
import MCS, SPA, StepM\n'), ((17391, 17423), 'numpy.isnan', 'np.isnan', (['mcs.pvalues.iloc[:, 0]'], {}), '(mcs.pvalues.iloc[:, 0])\n', (17399, 17423), True, 'import numpy as np\n'), ((17698, 17725), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['losses', '(0.05)'], {'reps': '(200)'}), '(losses, 0.05, reps=200)\n', (17701, 17725), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((17940, 17981), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['losses', '(0.05)'], {'reps': '(200)', 'method': '"""max"""'}), "(losses, 0.05, reps=200, method='max')\n", (17943, 17981), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((7255, 7282), 'numpy.linspace', 'linspace', (['(-2.0)', '(2.0)', 'self.k'], {}), '(-2.0, 2.0, self.k)\n', (7263, 7282), False, 'from numpy import linspace\n'), ((7443, 7470), 'numpy.linspace', 'linspace', (['(-2.0)', '(2.0)', 'self.k'], {}), '(-2.0, 2.0, self.k)\n', (7451, 7470), False, 'from numpy import linspace\n'), ((7857, 7888), 'numpy.array', 'np.array', (['stepm.superior_models'], {}), '(stepm.superior_models)\n', (7865, 7888), True, 'import numpy as np\n'), ((7976, 8003), 'numpy.linspace', 'linspace', (['(-1.0)', '(1.0)', 'self.k'], {}), '(-1.0, 1.0, self.k)\n', (7984, 8003), False, 'from numpy import linspace\n'), ((8408, 8435), 'numpy.linspace', 'linspace', (['(-3.0)', '(3.0)', 'self.k'], {}), '(-3.0, 3.0, self.k)\n', (8416, 8435), False, 'from numpy import linspace\n'), ((10291, 10318), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10304, 10318), False, 'import pytest\n'), ((11343, 11359), 'numpy.zeros', 'np.zeros', (['(k, k)'], {}), '((k, k))\n', (11351, 11359), True, 'import numpy as np\n'), ((11384, 11400), 'numpy.zeros', 'np.zeros', (['(k, k)'], {}), '((k, k))\n', (11392, 11400), True, 'import numpy as np\n'), ((11424, 11435), 'numpy.zeros', 'np.zeros', (['b'], {}), '(b)\n', (11432, 11435), True, 'import numpy as np\n'), ((12433, 12459), 
'numpy.max', 'np.max', (['stat_candidates', '(1)'], {}), '(stat_candidates, 1)\n', (12439, 12459), True, 'import numpy as np\n'), ((12540, 12562), 'numpy.max', 'np.max', (['std_loss_diffs'], {}), '(std_loss_diffs)\n', (12546, 12562), True, 'import numpy as np\n'), ((12582, 12609), 'numpy.mean', 'np.mean', (['(stat <= stat_distn)'], {}), '(stat <= stat_distn)\n', (12589, 12609), True, 'import numpy as np\n'), ((12628, 12663), 'numpy.argwhere', 'np.argwhere', (['(std_loss_diffs == stat)'], {}), '(std_loss_diffs == stat)\n', (12639, 12663), True, 'import numpy as np\n'), ((12927, 12938), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (12935, 12938), True, 'import numpy as np\n'), ((12966, 12977), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (12974, 12977), True, 'import numpy as np\n'), ((13894, 13910), 'numpy.zeros', 'np.zeros', (['(b, k)'], {}), '((b, k))\n', (13902, 13910), True, 'import numpy as np\n'), ((14189, 14207), 'numpy.sqrt', 'np.sqrt', (['variances'], {}), '(variances)\n', (14196, 14207), True, 'import numpy as np\n'), ((14232, 14259), 'numpy.max', 'np.max', (['(stats / std_devs)', '(1)'], {}), '(stats / std_devs, 1)\n', (14238, 14259), True, 'import numpy as np\n'), ((14389, 14410), 'numpy.max', 'np.max', (['std_test_stat'], {}), '(std_test_stat)\n', (14395, 14410), True, 'import numpy as np\n'), ((14786, 14797), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (14794, 14797), True, 'import numpy as np\n'), ((14825, 14836), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (14833, 14836), True, 'import numpy as np\n'), ((15889, 15916), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15902, 15916), False, 'import pytest\n'), ((15984, 16009), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15997, 16009), False, 'import pytest\n'), ((16023, 16051), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses[:, 1]', '(0.05)'], {}), '(self.losses[:, 1], 0.05)\n', (16026, 16051), False, 'from 
arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((16475, 16500), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16488, 16500), False, 'import pytest\n'), ((16514, 16557), 'arch.bootstrap.multiple_comparison.MCS', 'MCS', (['self.losses', '(0.05)'], {'bootstrap': '"""unknown"""'}), "(self.losses, 0.05, bootstrap='unknown')\n", (16517, 16557), False, 'from arch.bootstrap.multiple_comparison import MCS, SPA, StepM\n'), ((7777, 7797), 'numpy.argwhere', 'np.argwhere', (['members'], {}), '(members)\n', (7788, 7797), True, 'import numpy as np\n'), ((12380, 12405), 'numpy.array', 'np.array', (['stat_candidates'], {}), '(stat_candidates)\n', (12388, 12405), True, 'import numpy as np\n'), ((12502, 12520), 'numpy.sqrt', 'np.sqrt', (['variances'], {}), '(variances)\n', (12509, 12520), True, 'import numpy as np\n'), ((13333, 13361), 'numpy.max', 'np.max', (['[pvals[i - 1], pval]'], {}), '([pvals[i - 1], pval])\n', (13339, 13361), True, 'import numpy as np\n'), ((13463, 13496), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.int64'}), '(indices, dtype=np.int64)\n', (13471, 13496), True, 'import numpy as np\n'), ((15197, 15225), 'numpy.max', 'np.max', (['[pvals[i - 1], pval]'], {}), '([pvals[i - 1], pval])\n', (15203, 15225), True, 'import numpy as np\n'), ((15327, 15360), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.int64'}), '(indices, dtype=np.int64)\n', (15335, 15360), True, 'import numpy as np\n'), ((2488, 2497), 'numpy.log', 'np.log', (['t'], {}), '(t)\n', (2494, 2497), True, 'import numpy as np\n'), ((13049, 13069), 'numpy.isfinite', 'np.isfinite', (['indices'], {}), '(indices)\n', (13060, 13069), True, 'import numpy as np\n'), ((14486, 14525), 'numpy.argwhere', 'np.argwhere', (['(std_test_stat == test_stat)'], {}), '(std_test_stat == test_stat)\n', (14497, 14525), True, 'import numpy as np\n'), ((14908, 14928), 'numpy.isfinite', 'np.isfinite', (['indices'], {}), '(indices)\n', (14919, 14928), True, 'import numpy 
as np\n'), ((17903, 17916), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (17912, 17916), True, 'import numpy as np\n'), ((12252, 12268), 'numpy.abs', 'np.abs', (['bs_diffs'], {}), '(bs_diffs)\n', (12258, 12268), True, 'import numpy as np\n'), ((12271, 12295), 'numpy.sqrt', 'np.sqrt', (['variances[i, j]'], {}), '(variances[i, j])\n', (12278, 12295), True, 'import numpy as np\n'), ((13233, 13250), 'numpy.array', 'np.array', (['include'], {}), '(include)\n', (13241, 13250), True, 'import numpy as np\n'), ((15097, 15114), 'numpy.array', 'np.array', (['include'], {}), '(include)\n', (15105, 15114), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import os
import time
import tables
from faster_particles.display_utils import display_uresnet
from faster_particles.base_net import basenets
from faster_particles.metrics import UResNetMetrics
from faster_particles.demo_ppn import get_data, load_weights
def inference(cfg, is_testing=False):
    """
    Run inference for either PPN or (xor) a base network (e.g. UResNet).

    Loads network weights from the checkpoint in `cfg`, iterates over the
    selected dataset split, writes per-voxel predictions to an HDF5 earray
    for submission, and reports the average per-image inference time.

    Args:
        cfg: configuration object (see the MyCfg example in __main__).
        is_testing: if True, use the test split and derive labels from the
            data tensor itself (no ground truth available in testing mode).
    """
    if not os.path.isdir(cfg.DISPLAY_DIR):
        os.makedirs(cfg.DISPLAY_DIR)

    # get_data returns (train, test); pick the relevant split.
    if is_testing:
        _, data = get_data(cfg)
    else:
        data, _ = get_data(cfg)

    net = basenets[cfg.BASE_NET](cfg=cfg)
    if cfg.WEIGHTS_FILE_PPN is None and cfg.WEIGHTS_FILE_BASE is None:
        raise Exception("Need a checkpoint file")

    net.init_placeholders()
    net.create_architecture(is_training=False)
    duration = 0
    metrics = UResNetMetrics(cfg)

    FILTERS = tables.Filters(complevel=5, complib='zlib', shuffle=True,
                             bitshuffle=False, fletcher32=False,
                             least_significant_digit=None)
    f_submission = tables.open_file('/data/codalab/submission_5-6.hdf5', 'w',
                                     filters=FILTERS)
    preds_array = f_submission.create_earray('/', 'pred', tables.UInt32Atom(),
                                             (0, 192, 192, 192),
                                             expectedrows=data.n)
    # Number of images actually processed (used below to average timings).
    n_steps = min(data.n, cfg.MAX_STEPS)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        load_weights(cfg, sess)
        for i in range(n_steps):
            print("%d/%d" % (i, data.n))
            blob = data.forward()
            if is_testing:
                # No ground truth available: reuse the data channel as labels.
                blob['labels'] = blob['data'][..., 0]
            start = time.time()
            summary, results = net.test_image(sess, blob)
            end = time.time()
            duration += end - start
            # Drawing time
            # display_uresnet(blob, cfg, index=i, **results)
            if not is_testing:
                metrics.add(blob, results)
            # Zero out predictions on occupied input voxels before appending
            # to the submission file.
            mask = np.where(blob['data'][..., 0] > 0)
            preds = np.reshape(results['predictions'], (1, 192, 192, 192))
            print(np.count_nonzero(preds[mask] > 0))
            preds[mask] = 0
            preds_array.append(preds)
            print(preds.shape)
    preds_array.close()
    f_submission.close()
    # BUG FIX: average over the iterations actually run (previously divided
    # by cfg.MAX_STEPS, which is wrong when data.n < MAX_STEPS), and convert
    # seconds -> milliseconds to match the printed unit.
    duration = duration / max(n_steps, 1) * 1000.0
    print("Average duration of inference = %f ms" % duration)
    if not is_testing:
        metrics.plot()
if __name__ == '__main__':
    class MyCfg(object):
        """Static configuration for a CodaLab submission inference run."""
        # --- network selection ---
        NET = 'base'
        BASE_NET = 'uresnet'
        BASE_NUM_OUTPUTS = 16
        NUM_CLASSES = 3
        NUM_STRIDES = 3
        PPN1_INDEX = 1
        PPN2_INDEX = 3
        URESNET_WEIGHTING = False
        URESNET_ADD = False
        # --- input geometry ---
        IMAGE_SIZE = 192
        DATA_3D = True
        ENABLE_CROP = False
        SLICE_SIZE = 64
        CROP_ALGO = 'proba'
        # --- data sources ---
        DATA = '/data/codalab/train_5-6.csv'
        TEST_DATA = '/data/codalab/test_5-6.csv'
        DATA_TYPE = 'csv'
        TOYDATA = False
        HDF5 = True
        # --- weights / outputs ---
        WEIGHTS_FILE_BASE = '/data/train_codalab1/model-145000.ckpt'
        WEIGHTS_FILE_PPN = None
        DISPLAY_DIR = 'display/demo_codalab1'
        # --- run parameters ---
        MAX_STEPS = 1
        LEARNING_RATE = 0.001
        SEED = 123
        GPU = '0'

    cfg = MyCfg()
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
    inference(cfg, is_testing=True)
| [
"numpy.count_nonzero",
"os.makedirs",
"faster_particles.demo_ppn.load_weights",
"os.path.isdir",
"faster_particles.demo_ppn.get_data",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"time.time",
"tables.Filters",
"numpy.where",
"numpy.reshape",
"faster_particles.metrics.UResN... | [((977, 996), 'faster_particles.metrics.UResNetMetrics', 'UResNetMetrics', (['cfg'], {}), '(cfg)\n', (991, 996), False, 'from faster_particles.metrics import UResNetMetrics\n'), ((1011, 1138), 'tables.Filters', 'tables.Filters', ([], {'complevel': '(5)', 'complib': '"""zlib"""', 'shuffle': '(True)', 'bitshuffle': '(False)', 'fletcher32': '(False)', 'least_significant_digit': 'None'}), "(complevel=5, complib='zlib', shuffle=True, bitshuffle=False,\n fletcher32=False, least_significant_digit=None)\n", (1025, 1138), False, 'import tables\n'), ((1212, 1287), 'tables.open_file', 'tables.open_file', (['"""/data/codalab/submission_5-6.hdf5"""', '"""w"""'], {'filters': 'FILTERS'}), "('/data/codalab/submission_5-6.hdf5', 'w', filters=FILTERS)\n", (1228, 1287), False, 'import tables\n'), ((542, 572), 'os.path.isdir', 'os.path.isdir', (['cfg.DISPLAY_DIR'], {}), '(cfg.DISPLAY_DIR)\n', (555, 572), False, 'import os\n'), ((582, 610), 'os.makedirs', 'os.makedirs', (['cfg.DISPLAY_DIR'], {}), '(cfg.DISPLAY_DIR)\n', (593, 610), False, 'import os\n'), ((649, 662), 'faster_particles.demo_ppn.get_data', 'get_data', (['cfg'], {}), '(cfg)\n', (657, 662), False, 'from faster_particles.demo_ppn import get_data, load_weights\n'), ((691, 704), 'faster_particles.demo_ppn.get_data', 'get_data', (['cfg'], {}), '(cfg)\n', (699, 704), False, 'from faster_particles.demo_ppn import get_data, load_weights\n'), ((1382, 1401), 'tables.UInt32Atom', 'tables.UInt32Atom', ([], {}), '()\n', (1399, 1401), False, 'import tables\n'), ((1544, 1556), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1554, 1556), True, 'import tensorflow as tf\n'), ((1626, 1649), 'faster_particles.demo_ppn.load_weights', 'load_weights', (['cfg', 'sess'], {}), '(cfg, sess)\n', (1638, 1649), False, 'from faster_particles.demo_ppn import get_data, load_weights\n'), ((1583, 1616), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1614, 1616), 
True, 'import tensorflow as tf\n'), ((1878, 1889), 'time.time', 'time.time', ([], {}), '()\n', (1887, 1889), False, 'import time\n'), ((1966, 1977), 'time.time', 'time.time', ([], {}), '()\n', (1975, 1977), False, 'import time\n'), ((2195, 2229), 'numpy.where', 'np.where', (["(blob['data'][..., 0] > 0)"], {}), "(blob['data'][..., 0] > 0)\n", (2203, 2229), True, 'import numpy as np\n'), ((2250, 2304), 'numpy.reshape', 'np.reshape', (["results['predictions']", '(1, 192, 192, 192)'], {}), "(results['predictions'], (1, 192, 192, 192))\n", (2260, 2304), True, 'import numpy as np\n'), ((2323, 2356), 'numpy.count_nonzero', 'np.count_nonzero', (['(preds[mask] > 0)'], {}), '(preds[mask] > 0)\n', (2339, 2356), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from helper import bezier
from mpl_toolkits.mplot3d import Axes3D
# Blended Wing Body has two scenarios about construction
# 1. establishment based on existing normal shape aircraft
# 2. new establishment
# 1 is necessary for completing the value of bottom area
# baseline normal
# --- Baseline (conventional tube-and-wing) cross-section parameters ---
huc = 1.8 # upper cockpit height
hlc = 1.8 # lower cockpit height
wc = 1.2 # cockpit width
huf = 2.0 # upper fuselage height (radius)
wf = 2.0 # fuselage width (radius)
hlf = 2.1 # lower fuselage height(fuselage), optimize it from cargo volume
upper_r = huf / wf # the ratio of height and width at upper part
lower_r = hlf / wf # the ratio of height and width at lower part
hau = 1.5 # height of after cabin upper
wa = 0.3 # after cabin width
l1 = 7 # up to section 1 length
l2 = 25 # up to section 2 length
l3 = 4 # up to section 3 length
l = l1 + l2 + l3 # total fuselage length
# Bottom area
# Planform (bottom) area of the baseline fuselage: sum of the cockpit,
# centre and aft trapezoid-like sections.
BSn = (wc + wf) * l1 + 2 * wf * l2 + (wa + wf) * l3
# parameters u => width, v = length
# Non-dimensional BWB planform fractions (u: width shares, v: length shares).
u1 = 0.1
v1 = 0.3
u2 = 0.3
v2 = 0.5
u3 = 0.1
v3 = 1.0 - v1 - v2
# Tuning length(if you use 2 case, you skip this process)
# Search for the BWB length `lb` whose planform area BSb matches the
# baseline area BSn.  The step is halved whenever the residual changes
# sign (bisection-like), capped at 100 iterations.
lb = l
lb_step = 1.0
diff_s = 0.0
diff_s_old = 0.0
count = 0
while True:
    if count == 100:
        break
    BSb = (u1 * v1 + u2 * v1 + 2 * u2 * v2 + u3 * v3 + u2 * v3) * lb ** 2
    diff_s = (BSb - BSn) / BSn
    # print('residual:', diff_s, 'length bwb:', lb)
    if abs(diff_s) < 1.0e-5:
        # print('ok')
        break
    if diff_s * diff_s_old < 0.0:
        lb_step *= 0.5
    lb += -np.sign(diff_s) * lb_step
    diff_s_old = diff_s
    count += 1
print(lb)
# determine p, m
def rho(p, k2):
    """Rear-arc camber term evaluated at non-dimensional station k2."""
    rear_arc = (1 - 2 * p) + 2 * p * k2 - k2 ** 2
    return p ** 2 * rear_arc
def lho(p, k1):
    """Front-arc camber term evaluated at non-dimensional station k1."""
    front_arc = 2 * p * k1 - k1 ** 2
    return (1 - p) ** 2 * front_arc
# Convert the planform fractions into BWB section lengths and their
# non-dimensional chordwise stations.  NOTE: l1/l2 are re-bound here,
# replacing the baseline section lengths defined above.
l1 = v1 * lb
l2 = v2 * lb
k1 = l1 / lb
k2 = (l1 + l2) / lb
# airfoil parameters
# pb/mb resemble the NACA 4-digit mean-line parameters (position and
# magnitude of maximum camber); mb is chosen so the crest height is huf.
pb = v1 / v2
mb = pb ** 2 * huf / (2 * pb * k1 - k1 ** 2)
xk = np.linspace(0, 1, 30)
x = []
z = []
# Sample the two-parabola centre line: front arc for xi < pb, rear arc
# otherwise; xi is rescaled from [0, 1] to physical length lb.
for xi in xk:
    if xi < pb:
        zi = mb * (2 * pb * xi - xi ** 2) / pb ** 2
        xi *= lb
    else:
        zi = mb * ((1 - 2 * pb) + 2 * pb * xi - xi ** 2) / (1 - pb) ** 2
        xi *= lb
    z.append(zi)
    x.append(xi)
# main wing shape
# Trapezoidal outer wing: chord tapers linearly from croot at the body
# junction `st` to ctip at the half-span b/2; the leading edge is swept
# back by `theta` degrees.  Points are mirrored to the port side (-yi).
ctip = 1
croot = 3
b = 40
theta = 20 # retreat angle
p = pb
tc = 0.1
jmx = 0.5 # main wing joint point (NOTE: unused below)
st = [v1 * lb, u2 * lb]
BX = croot * (b / 2 - st[1]) / (croot - ctip)
main_wing_arr = []
y = np.linspace(st[1], b * 0.5, 30)
for yi in y:
    # Leading-edge x at this span station, local chord, trailing edge.
    xu = st[0] + (yi - st[1]) * np.tan(theta * np.pi / 180.0)
    cx = croot * (BX + st[1] - yi) / BX
    xl = xu + cx
    x = np.linspace(xu, xl, 30)
    for xi in x:
        # Parabolic thickness distribution, zero at both edges; the
        # lower surface mirrors the upper one.
        zui = -tc / (p * (1 - p) * cx) * (xi - xu) * (xi - xl)
        zli = -1 * zui
        main_wing_arr.append([xi, yi, zui])
        main_wing_arr.append([xi, yi, zli])
        main_wing_arr.append([xi, -yi, zui])
        main_wing_arr.append([xi, -yi, zli])
main_wing_arr = np.array(main_wing_arr)
# horizontal face
# Planform (top-view) outline of the blended body: a Bezier curve for the
# leading edge (qy1), a short straight segment at the wing tip, and a
# Bezier curve for the trailing edge back to the tail (qy2).
# Control points are (x, y) pairs.
bezier_y1 = []
qy1 = np.array([[0, 0], [0, u1 * lb], [v1 * lb, u2 * lb],
                [(b * 0.5 - st[1]) * np.tan(theta * np.pi / 180.0) + st[0], b * 0.5]])
bezier_y2 = []
qy2 = np.array([[(b * 0.5 - st[1]) * np.tan(theta * np.pi / 180.0) + st[0] + ctip, b * 0.5],
                [croot + st[0], st[1]], [lb, u3 * lb], [lb, 0]])
for t in np.linspace(0, 1, 50):
    bezier_y1.append(bezier(qy1.shape[0] - 1, t, qy1))
    bezier_y2.append(bezier(qy2.shape[0] - 1, t, qy2))
# Straight tip chord between the endpoints of the two Bezier curves.
xs = (b * 0.5 - st[1]) * np.tan(theta * np.pi / 180.0) + st[0]
xf = xs + ctip
interpolates = [np.array([xi, b * 0.5]) for xi in np.linspace(xs, xf, 5)]
# y coordinates
bezier_y = bezier_y1 + interpolates + bezier_y2
fuselage_arr = []
# xz plane
fuselage_line = []
# For each planform sample (x, half-width yu): the height zu follows the
# same two-parabola centre line as above; each cross-section is an
# ellipse of half-height zu and half-width yu.
for xi, yu in bezier_y:
    xi /= lb
    if xi < pb:
        zu = mb * (2 * pb * xi - xi ** 2) / pb ** 2
    else:
        zu = mb * ((1 - 2 * pb) + 2 * pb * xi - xi ** 2) / (1 - pb) ** 2
    xi *= lb
    fuselage_line.append([xi, zu])
    y = np.linspace(-yu, yu, 50)
    for yi in y:
        zui = zu * np.sqrt(1.0 - yi ** 2 / yu ** 2)
        zli = -1 * zui
        fuselage_arr.append([xi, yi, zui])
        fuselage_arr.append([xi, yi, zli])
fuselage_arr = np.array(fuselage_arr)
fuselage_line = np.array(fuselage_line)
# engine(core)(main wing down)
# Core engine nacelle under the main wing: a body of revolution whose
# radius follows a parabola in x, tapering from `rin` at the widest
# section to `rout` at the outlet over length `eng_len`.
lower = -1
rin = 0.8
rout = 0.4
tin = 0.1
# Renamed from `len`, which shadowed the builtin len() for the rest of
# the script.  All uses were local to this section, so the rename is safe.
eng_len = 4.0
tx = 0.4
ty = 0.4
k = 0.4
# joint point of main wing lower
joint_point_ml = [st[0] + croot * tx, st[1] + (b / 2 - st[1]) * ty, lower * np.max(main_wing_arr[:, 2])]
zcen = joint_point_ml[2] - tin - rin
# engine curve -> z = ax ** 2 + b * x + c
x = np.linspace(joint_point_ml[0] - k * eng_len, joint_point_ml[0] + (1 - k) * eng_len, 30)
az = lower * (rin - rout) / (1 - 2 * k) / eng_len ** 2
bz = -2 * joint_point_ml[0] * az
cz = joint_point_ml[2] + bz ** 2 / (4 * az)
engine_arr_low = []
for xi in x:
    zu = az * xi ** 2 + bz * xi + cz
    zl = 2 * zcen - zu
    z = np.linspace(zl, zu, 30)
    for zi in z:
        # Circular cross-section at this (x, z): solve for the y offsets.
        target = np.sqrt((zu - zcen) ** 2 - (zi - zcen) ** 2)
        yui = joint_point_ml[1] + target
        yli = joint_point_ml[1] - target
        engine_arr_low.append([xi, yui, zi])
        engine_arr_low.append([xi, yli, zi])
        engine_arr_low.append([xi, -yui, zi])
        engine_arr_low.append([xi, -yli, zi])
engine_arr_low = np.array(engine_arr_low)
# engine(distributed fan)(upper)
# Four distributed fans on the upper surface, staggered outboard from the
# core-engine joint point along the leading-edge sweep direction.
nfan = 4
rfin = 0.6
rfout = 0.4
lower = -1
r_afford = 0.1 # presumably a clearance margin between adjacent fans -- confirm
theta = theta # retreat angle (NOTE: no-op self-assignment)
tin = 0.1
lfan = 2.0
k = 0.1
joint_point_init = joint_point_ml
engine_arr_dist_up = []
for n in range(nfan):
    # Offset of the n-th fan centre along the swept direction.
    diff_r = (1.0 + r_afford) * 2 * (n + 1)
    joint_point = [joint_point_init[0] + diff_r * np.sin(theta * np.pi / 180.0), joint_point_init[1] + diff_r * np.cos(theta * np.pi / 180.0), joint_point_init[2]]
    zcen = joint_point[2] + tin + rin
    # engine curve -> z = ax ** 2 + b * x + c
    x = np.linspace(joint_point[0] - k * lfan, joint_point[0] + (1 - k) * lfan, 30)
    az = (rfin - rfout) / (1 - 2 * k) / lfan ** 2
    bz = -2 * joint_point[0] * az
    cz = joint_point[2] + bz ** 2 / (4 * az)
    for xi in x:
        zu = az * xi ** 2 + bz * xi + cz
        zl = 2 * zcen - zu
        z = np.linspace(zl, zu, 30)
        for zi in z:
            # Circular cross-section at this (x, z): solve for y offsets.
            target = np.sqrt((zu - zcen) ** 2 - (zi - zcen) ** 2)
            yui = joint_point[1] + target
            yli = joint_point[1] - target
            engine_arr_dist_up.append([xi, yui, zi])
            engine_arr_dist_up.append([xi, yli, zi])
            engine_arr_dist_up.append([xi, -yui, zi])
            engine_arr_dist_up.append([xi, -yli, zi])
engine_arr_dist_up = np.array(engine_arr_dist_up)
# engine(distributed fan)(fuselage upper)
# Seven distributed fans on the upper fuselage.  The mounting height is
# taken from a quadratic fit of the fuselage centre line; the fan row is
# angled by `set_angle` degrees.  (This array is generated but left out
# of the plot below.)
nfan = 7
rfin = 0.5
rfout = 0.3
lfan = 1.0
r_afford = 0.1
# mounting position coefficient
tx = 0.7
ty = 0.05
# setting angle
set_angle = -10
# fuselage line fitting
res = np.polyfit(fuselage_line[:, 0], fuselage_line[:, 1], 2)
joint_point_init = [tx * lb, ty * lb, np.poly1d(res)(tx * lb)]
engine_arr_dist_fus = []
for n in range(nfan):
    # Offset of the n-th fan centre along the set angle.
    diff_r = (1.0 + r_afford) * 2 * (n + 1)
    joint_point = [joint_point_init[0] + diff_r * np.sin(set_angle * np.pi / 180.0), joint_point_init[1] + diff_r * np.cos(set_angle * np.pi / 180.0), joint_point_init[2]]
    zcen = joint_point[2] + tin + rin
    # engine curve -> z = ax ** 2 + b * x + c
    x = np.linspace(joint_point[0] - k * lfan, joint_point[0] + (1 - k) * lfan, 30)
    az = (rfin - rfout) / (1 - 2 * k) / lfan ** 2
    bz = -2 * joint_point[0] * az
    cz = joint_point[2] + bz ** 2 / (4 * az)
    for xi in x:
        zu = az * xi ** 2 + bz * xi + cz
        zl = 2 * zcen - zu
        z = np.linspace(zl, zu, 30)
        for zi in z:
            # Circular cross-section at this (x, z): solve for y offsets.
            target = np.sqrt((zu - zcen) ** 2 - (zi - zcen) ** 2)
            yui = joint_point[1] + target
            yli = joint_point[1] - target
            engine_arr_dist_fus.append([xi, yui, zi])
            engine_arr_dist_fus.append([xi, yli, zi])
            engine_arr_dist_fus.append([xi, -yui, zi])
            engine_arr_dist_fus.append([xi, -yli, zi])
engine_arr_dist_fus = np.array(engine_arr_dist_fus)
# judge feasibility(?)
# Visual sanity check: scatter all generated point clouds in a single 3D
# axes (the fuselage-mounted fan array is left commented out).
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(fuselage_arr[:, 0], fuselage_arr[:, 1], fuselage_arr[:, 2])
ax.scatter(main_wing_arr[:, 0], main_wing_arr[:, 1], main_wing_arr[:, 2])
ax.scatter(engine_arr_low[:, 0], engine_arr_low[:, 1], engine_arr_low[:, 2])
ax.scatter(engine_arr_dist_up[:, 0], engine_arr_dist_up[:, 1], engine_arr_dist_up[:, 2])
# ax.scatter(engine_arr_dist_fus[:, 0], engine_arr_dist_fus[:, 1], engine_arr_dist_fus[:, 2])
ax.set_xlim([-10, 20])
ax.set_ylim([-20, 20])
ax.set_zlim([-15, 15])
plt.show()
| [
"helper.bezier",
"numpy.poly1d",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.polyfit",
"matplotlib.pyplot.figure",
"numpy.tan",
"numpy.array",
"numpy.max",
"numpy.linspace",
"numpy.sign",
"numpy.sin",
"numpy.cos",
"numpy.sqrt"
] | [((1942, 1963), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(30)'], {}), '(0, 1, 30)\n', (1953, 1963), True, 'import numpy as np\n'), ((2438, 2469), 'numpy.linspace', 'np.linspace', (['st[1]', '(b * 0.5)', '(30)'], {}), '(st[1], b * 0.5, 30)\n', (2449, 2469), True, 'import numpy as np\n'), ((2936, 2959), 'numpy.array', 'np.array', (['main_wing_arr'], {}), '(main_wing_arr)\n', (2944, 2959), True, 'import numpy as np\n'), ((3324, 3345), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (3335, 3345), True, 'import numpy as np\n'), ((4194, 4216), 'numpy.array', 'np.array', (['fuselage_arr'], {}), '(fuselage_arr)\n', (4202, 4216), True, 'import numpy as np\n'), ((4233, 4256), 'numpy.array', 'np.array', (['fuselage_line'], {}), '(fuselage_line)\n', (4241, 4256), True, 'import numpy as np\n'), ((4594, 4673), 'numpy.linspace', 'np.linspace', (['(joint_point_ml[0] - k * len)', '(joint_point_ml[0] + (1 - k) * len)', '(30)'], {}), '(joint_point_ml[0] - k * len, joint_point_ml[0] + (1 - k) * len, 30)\n', (4605, 4673), True, 'import numpy as np\n'), ((5297, 5321), 'numpy.array', 'np.array', (['engine_arr_low'], {}), '(engine_arr_low)\n', (5305, 5321), True, 'import numpy as np\n'), ((6604, 6632), 'numpy.array', 'np.array', (['engine_arr_dist_up'], {}), '(engine_arr_dist_up)\n', (6612, 6632), True, 'import numpy as np\n'), ((6852, 6907), 'numpy.polyfit', 'np.polyfit', (['fuselage_line[:, 0]', 'fuselage_line[:, 1]', '(2)'], {}), '(fuselage_line[:, 0], fuselage_line[:, 1], 2)\n', (6862, 6907), True, 'import numpy as np\n'), ((8076, 8105), 'numpy.array', 'np.array', (['engine_arr_dist_fus'], {}), '(engine_arr_dist_fus)\n', (8084, 8105), True, 'import numpy as np\n'), ((8138, 8150), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8148, 8150), True, 'import matplotlib.pyplot as plt\n'), ((8156, 8167), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (8162, 8167), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), 
((8645, 8655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8653, 8655), True, 'import matplotlib.pyplot as plt\n'), ((2612, 2635), 'numpy.linspace', 'np.linspace', (['xu', 'xl', '(30)'], {}), '(xu, xl, 30)\n', (2623, 2635), True, 'import numpy as np\n'), ((3552, 3575), 'numpy.array', 'np.array', (['[xi, b * 0.5]'], {}), '([xi, b * 0.5])\n', (3560, 3575), True, 'import numpy as np\n'), ((3973, 3997), 'numpy.linspace', 'np.linspace', (['(-yu)', 'yu', '(50)'], {}), '(-yu, yu, 50)\n', (3984, 3997), True, 'import numpy as np\n'), ((4909, 4932), 'numpy.linspace', 'np.linspace', (['zl', 'zu', '(30)'], {}), '(zl, zu, 30)\n', (4920, 4932), True, 'import numpy as np\n'), ((5864, 5939), 'numpy.linspace', 'np.linspace', (['(joint_point[0] - k * lfan)', '(joint_point[0] + (1 - k) * lfan)', '(30)'], {}), '(joint_point[0] - k * lfan, joint_point[0] + (1 - k) * lfan, 30)\n', (5875, 5939), True, 'import numpy as np\n'), ((7331, 7406), 'numpy.linspace', 'np.linspace', (['(joint_point[0] - k * lfan)', '(joint_point[0] + (1 - k) * lfan)', '(30)'], {}), '(joint_point[0] - k * lfan, joint_point[0] + (1 - k) * lfan, 30)\n', (7342, 7406), True, 'import numpy as np\n'), ((3368, 3400), 'helper.bezier', 'bezier', (['(qy1.shape[0] - 1)', 't', 'qy1'], {}), '(qy1.shape[0] - 1, t, qy1)\n', (3374, 3400), False, 'from helper import bezier\n'), ((3423, 3455), 'helper.bezier', 'bezier', (['(qy2.shape[0] - 1)', 't', 'qy2'], {}), '(qy2.shape[0] - 1, t, qy2)\n', (3429, 3455), False, 'from helper import bezier\n'), ((3483, 3512), 'numpy.tan', 'np.tan', (['(theta * np.pi / 180.0)'], {}), '(theta * np.pi / 180.0)\n', (3489, 3512), True, 'import numpy as np\n'), ((3586, 3608), 'numpy.linspace', 'np.linspace', (['xs', 'xf', '(5)'], {}), '(xs, xf, 5)\n', (3597, 3608), True, 'import numpy as np\n'), ((4480, 4507), 'numpy.max', 'np.max', (['main_wing_arr[:, 2]'], {}), '(main_wing_arr[:, 2])\n', (4486, 4507), True, 'import numpy as np\n'), ((4968, 5012), 'numpy.sqrt', 'np.sqrt', (['((zu - zcen) ** 
2 - (zi - zcen) ** 2)'], {}), '((zu - zcen) ** 2 - (zi - zcen) ** 2)\n', (4975, 5012), True, 'import numpy as np\n'), ((6171, 6194), 'numpy.linspace', 'np.linspace', (['zl', 'zu', '(30)'], {}), '(zl, zu, 30)\n', (6182, 6194), True, 'import numpy as np\n'), ((6947, 6961), 'numpy.poly1d', 'np.poly1d', (['res'], {}), '(res)\n', (6956, 6961), True, 'import numpy as np\n'), ((7638, 7661), 'numpy.linspace', 'np.linspace', (['zl', 'zu', '(30)'], {}), '(zl, zu, 30)\n', (7649, 7661), True, 'import numpy as np\n'), ((1562, 1577), 'numpy.sign', 'np.sign', (['diff_s'], {}), '(diff_s)\n', (1569, 1577), True, 'import numpy as np\n'), ((2516, 2545), 'numpy.tan', 'np.tan', (['(theta * np.pi / 180.0)'], {}), '(theta * np.pi / 180.0)\n', (2522, 2545), True, 'import numpy as np\n'), ((4034, 4066), 'numpy.sqrt', 'np.sqrt', (['(1.0 - yi ** 2 / yu ** 2)'], {}), '(1.0 - yi ** 2 / yu ** 2)\n', (4041, 4066), True, 'import numpy as np\n'), ((6238, 6282), 'numpy.sqrt', 'np.sqrt', (['((zu - zcen) ** 2 - (zi - zcen) ** 2)'], {}), '((zu - zcen) ** 2 - (zi - zcen) ** 2)\n', (6245, 6282), True, 'import numpy as np\n'), ((7705, 7749), 'numpy.sqrt', 'np.sqrt', (['((zu - zcen) ** 2 - (zi - zcen) ** 2)'], {}), '((zu - zcen) ** 2 - (zi - zcen) ** 2)\n', (7712, 7749), True, 'import numpy as np\n'), ((5657, 5686), 'numpy.sin', 'np.sin', (['(theta * np.pi / 180.0)'], {}), '(theta * np.pi / 180.0)\n', (5663, 5686), True, 'import numpy as np\n'), ((5719, 5748), 'numpy.cos', 'np.cos', (['(theta * np.pi / 180.0)'], {}), '(theta * np.pi / 180.0)\n', (5725, 5748), True, 'import numpy as np\n'), ((7116, 7149), 'numpy.sin', 'np.sin', (['(set_angle * np.pi / 180.0)'], {}), '(set_angle * np.pi / 180.0)\n', (7122, 7149), True, 'import numpy as np\n'), ((7182, 7215), 'numpy.cos', 'np.cos', (['(set_angle * np.pi / 180.0)'], {}), '(set_angle * np.pi / 180.0)\n', (7188, 7215), True, 'import numpy as np\n'), ((3090, 3119), 'numpy.tan', 'np.tan', (['(theta * np.pi / 180.0)'], {}), '(theta * np.pi / 180.0)\n', (3096, 
3119), True, 'import numpy as np\n'), ((3193, 3222), 'numpy.tan', 'np.tan', (['(theta * np.pi / 180.0)'], {}), '(theta * np.pi / 180.0)\n', (3199, 3222), True, 'import numpy as np\n')] |
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from generation.at_types.constant_info import ConstantInfo
from generation.at_types.tc_arg_info import GlobalArgInfo, GlobalResetArgInfo
from generation.generator_decorators import QREC_MULT8, generation_function
from graph.types import GRUParameters, LSTMParameters, RNNParameters
from quantization.qtype import QType
from quantization.symmetric.kernels.rnn import internal_qtype
from .global_names import *
from .mult8_infos_generator import gen_constant
@generation_function("globals",
                     (RNNParameters, LSTMParameters, GRUParameters),
                     qrec_types=(QREC_MULT8,))
def mult8_rnn_infos_generator(gen, node, qrec, pnode, fnode) -> bool:
    """Emit the per-cell "infos" global constant for a recurrent node.

    Dispatches on the concrete parameter class (RNN / LSTM / GRU) to the
    matching *_infos helper, which appends the constant to gen.globals.

    Args:
        gen: code-generator state; receives new global arg infos.
        node: the recurrent-layer parameters being generated.
        qrec: the node's quantization record.
        pnode: parent node (unused).
        fnode: fused node; generation is skipped when it is set.

    Returns:
        True when the node was handled, False for a fused node.
    """
    del pnode
    if fnode is not None:
        return False
    if isinstance(node, RNNParameters):
        rnn_infos(gen, node, qrec)
    elif isinstance(node, LSTMParameters):
        lstm_infos(gen, node, qrec)
    elif isinstance(node, GRUParameters):
        gru_infos(gen, node, qrec)
    else:
        raise ValueError()
    if node.rnn_states_as_inputs:
        # States fed as inputs need an extra reset argument placed in L2.
        gen.globals.append(GlobalResetArgInfo(
            f"{node.name}_Reset", 'AT_MEM_L2', 'AT_MEM_UNDEF'))
    return True
def sigmoid_infos(gate_name, mult_qtype, qtype):
    """Build the int8 infos payload and descriptive comment for a
    hard-sigmoid gate.

    Packs the gate's scale/norm together with the quantized hard-sigmoid
    constants (6, 3, 1/6), a literal 1 and the activation Q.

    Returns:
        (contents, comment): an np.int8 array and a human-readable string.
    """
    gate_scale = mult_qtype.qbiases[0]
    gate_scalen = mult_qtype.qnorms[0]
    # Quantize the hard-sigmoid constants into the target fixed-point format.
    q_three = qtype.quantize(np.array([3]))[0]
    q_sixth = qtype.quantize(np.array([1/6]))[0]
    q_six = qtype.quantize(np.array([6]))[0]
    act_q = qtype.q
    comment = "{0}_scale: {1} {0}_scale_n: {2} A0: {3} B0: {4} C0: {5}".format(
        gate_name, gate_scale, gate_scalen, q_six, q_three, q_sixth, 1, act_q)
    contents = np.array(
        [gate_scale, gate_scalen, q_six, q_three, q_sixth, 1, act_q],
        dtype=np.int8)
    return contents, comment
def htanh_infos(gate_name, mult_qtype, qtype):
    """Build the int8 infos payload and descriptive comment for a
    hard-tanh gate (clipped to [-1, 1]).

    Returns:
        (contents, comment): an np.int8 array and a human-readable string.
    """
    gate_scale = mult_qtype.qbiases[0]
    gate_scalen = mult_qtype.qnorms[0]
    # Quantized clipping bound (+/- 1) in the target fixed-point format.
    q_one = qtype.quantize(np.array([1]))[0]
    comment = "{0}_scale: {1} {0}_scale_n: {2} A0: {3} B0: {4}".format(
        gate_name, gate_scale, gate_scalen, -q_one, q_one)
    contents = np.array([gate_scale, gate_scalen, -q_one, q_one], dtype=np.int8)
    return contents, comment
def scale_infos(gate_name, mult_qtype):
    """Pack a gate's output scale and norm into an int8 pair, plus a
    human-readable comment describing both values."""
    qbias = mult_qtype.qbiases[0]
    qnorm = mult_qtype.qnorms[0]
    comment = f"{gate_name}_scale: {qbias} {gate_name}_scale_n: {qnorm}"
    contents = np.array([qbias, qnorm], dtype=np.int8)
    return contents, comment
# Gate -> activation kind for LSTM cells (forget, input, cell candidate,
# output).  Insertion order matters: lstm_infos iterates these dicts to
# lay out the infos blob in this order.
LSTM_INFOS_ORDER = {
    'f': 'sigmoid',
    'i': 'sigmoid',
    'c': 'htanh',
    'o': 'sigmoid',
}
# Gate -> activation kind for GRU cells (reset, update, candidate).
GRU_INFOS_ORDER = {
    'r': 'sigmoid',
    'z': 'sigmoid',
    'h': 'htanh',
}
# Activation name -> infos builder; 'tanh' reuses the hard-tanh builder.
INFOS_FUNCS = {
    'sigmoid': sigmoid_infos,
    'htanh': htanh_infos,
    'tanh': htanh_infos,
}
def highb(x):
    """Return the high byte (bits 8-15) of a 16-bit value."""
    return (x & 0xff00) >> 8
def lowb(x):
    """Return the low byte (bits 0-7) of a value."""
    return x & 0x00ff
# define LSTM_F_INF 2
# define LSTM_F_OFF 0
# define LSTM_F_SCALE 0
# define LSTM_F_SCALEN 1
# define LSTM_I_INF 2
# define LSTM_I_OFF (LSTM_F_OFF+LSTM_F_INF)
# define LSTM_I_SCALE (0 + LSTM_I_OFF)
# define LSTM_I_SCALEN (1 + LSTM_I_OFF)
# define LSTM_G_INF 2
# define LSTM_G_OFF (LSTM_I_OFF+LSTM_I_INF)
# define LSTM_G_SCALE (0 + LSTM_G_OFF)
# define LSTM_G_SCALEN (1 + LSTM_G_OFF)
# define LSTM_O_INF 2
# define LSTM_O_OFF (LSTM_G_OFF+LSTM_G_INF)
# define LSTM_O_SCALE (0 + LSTM_O_OFF)
# define LSTM_O_SCALEN (1 + LSTM_O_OFF)
# define LSTM_COUT_INF 6
# define LSTM_COUT_OFF (LSTM_O_OFF+LSTM_O_INF)
# define LSTM_CIN_SCALE (0 + LSTM_COUT_OFF)
# define LSTM_CIN_SCALEN (1 + LSTM_COUT_OFF)
# define LSTM_COUT_SCALE (2 + LSTM_COUT_OFF)
# define LSTM_COUT_SCALEN (3 + LSTM_COUT_OFF)
# define LSTM_OUT_SCALE (4 + LSTM_COUT_OFF)
# define LSTM_OUT_SCALEN (5 + LSTM_COUT_OFF)
# define LSTM_INT_INF 7
# define LSTM_INT_OFF (LSTM_COUT_OFF+LSTM_COUT_INF)
# define LSTM_INT_A0 (0 + LSTM_INT_OFF)
# define LSTM_INT_B0 (2 + LSTM_INT_OFF)
# define LSTM_INT_C0 (4 + LSTM_INT_OFF)
# define LSTM_INT_Q (6 + LSTM_INT_OFF)
# define LSTM_X_IN_INF 7
# define LSTM_X_IN_OFF (LSTM_INT_OFF+LSTM_INT_INF)
# define LSTM_F_IN_SCALE (0 + LSTM_X_IN_OFF)
# define LSTM_F_IN_SCALEN (1 + LSTM_X_IN_OFF)
# define LSTM_I_IN_SCALE (2 + LSTM_X_IN_OFF)
# define LSTM_I_IN_SCALEN (3 + LSTM_X_IN_OFF)
# define LSTM_G_IN_SCALE (4 + LSTM_X_IN_OFF)
# define LSTM_G_IN_SCALEN (5 + LSTM_X_IN_OFF)
# define LSTM_O_IN_SCALE (6 + LSTM_X_IN_OFF)
# define LSTM_O_IN_SCALEN (7 + LSTM_X_IN_OFF)
def lstm_infos(gen, node, qrec):
i_qtype = internal_qtype(qrec)
contents = []
comments = []
for k, v in LSTM_INFOS_ORDER.items():
info, comment = scale_infos(k, qrec.cache["r_2_%s_q" % k])
contents.append(info)
comments.append(comment)
cin_scale = qrec.cache['cell_in_q'].qbiases[0]
cin_scalen = qrec.cache['cell_in_q'].qnorms[0]
cout_scale = qrec.cache['cell_out_q'].qbiases[0]
cout_scalen = qrec.cache['cell_out_q'].qnorms[0]
out_scale = qrec.cache['state_out_q'].qbiases[0]
out_scalen = qrec.cache['state_out_q'].qnorms[0]
comments.append(str.format("cin_scale: {} cin_scale_n: {} cout_scale: {} cout_scale_n: {}",
cin_scale, cin_scalen, cout_scale, cout_scalen,))
comments.append(str.format("out_scale: {} out_scale_n: {}",
out_scale, out_scalen))
contents.append(np.array([cin_scale, cin_scalen, cout_scale, cout_scalen,
out_scale, out_scalen], dtype=np.int8))
three = i_qtype.quantize(np.array([3]))[0]
six = i_qtype.quantize(np.array([6]))[0]
sixth = i_qtype.quantize(np.array([1/6]))[0]
comments.append(str.format("int_q: {} A0: {} B0: {} C0: {}",
i_qtype.q, six, three, sixth))
contents.append(np.array([lowb(six), highb(six),
lowb(three), highb(three),
lowb(sixth), highb(sixth), i_qtype.q],
dtype=np.int8))
for k in LSTM_INFOS_ORDER.keys():
info, comment = scale_infos(k, qrec.cache["i_2_%s_q" % k])
contents.append(info)
comments.append(comment)
cname, file_name = gen_constant(gen, node, node, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
contents=np.hstack(tuple(contents)))
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
comment=" ".join(comments)))
# define RNN_F_INF 8
# define RNN_F_OFF 0
# define RNN_F_SCALE 0
# define RNN_F_SCALEN 1
# define RNN_F_A0 2
# define RNN_F_B0 3
# define RNN_F_IN_SCALE 4
# define RNN_F_IN_SCALEN 5
# define RNN_OUT_SCALE 6
# define RNN_OUT_SCALEN 7
def rnn_infos(gen, node, qrec):
i_state_q = qrec.in_qs[node.INPUT_NAMES.index('i_state')]
contents = []
comments = []
# info for activation (scale the act input to the proper scale)
info, comment = INFOS_FUNCS[node.activation](
"f", qrec.cache['s_2_s_q'], i_state_q)
contents.append(info)
comments.append(comment)
# info for input scaling (only used with non SameInputStateScale kernels)
info, comment = scale_infos("f", qrec.cache["i_2_a_q"])
contents.append(info)
comments.append(comment)
# info for scaling the activation out to out scale (only used for non Hard activations kernels)
info, comment = scale_infos("f", qrec.cache["s_2_o_q"])
contents.append(info)
comments.append(comment)
cname, file_name = gen_constant(gen, node, node, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
contents=np.hstack(tuple(contents)))
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
comment=comment))
# define GRU_R_INF 4
# define GRU_R_OFF 0
# define GRU_R_INT_SCALE 0
# define GRU_R_INT_SCALEN 1
# define GRU_R_IN_SCALE 2
# define GRU_R_IN_SCALEN 3
# define GRU_Z_INF 4
# define GRU_Z_OFF (GRU_R_OFF+GRU_R_INF)
# define GRU_Z_INT_SCALE (0 + GRU_Z_OFF)
# define GRU_Z_INT_SCALEN (1 + GRU_Z_OFF)
# define GRU_Z_IN_SCALE (2 + GRU_Z_OFF)
# define GRU_Z_IN_SCALEN (3 + GRU_Z_OFF)
# define GRU_HT_INF 2
# define GRU_HT_OFF (GRU_Z_OFF+GRU_Z_INF)
# define GRU_HT_IN_SCALE (0 + GRU_HT_OFF)
# define GRU_HT_IN_SCALEN (1 + GRU_HT_OFF)
# define GRU_H_INF 2
# define GRU_H_OFF (GRU_HT_OFF+GRU_HT_INF)
# define GRU_H_INT_SCALE (0 + GRU_H_OFF)
# define GRU_H_INT_SCALEN (1 + GRU_H_OFF)
# define GRU_INT_INF 3
# define GRU_INT_OFF (GRU_H_OFF+GRU_H_INF)
# define GRU_INT_A0 (2 + GRU_INT_OFF)
# define GRU_INT_B0 (3 + GRU_INT_OFF)
# define GRU_INT_C0 (4 + GRU_INT_OFF)
# define GRU_CELL_INFOS (GRU_R_INF+GRU_Z_INF+GRU_HT_INF+GRU_H_INF+GRU_INT_INF)
def gru_infos(gen, node, qrec):
i_qtype = internal_qtype(qrec)
contents = []
comments = []
r_to_int_scale = qrec.cache['r_WR_2_int_q'].qbiases[0]
r_to_int_scalen = qrec.cache['r_WR_2_int_q'].qnorms[0]
r_to_in_scale = qrec.cache['i_2_r_WR_q'].qbiases[0]
r_to_in_scalen = qrec.cache['i_2_r_WR_q'].qnorms[0]
z_to_int_scale = qrec.cache['z_WR_2_int_q'].qbiases[0]
z_to_int_scalen = qrec.cache['z_WR_2_int_q'].qnorms[0]
z_to_in_scale = qrec.cache['i_2_z_WR_q'].qbiases[0]
z_to_in_scalen = qrec.cache['i_2_z_WR_q'].qnorms[0]
ht_to_in_scale = qrec.cache['i_2_h_WR_q'].qbiases[0]
ht_to_in_scalen = qrec.cache['i_2_h_WR_q'].qnorms[0]
h_to_int_scale = qrec.cache['h_WR_2_int_q'].qbiases[0]
h_to_int_scalen = qrec.cache['h_WR_2_int_q'].qnorms[0]
# GRU_R_INFOS
comments.append(str.format("r_to_int_scale: {} r_to_int_scalen: {} r_to_in_scale: {} r_to_in_scalen: {}",
r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen,))
contents.append(np.array(
[r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen], dtype=np.int8))
# GRU_Z_INFOS
comments.append(str.format("z_to_int_scale: {} z_to_int_scalen: {} z_to_in_scale: {} z_to_in_scalen: {}",
z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen,))
contents.append(np.array(
[z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen], dtype=np.int8))
# GRU_HT_INFOS
comments.append(str.format("ht_to_in_scale: {} ht_to_in_scalen: {}",
ht_to_in_scale, ht_to_in_scalen,))
contents.append(np.array([ht_to_in_scale, ht_to_in_scalen], dtype=np.int8))
# GRU_H_INFOS
comments.append(str.format("h_to_int_scale: {} h_to_int_scalen: {}",
h_to_int_scale, h_to_int_scalen,))
contents.append(np.array([h_to_int_scale, h_to_int_scalen], dtype=np.int8))
three = i_qtype.quantize(np.array([3]))[0]
six = i_qtype.quantize(np.array([6]))[0]
sixth = i_qtype.quantize(np.array([1/6]))[0]
comments.append(str.format("int_q: {} A0: {} B0: {} C0: {}",
i_qtype.q, six, three, sixth))
contents.append(np.array([lowb(six), highb(six),
lowb(three), highb(three),
lowb(sixth), highb(sixth), i_qtype.q],
dtype=np.int8))
cname, file_name = gen_constant(gen, node, node, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True),
contents=np.hstack(tuple(contents)))
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
comment=" ".join(comments)))
| [
"generation.at_types.tc_arg_info.GlobalArgInfo",
"generation.at_types.tc_arg_info.GlobalResetArgInfo",
"generation.generator_decorators.generation_function",
"numpy.array",
"quantization.symmetric.kernels.rnn.internal_qtype",
"quantization.qtype.QType.Pow2"
] | [((1186, 1294), 'generation.generator_decorators.generation_function', 'generation_function', (['"""globals"""', '(RNNParameters, LSTMParameters, GRUParameters)'], {'qrec_types': '(QREC_MULT8,)'}), "('globals', (RNNParameters, LSTMParameters,\n GRUParameters), qrec_types=(QREC_MULT8,))\n", (1205, 1294), False, 'from generation.generator_decorators import QREC_MULT8, generation_function\n'), ((2346, 2415), 'numpy.array', 'np.array', (['[scale, scale_n, six, three, sixth, 1, actn]'], {'dtype': 'np.int8'}), '([scale, scale_n, six, three, sixth, 1, actn], dtype=np.int8)\n', (2354, 2415), True, 'import numpy as np\n'), ((2785, 2837), 'numpy.array', 'np.array', (['[scale, scale_n, -one, one]'], {'dtype': 'np.int8'}), '([scale, scale_n, -one, one], dtype=np.int8)\n', (2793, 2837), True, 'import numpy as np\n'), ((3105, 3146), 'numpy.array', 'np.array', (['[scale, scale_n]'], {'dtype': 'np.int8'}), '([scale, scale_n], dtype=np.int8)\n', (3113, 3146), True, 'import numpy as np\n'), ((5546, 5566), 'quantization.symmetric.kernels.rnn.internal_qtype', 'internal_qtype', (['qrec'], {}), '(qrec)\n', (5560, 5566), False, 'from quantization.symmetric.kernels.rnn import internal_qtype\n'), ((10586, 10606), 'quantization.symmetric.kernels.rnn.internal_qtype', 'internal_qtype', (['qrec'], {}), '(qrec)\n', (10600, 10606), False, 'from quantization.symmetric.kernels.rnn import internal_qtype\n'), ((6406, 6506), 'numpy.array', 'np.array', (['[cin_scale, cin_scalen, cout_scale, cout_scalen, out_scale, out_scalen]'], {'dtype': 'np.int8'}), '([cin_scale, cin_scalen, cout_scale, cout_scalen, out_scale,\n out_scalen], dtype=np.int8)\n', (6414, 6506), True, 'import numpy as np\n'), ((7299, 7335), 'quantization.qtype.QType.Pow2', 'QType.Pow2', ([], {'bits': '(8)', 'q': '(0)', 'signed': '(True)'}), '(bits=8, q=0, signed=True)\n', (7309, 7335), False, 'from quantization.qtype import QType\n'), ((8949, 8985), 'quantization.qtype.QType.Pow2', 'QType.Pow2', ([], {'bits': '(8)', 'q': '(0)', 
'signed': '(True)'}), '(bits=8, q=0, signed=True)\n', (8959, 8985), False, 'from quantization.qtype import QType\n'), ((9078, 9240), 'generation.at_types.tc_arg_info.GlobalArgInfo', 'GlobalArgInfo', (['"""int8"""', 'cname', "gen.opts['default_global_home_location']", "gen.opts['default_global_exec_location']"], {'const_info': 'const_info', 'comment': 'comment'}), "('int8', cname, gen.opts['default_global_home_location'], gen.\n opts['default_global_exec_location'], const_info=const_info, comment=\n comment)\n", (9091, 9240), False, 'from generation.at_types.tc_arg_info import GlobalArgInfo, GlobalResetArgInfo\n'), ((11581, 11674), 'numpy.array', 'np.array', (['[r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen]'], {'dtype': 'np.int8'}), '([r_to_int_scale, r_to_int_scalen, r_to_in_scale, r_to_in_scalen],\n dtype=np.int8)\n', (11589, 11674), True, 'import numpy as np\n'), ((11927, 12020), 'numpy.array', 'np.array', (['[z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen]'], {'dtype': 'np.int8'}), '([z_to_int_scale, z_to_int_scalen, z_to_in_scale, z_to_in_scalen],\n dtype=np.int8)\n', (11935, 12020), True, 'import numpy as np\n'), ((12206, 12264), 'numpy.array', 'np.array', (['[ht_to_in_scale, ht_to_in_scalen]'], {'dtype': 'np.int8'}), '([ht_to_in_scale, ht_to_in_scalen], dtype=np.int8)\n', (12214, 12264), True, 'import numpy as np\n'), ((12444, 12502), 'numpy.array', 'np.array', (['[h_to_int_scale, h_to_int_scalen]'], {'dtype': 'np.int8'}), '([h_to_int_scale, h_to_int_scalen], dtype=np.int8)\n', (12452, 12502), True, 'import numpy as np\n'), ((13100, 13136), 'quantization.qtype.QType.Pow2', 'QType.Pow2', ([], {'bits': '(8)', 'q': '(0)', 'signed': '(True)'}), '(bits=8, q=0, signed=True)\n', (13110, 13136), False, 'from quantization.qtype import QType\n'), ((1793, 1862), 'generation.at_types.tc_arg_info.GlobalResetArgInfo', 'GlobalResetArgInfo', (['f"""{node.name}_Reset"""', '"""AT_MEM_L2"""', '"""AT_MEM_UNDEF"""'], {}), "(f'{node.name}_Reset', 
'AT_MEM_L2', 'AT_MEM_UNDEF')\n", (1811, 1862), False, 'from generation.at_types.tc_arg_info import GlobalArgInfo, GlobalResetArgInfo\n'), ((2040, 2053), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (2048, 2053), True, 'import numpy as np\n'), ((2085, 2102), 'numpy.array', 'np.array', (['[1 / 6]'], {}), '([1 / 6])\n', (2093, 2102), True, 'import numpy as np\n'), ((2130, 2143), 'numpy.array', 'np.array', (['[6]'], {}), '([6])\n', (2138, 2143), True, 'import numpy as np\n'), ((2613, 2626), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2621, 2626), True, 'import numpy as np\n'), ((6564, 6577), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (6572, 6577), True, 'import numpy as np\n'), ((6609, 6622), 'numpy.array', 'np.array', (['[6]'], {}), '([6])\n', (6617, 6622), True, 'import numpy as np\n'), ((6656, 6673), 'numpy.array', 'np.array', (['[1 / 6]'], {}), '([1 / 6])\n', (6664, 6673), True, 'import numpy as np\n'), ((12534, 12547), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (12542, 12547), True, 'import numpy as np\n'), ((12579, 12592), 'numpy.array', 'np.array', (['[6]'], {}), '([6])\n', (12587, 12592), True, 'import numpy as np\n'), ((12626, 12643), 'numpy.array', 'np.array', (['[1 / 6]'], {}), '([1 / 6])\n', (12634, 12643), True, 'import numpy as np\n')] |
import gpytorch
import random
import numpy as np
import torch
from scipy.optimize import minimize
class QLearningAgent:
def __init__(self, id, hp, utility_function, num_actions):
self.alpha = hp.alpha
self.epsilon = hp.epsilon
self.utility = utility_function
self.rand_prob = hp.rand_prob
self.num_objectives = 2
self.id = id
self.hp = hp
self.theta = None
self.Q = np.zeros((num_actions, 2))
self.num_actions = num_actions
# epsilon greedy based on nonlinear optimiser mixed strategy search
def act(self, state=None, theta=None):
if random.uniform(0.0, 1.0) < self.epsilon:
action = self.select_random_action()
else:
action = self.select_action_greedy_mixed_nonlinear()
return np.array([action]), theta
def select_random_action(self):
random_action = np.random.randint(self.num_actions)
return random_action
# greedy action selection based on nonlinear optimiser mixed strategy search
def select_action_greedy_mixed_nonlinear(self):
strategy = self.calc_mixed_strategy_nonlinear()
if np.sum(strategy) != 1:
strategy = strategy / np.sum(strategy)
return np.random.choice(range(self.num_actions), p=strategy)
def calc_mixed_strategy_nonlinear(self):
if self.rand_prob:
s0 = np.random.random(self.num_actions)
s0 /= np.sum(s0)
else:
s0 = np.full(self.num_actions,
1.0 / self.num_actions) # initial guess set to equal prob over all actions
b = (0.0, 1.0)
bnds = (b,) * self.num_actions
con1 = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}
cons = ([con1])
solution = minimize(self.objective, s0, bounds=bnds, constraints=cons)
strategy = solution.x
return strategy
# this is the objective function to be minimised by the nonlinear optimiser
# Calculates the SER for a given strategy using the agent's own Q values
# (it returns the negative of SER)
def objective(self, strategy):
expected_vec = np.zeros(self.num_objectives)
for o in range(self.num_objectives):
expected_vec[o] = np.dot(self.Q[:, o], np.array(strategy))
ser = self.utility(torch.from_numpy(expected_vec)).item()
return - ser
def _apply_discount(self, rewards):
cum_discount = np.cumprod(self.hp.gamma * np.ones(rewards.shape), axis=0) / self.hp.gamma
discounted_rewards = np.sum(rewards * cum_discount, axis=0)
return discounted_rewards
def update(self, actions, payoff, opp_actions=None):
# update Q
actions = np.array(actions)
# average return over rollout;
means = self._apply_discount(np.array(payoff))
for i, act in enumerate(actions[0, :]):
self.Q[act] += self.alpha * (means[:, i] - self.Q[act])
def reset(self):
self.Q = np.zeros((self.num_actions, 2))
self.epsilon = self.hp.epsilon
| [
"numpy.full",
"scipy.optimize.minimize",
"numpy.sum",
"random.uniform",
"numpy.zeros",
"numpy.ones",
"numpy.random.randint",
"numpy.array",
"numpy.random.random",
"torch.from_numpy"
] | [((446, 472), 'numpy.zeros', 'np.zeros', (['(num_actions, 2)'], {}), '((num_actions, 2))\n', (454, 472), True, 'import numpy as np\n'), ((910, 945), 'numpy.random.randint', 'np.random.randint', (['self.num_actions'], {}), '(self.num_actions)\n', (927, 945), True, 'import numpy as np\n'), ((1803, 1862), 'scipy.optimize.minimize', 'minimize', (['self.objective', 's0'], {'bounds': 'bnds', 'constraints': 'cons'}), '(self.objective, s0, bounds=bnds, constraints=cons)\n', (1811, 1862), False, 'from scipy.optimize import minimize\n'), ((2173, 2202), 'numpy.zeros', 'np.zeros', (['self.num_objectives'], {}), '(self.num_objectives)\n', (2181, 2202), True, 'import numpy as np\n'), ((2574, 2612), 'numpy.sum', 'np.sum', (['(rewards * cum_discount)'], {'axis': '(0)'}), '(rewards * cum_discount, axis=0)\n', (2580, 2612), True, 'import numpy as np\n'), ((2743, 2760), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (2751, 2760), True, 'import numpy as np\n'), ((3011, 3042), 'numpy.zeros', 'np.zeros', (['(self.num_actions, 2)'], {}), '((self.num_actions, 2))\n', (3019, 3042), True, 'import numpy as np\n'), ((639, 663), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (653, 663), False, 'import random\n'), ((823, 841), 'numpy.array', 'np.array', (['[action]'], {}), '([action])\n', (831, 841), True, 'import numpy as np\n'), ((1176, 1192), 'numpy.sum', 'np.sum', (['strategy'], {}), '(strategy)\n', (1182, 1192), True, 'import numpy as np\n'), ((1413, 1447), 'numpy.random.random', 'np.random.random', (['self.num_actions'], {}), '(self.num_actions)\n', (1429, 1447), True, 'import numpy as np\n'), ((1466, 1476), 'numpy.sum', 'np.sum', (['s0'], {}), '(s0)\n', (1472, 1476), True, 'import numpy as np\n'), ((1508, 1557), 'numpy.full', 'np.full', (['self.num_actions', '(1.0 / self.num_actions)'], {}), '(self.num_actions, 1.0 / self.num_actions)\n', (1515, 1557), True, 'import numpy as np\n'), ((2837, 2853), 'numpy.array', 'np.array', (['payoff'], 
{}), '(payoff)\n', (2845, 2853), True, 'import numpy as np\n'), ((1237, 1253), 'numpy.sum', 'np.sum', (['strategy'], {}), '(strategy)\n', (1243, 1253), True, 'import numpy as np\n'), ((2299, 2317), 'numpy.array', 'np.array', (['strategy'], {}), '(strategy)\n', (2307, 2317), True, 'import numpy as np\n'), ((1745, 1754), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1751, 1754), True, 'import numpy as np\n'), ((2346, 2376), 'torch.from_numpy', 'torch.from_numpy', (['expected_vec'], {}), '(expected_vec)\n', (2362, 2376), False, 'import torch\n'), ((2497, 2519), 'numpy.ones', 'np.ones', (['rewards.shape'], {}), '(rewards.shape)\n', (2504, 2519), True, 'import numpy as np\n')] |
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import logging
import os
import tarfile
import urllib.request
import librosa
import numpy as np
from sklearn.model_selection import train_test_split
sr = 16000
duration_stride = 1.0
# google speech command v2
URL = "http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz"
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
if not os.path.exists(destination):
logging.info(f"{destination} does not exist. Downloading ...")
urllib.request.urlretrieve(source, filename=destination + '.tmp')
os.rename(destination + '.tmp', destination)
logging.info(f"Downloaded {destination}.")
else:
logging.info(f"Destination {destination} exists. Skipping.")
return destination
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info('Not extracting. Maybe already there?')
def __extract_all_files(filepath: str, data_root: str, data_dir: str):
if not os.path.exists(data_dir):
extract_file(filepath, data_dir)
else:
logging.info(f'Skipping extracting. Data already there {data_dir}')
def split_train_val_test(data_dir, file_type, test_size=0.1, val_size=0.1):
X = []
if file_type == "speech":
for o in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, o)) and o.split("/")[-1] != "_background_noise_":
X.extend(glob.glob(os.path.join(data_dir, o) + '/*.wav'))
else:
for o in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, o)):
X.extend(glob.glob(os.path.join(data_dir, o) + '/*.wav'))
else: # for using "_background_noise_" from google speech commands as background data
if o.endswith(".wav"):
X.append(os.path.join(data_dir, o))
X_train, X_test = train_test_split(X, test_size=test_size, random_state=1)
val_size_tmp = val_size / (1 - test_size)
X_train, X_val = train_test_split(X_train, test_size=val_size_tmp, random_state=1)
with open(os.path.join(data_dir, file_type + "_training_list.txt"), "w") as outfile:
outfile.write("\n".join(X_train))
with open(os.path.join(data_dir, file_type + "_testing_list.txt"), "w") as outfile:
outfile.write("\n".join(X_test))
with open(os.path.join(data_dir, file_type + "_validation_list.txt"), "w") as outfile:
outfile.write("\n".join(X_val))
logging.info(f'Overall: {len(X)}, Train: {len(X_train)}, Validatoin: {len(X_val)}, Test: {len(X_test)}')
logging.info(f"Finished split train, val and test for {file_type}. Write to files!")
def process_google_speech_train(data_dir):
X = []
for o in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, o)) and o.split("/")[-1] != "_background_noise_":
X.extend(glob.glob(os.path.join(data_dir, o) + '/*.wav'))
short_files = [i.split(data_dir)[1] for i in files]
with open(os.path.join(data_dir, 'testing_list.txt'), 'r') as allfile:
testing_list = allfile.read().splitlines()
with open(os.path.join(data_dir, 'validation_list.txt'), 'r') as allfile:
validation_list = allfile.read().splitlines()
exist_set = set(testing_list).copy()
exist_set.update(set(validation_list))
training_list = [i for i in short_files if i not in exist_set]
with open(os.path.join(data_dir, "training_list.txt"), "w") as outfile:
outfile.write("\n".join(training_list))
logging.info(
f'Overall: {len(files)}, Train: {len(training_list)}, Validatoin: {len(validation_list)}, Test: {len(testing_list)}'
)
def write_manifest(
out_dir,
files,
prefix,
manifest_name,
duration_stride=1.0,
duration_max=None,
duration_limit=10.0,
filter_long=False,
):
"""
Given a list of files, segment each file and write them to manifest with restrictions.
Args:
out_dir: directory of generated manifest
files: list of files to be processed
prefix: label of samples
manifest_name: name of generated manifest
duration_stride: stride for segmenting audio samples
duration_max: duration for each segment
duration_limit: duration threshold for filtering out long audio samples
filter_long: boolean to determine whether to filter out long audio samples
Returns:
"""
seg_num = 0
skip_num = 0
if duration_max is None:
duration_max = 1e9
if not os.path.exists(out_dir):
logging.info(f'Outdir {out_dir} does not exist. Creat directory.')
os.mkdir(out_dir)
output_path = os.path.join(out_dir, manifest_name + '.json')
with open(output_path, 'w') as fout:
for file in files:
label = prefix
try:
x, _sr = librosa.load(file, sr=sr)
duration = librosa.get_duration(x, sr=sr)
except Exception:
continue
if filter_long and duration > duration_limit:
skip_num += 1
continue
offsets = []
durations = []
if duration > duration_max:
current_offset = 0.0
while current_offset < duration:
difference = duration - current_offset
segment_duration = min(duration_max, difference)
offsets.append(current_offset)
durations.append(segment_duration)
current_offset += duration_stride
else:
offsets.append(0.0)
durations.append(duration)
for duration, offset in zip(durations, offsets):
metadata = {
'audio_filepath': file,
'duration': duration,
'label': label,
'text': '_', # for compatibility with ASRAudioText
'offset': offset,
}
json.dump(metadata, fout)
fout.write('\n')
fout.flush()
seg_num += 1
return skip_num, seg_num, output_path
def load_list_write_manifest(
data_dir, out_dir, filename, prefix, duration_stride=1.0, duration_max=1.0, duration_limit=100.0, filter_long=True
):
filename = prefix + '_' + filename
file_path = os.path.join(data_dir, filename)
with open(file_path, 'r') as allfile:
files = allfile.read().splitlines()
manifest_name = filename.split('_list.txt')[0] + '_manifest'
skip_num, seg_num, output_path = write_manifest(
out_dir, files, prefix, manifest_name, duration_stride, duration_max, duration_limit, filter_long=True,
)
return skip_num, seg_num, output_path
def rebalance_json(data_dir, data_json, num, prefix):
data = []
seg = 0
for line in open(data_json, 'r'):
data.append(json.loads(line))
filename = data_json.split('/')[-1]
fout_path = os.path.join(data_dir, prefix + "_" + filename)
if len(data) >= num:
selected_sample = np.random.choice(data, num, replace=False)
else:
selected_sample = np.random.choice(data, num, replace=True)
with open(fout_path, 'a') as fout:
for i in selected_sample:
seg += 1
json.dump(i, fout)
fout.write('\n')
fout.flush()
logging.info(f'Get {seg}/{num} to {fout_path} from {data_json}')
return fout_path
def generate_variety_noise(data_dir, filename, prefix):
curr_dir = data_dir.split("_background_noise_")[0]
silence_path = os.path.join(curr_dir, "_background_noise_more")
if not os.path.exists(silence_path):
os.mkdir(silence_path)
silence_stride = 1000 # stride = 1/16 seconds
sampling_rate = 16000
silence_files = []
rng = np.random.RandomState(0)
filename = prefix + '_' + filename
file_path = os.path.join(data_dir, filename)
with open(file_path, 'r') as allfile:
files = allfile.read().splitlines()
for file in files:
y, sr = librosa.load(file, sr=sampling_rate)
for i in range(0, len(y) - sampling_rate, silence_stride):
file_name = "{}_{}.wav".format(file.split("/")[-1], i)
y_slice = y[i : i + sampling_rate]
magnitude = rng.uniform(0.0, 1.0)
y_slice *= magnitude
out_file_path = os.path.join(silence_path, file_name)
librosa.output.write_wav(out_file_path, y_slice, sr)
silence_files.append(out_file_path)
new_list_file = os.path.join(silence_path, filename)
with open(new_list_file, "w") as outfile:
outfile.write("\n".join(silence_files))
logging.info(f"Generate more background for {file_path}. => {new_list_file} !")
return len(silence_files)
def main():
parser = argparse.ArgumentParser(description='Speech and backgound data download and preprocess')
parser.add_argument("--out_dir", required=False, default='./manifest/', type=str)
parser.add_argument("--speech_data_root", required=True, default=None, type=str)
parser.add_argument("--background_data_root", required=True, default=None, type=str)
parser.add_argument('--test_size', required=False, default=0.1, type=float)
parser.add_argument('--val_size', required=False, default=0.1, type=float)
parser.add_argument('--log', required=False, action='store_true')
parser.add_argument('--rebalance_method', required=False, default=None, type=str)
parser.add_argument('--generate', required=False, action='store_true')
parser.set_defaults(log=False, generate=False)
args = parser.parse_args()
if not args.rebalance_method:
rebalance = False
else:
if args.rebalance_method != 'over' and args.rebalance_method != 'under' and args.rebalance_method != 'fixed':
raise NameError("Please select a valid sampling method: over/under/fixed.")
else:
rebalance = True
if args.log:
logging.basicConfig(level=logging.DEBUG)
# Download speech data
speech_data_root = args.speech_data_root
data_set = "google_speech_recognition_v2"
speech_data_folder = os.path.join(speech_data_root, data_set)
background_data_folder = args.background_data_root
logging.info(f"Working on: {data_set}")
# Download and extract speech data
if not os.path.exists(speech_data_folder):
file_path = os.path.join(speech_data_root, data_set + ".tar.bz2")
logging.info(f"Getting {data_set}")
__maybe_download_file(file_path, URL)
logging.info(f"Extracting {data_set}")
__extract_all_files(file_path, speech_data_root, speech_data_folder)
logging.info(f"Split speech data!")
# dataset provide testing.txt and validation.txt feel free to split data using that with process_google_speech_train
split_train_val_test(speech_data_folder, "speech", args.test_size, args.val_size)
logging.info(f"Split background data!")
split_train_val_test(background_data_folder, "background", args.test_size, args.val_size)
out_dir = args.out_dir
# Process Speech manifest
logging.info(f"=== Write speech data to manifest!")
skip_num_val, speech_seg_num_val, speech_val = load_list_write_manifest(
speech_data_folder, out_dir, 'validation_list.txt', 'speech', 1, 1
)
skip_num_test, speech_seg_num_test, speech_test = load_list_write_manifest(
speech_data_folder, out_dir, 'testing_list.txt', 'speech', 1, 1
)
skip_num_train, speech_seg_num_train, speech_train = load_list_write_manifest(
speech_data_folder, out_dir, 'training_list.txt', 'speech', 1, 1
)
logging.info(f'Val: Skip {skip_num_val} samples. Get {speech_seg_num_val} segments! => {speech_val} ')
logging.info(f'Test: Skip {skip_num_test} samples. Get {speech_seg_num_test} segments! => {speech_test}')
logging.info(f'Train: Skip {skip_num_train} samples. Get {speech_seg_num_train} segments!=> {speech_train}')
# Process background manifest
# if we select to generate more background noise data
if args.generate:
logging.info("Start generate more background noise data")
generate_variety_noise(background_data_folder, 'validation_list.txt', 'background')
generate_variety_noise(background_data_folder, 'training_list.txt', 'background')
generate_variety_noise(background_data_folder, 'testing_list.txt', 'background')
background_data_folder = os.path.join(
background_data_folder.split("_background_noise_")[0], "_background_noise_more"
)
logging.info(f"=== Write background data to manifest!")
skip_num_val, background_seg_num_val, background_val = load_list_write_manifest(
background_data_folder, out_dir, 'validation_list.txt', 'background', 1, 1
)
skip_num_test, background_seg_num_test, background_test = load_list_write_manifest(
background_data_folder, out_dir, 'testing_list.txt', 'background', 1, 1
)
skip_num_train, background_seg_num_train, background_train = load_list_write_manifest(
background_data_folder, out_dir, 'training_list.txt', 'background', 1, 1
)
logging.info(f'Val: Skip {skip_num_val} samples. Get {background_seg_num_val} segments! => {background_val}')
logging.info(f'Test: Skip {skip_num_test} samples. Get {background_seg_num_test} segments! => {background_test}')
logging.info(
f'Train: Skip {skip_num_train} samples. Get {background_seg_num_train} segments! => {background_train}'
)
min_val, max_val = min(speech_seg_num_val, background_seg_num_val), max(speech_seg_num_val, background_seg_num_val)
min_test, max_test = (
min(speech_seg_num_test, background_seg_num_test),
max(speech_seg_num_test, background_seg_num_test),
)
min_train, max_train = (
min(speech_seg_num_train, background_seg_num_train),
max(speech_seg_num_train, background_seg_num_train),
)
logging.info('Done generating manifest!')
if rebalance:
# Random Oversampling: Randomly duplicate examples in the minority class.
# Random Undersampling: Randomly delete examples in the majority class.
if args.rebalance_method == 'under':
logging.info(f"Rebalancing number of samples in classes using {args.rebalance_method} sampling.")
logging.info(f'Val: {min_val} Test: {min_test} Train: {min_train}!')
rebalance_json(out_dir, background_val, min_val, 'balanced')
rebalance_json(out_dir, background_test, min_test, 'balanced')
rebalance_json(out_dir, background_train, min_train, 'balanced')
rebalance_json(out_dir, speech_val, min_val, 'balanced')
rebalance_json(out_dir, speech_test, min_test, 'balanced')
rebalance_json(out_dir, speech_train, min_train, 'balanced')
if args.rebalance_method == 'over':
logging.info(f"Rebalancing number of samples in classes using {args.rebalance_method} sampling.")
logging.info(f'Val: {max_val} Test: {max_test} Train: {max_train}!')
rebalance_json(out_dir, background_val, max_val, 'balanced')
rebalance_json(out_dir, background_test, max_test, 'balanced')
rebalance_json(out_dir, background_train, max_train, 'balanced')
rebalance_json(out_dir, speech_val, max_val, 'balanced')
rebalance_json(out_dir, speech_test, max_test, 'balanced')
rebalance_json(out_dir, speech_train, max_train, 'balanced')
if args.rebalance_method == 'fixed':
fixed_test, fixed_val, fixed_train = 1000, 1000, 5000
logging.info(f"Rebalancing number of samples in classes using {args.rebalance_method} sampling.")
logging.info(f'Val: {fixed_val} Test: {fixed_test} Train: {fixed_train}!')
rebalance_json(out_dir, background_val, fixed_val, 'balanced')
rebalance_json(out_dir, background_test, fixed_test, 'balanced')
rebalance_json(out_dir, background_train, fixed_train, 'balanced')
rebalance_json(out_dir, speech_val, fixed_val, 'balanced')
rebalance_json(out_dir, speech_test, fixed_test, 'balanced')
rebalance_json(out_dir, speech_train, fixed_train, 'balanced')
else:
logging.info("Don't rebalance number of samples in classes.")
if __name__ == '__main__':
main()
| [
"os.mkdir",
"json.dump",
"argparse.ArgumentParser",
"logging.basicConfig",
"json.loads",
"sklearn.model_selection.train_test_split",
"os.rename",
"os.path.exists",
"numpy.random.RandomState",
"logging.info",
"librosa.load",
"numpy.random.choice",
"tarfile.open",
"librosa.output.write_wav",... | [((2765, 2821), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X'], {'test_size': 'test_size', 'random_state': '(1)'}), '(X, test_size=test_size, random_state=1)\n', (2781, 2821), False, 'from sklearn.model_selection import train_test_split\n'), ((2889, 2954), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train'], {'test_size': 'val_size_tmp', 'random_state': '(1)'}), '(X_train, test_size=val_size_tmp, random_state=1)\n', (2905, 2954), False, 'from sklearn.model_selection import train_test_split\n'), ((3461, 3550), 'logging.info', 'logging.info', (['f"""Finished split train, val and test for {file_type}. Write to files!"""'], {}), "(\n f'Finished split train, val and test for {file_type}. Write to files!')\n", (3473, 3550), False, 'import logging\n'), ((3615, 3635), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (3625, 3635), False, 'import os\n'), ((5551, 5597), 'os.path.join', 'os.path.join', (['out_dir', "(manifest_name + '.json')"], {}), "(out_dir, manifest_name + '.json')\n", (5563, 5597), False, 'import os\n'), ((7286, 7318), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (7298, 7318), False, 'import os\n'), ((7899, 7946), 'os.path.join', 'os.path.join', (['data_dir', "(prefix + '_' + filename)"], {}), "(data_dir, prefix + '_' + filename)\n", (7911, 7946), False, 'import os\n'), ((8305, 8370), 'logging.info', 'logging.info', (['f"""Get {seg}/{num} to {fout_path} from {data_json}"""'], {}), "(f'Get {seg}/{num} to {fout_path} from {data_json}')\n", (8317, 8370), False, 'import logging\n'), ((8525, 8573), 'os.path.join', 'os.path.join', (['curr_dir', '"""_background_noise_more"""'], {}), "(curr_dir, '_background_noise_more')\n", (8537, 8573), False, 'import os\n'), ((8759, 8783), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (8780, 8783), True, 'import numpy as np\n'), ((8840, 8872), 
'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (8852, 8872), False, 'import os\n'), ((9497, 9533), 'os.path.join', 'os.path.join', (['silence_path', 'filename'], {}), '(silence_path, filename)\n', (9509, 9533), False, 'import os\n'), ((9633, 9712), 'logging.info', 'logging.info', (['f"""Generate more background for {file_path}. => {new_list_file} !"""'], {}), "(f'Generate more background for {file_path}. => {new_list_file} !')\n", (9645, 9712), False, 'import logging\n'), ((9770, 9863), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Speech and backgound data download and preprocess"""'}), "(description=\n 'Speech and backgound data download and preprocess')\n", (9793, 9863), False, 'import argparse\n'), ((11122, 11162), 'os.path.join', 'os.path.join', (['speech_data_root', 'data_set'], {}), '(speech_data_root, data_set)\n', (11134, 11162), False, 'import os\n'), ((11223, 11262), 'logging.info', 'logging.info', (['f"""Working on: {data_set}"""'], {}), "(f'Working on: {data_set}')\n", (11235, 11262), False, 'import logging\n'), ((11643, 11678), 'logging.info', 'logging.info', (['f"""Split speech data!"""'], {}), "(f'Split speech data!')\n", (11655, 11678), False, 'import logging\n'), ((11891, 11930), 'logging.info', 'logging.info', (['f"""Split background data!"""'], {}), "(f'Split background data!')\n", (11903, 11930), False, 'import logging\n'), ((12088, 12139), 'logging.info', 'logging.info', (['f"""=== Write speech data to manifest!"""'], {}), "(f'=== Write speech data to manifest!')\n", (12100, 12139), False, 'import logging\n'), ((12623, 12735), 'logging.info', 'logging.info', (['f"""Val: Skip {skip_num_val} samples. Get {speech_seg_num_val} segments! => {speech_val} """'], {}), "(\n f'Val: Skip {skip_num_val} samples. Get {speech_seg_num_val} segments! 
=> {speech_val} '\n )\n", (12635, 12735), False, 'import logging\n'), ((12730, 12845), 'logging.info', 'logging.info', (['f"""Test: Skip {skip_num_test} samples. Get {speech_seg_num_test} segments! => {speech_test}"""'], {}), "(\n f'Test: Skip {skip_num_test} samples. Get {speech_seg_num_test} segments! => {speech_test}'\n )\n", (12742, 12845), False, 'import logging\n'), ((12840, 12958), 'logging.info', 'logging.info', (['f"""Train: Skip {skip_num_train} samples. Get {speech_seg_num_train} segments!=> {speech_train}"""'], {}), "(\n f'Train: Skip {skip_num_train} samples. Get {speech_seg_num_train} segments!=> {speech_train}'\n )\n", (12852, 12958), False, 'import logging\n'), ((13555, 13610), 'logging.info', 'logging.info', (['f"""=== Write background data to manifest!"""'], {}), "(f'=== Write background data to manifest!')\n", (13567, 13610), False, 'import logging\n'), ((14142, 14261), 'logging.info', 'logging.info', (['f"""Val: Skip {skip_num_val} samples. Get {background_seg_num_val} segments! => {background_val}"""'], {}), "(\n f'Val: Skip {skip_num_val} samples. Get {background_seg_num_val} segments! => {background_val}'\n )\n", (14154, 14261), False, 'import logging\n'), ((14256, 14379), 'logging.info', 'logging.info', (['f"""Test: Skip {skip_num_test} samples. Get {background_seg_num_test} segments! => {background_test}"""'], {}), "(\n f'Test: Skip {skip_num_test} samples. Get {background_seg_num_test} segments! => {background_test}'\n )\n", (14268, 14379), False, 'import logging\n'), ((14374, 14501), 'logging.info', 'logging.info', (['f"""Train: Skip {skip_num_train} samples. Get {background_seg_num_train} segments! => {background_train}"""'], {}), "(\n f'Train: Skip {skip_num_train} samples. Get {background_seg_num_train} segments! 
=> {background_train}'\n )\n", (14386, 14501), False, 'import logging\n'), ((14939, 14980), 'logging.info', 'logging.info', (['"""Done generating manifest!"""'], {}), "('Done generating manifest!')\n", (14951, 14980), False, 'import logging\n'), ((1185, 1212), 'os.path.exists', 'os.path.exists', (['destination'], {}), '(destination)\n', (1199, 1212), False, 'import os\n'), ((1222, 1284), 'logging.info', 'logging.info', (['f"""{destination} does not exist. Downloading ..."""'], {}), "(f'{destination} does not exist. Downloading ...')\n", (1234, 1284), False, 'import logging\n'), ((1367, 1411), 'os.rename', 'os.rename', (["(destination + '.tmp')", 'destination'], {}), "(destination + '.tmp', destination)\n", (1376, 1411), False, 'import os\n'), ((1420, 1462), 'logging.info', 'logging.info', (['f"""Downloaded {destination}."""'], {}), "(f'Downloaded {destination}.')\n", (1432, 1462), False, 'import logging\n'), ((1481, 1541), 'logging.info', 'logging.info', (['f"""Destination {destination} exists. Skipping."""'], {}), "(f'Destination {destination} exists. Skipping.')\n", (1493, 1541), False, 'import logging\n'), ((1638, 1660), 'tarfile.open', 'tarfile.open', (['filepath'], {}), '(filepath)\n', (1650, 1660), False, 'import tarfile\n'), ((1881, 1905), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (1895, 1905), False, 'import os\n'), ((1966, 2033), 'logging.info', 'logging.info', (['f"""Skipping extracting. Data already there {data_dir}"""'], {}), "(f'Skipping extracting. 
Data already there {data_dir}')\n", (1978, 2033), False, 'import logging\n'), ((2170, 2190), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2180, 2190), False, 'import os\n'), ((2395, 2415), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2405, 2415), False, 'import os\n'), ((5406, 5429), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (5420, 5429), False, 'import os\n'), ((5439, 5505), 'logging.info', 'logging.info', (['f"""Outdir {out_dir} does not exist. Creat directory."""'], {}), "(f'Outdir {out_dir} does not exist. Creat directory.')\n", (5451, 5505), False, 'import logging\n'), ((5514, 5531), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (5522, 5531), False, 'import os\n'), ((7999, 8041), 'numpy.random.choice', 'np.random.choice', (['data', 'num'], {'replace': '(False)'}), '(data, num, replace=False)\n', (8015, 8041), True, 'import numpy as np\n'), ((8078, 8119), 'numpy.random.choice', 'np.random.choice', (['data', 'num'], {'replace': '(True)'}), '(data, num, replace=True)\n', (8094, 8119), True, 'import numpy as np\n'), ((8586, 8614), 'os.path.exists', 'os.path.exists', (['silence_path'], {}), '(silence_path)\n', (8600, 8614), False, 'import os\n'), ((8624, 8646), 'os.mkdir', 'os.mkdir', (['silence_path'], {}), '(silence_path)\n', (8632, 8646), False, 'import os\n'), ((9000, 9036), 'librosa.load', 'librosa.load', (['file'], {'sr': 'sampling_rate'}), '(file, sr=sampling_rate)\n', (9012, 9036), False, 'import librosa\n'), ((10937, 10977), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (10956, 10977), False, 'import logging\n'), ((11314, 11348), 'os.path.exists', 'os.path.exists', (['speech_data_folder'], {}), '(speech_data_folder)\n', (11328, 11348), False, 'import os\n'), ((11370, 11423), 'os.path.join', 'os.path.join', (['speech_data_root', "(data_set + '.tar.bz2')"], {}), "(speech_data_root, data_set + '.tar.bz2')\n", (11382, 
11423), False, 'import os\n'), ((11432, 11467), 'logging.info', 'logging.info', (['f"""Getting {data_set}"""'], {}), "(f'Getting {data_set}')\n", (11444, 11467), False, 'import logging\n'), ((11522, 11560), 'logging.info', 'logging.info', (['f"""Extracting {data_set}"""'], {}), "(f'Extracting {data_set}')\n", (11534, 11560), False, 'import logging\n'), ((13072, 13129), 'logging.info', 'logging.info', (['"""Start generate more background noise data"""'], {}), "('Start generate more background noise data')\n", (13084, 13129), False, 'import logging\n'), ((17294, 17355), 'logging.info', 'logging.info', (['"""Don\'t rebalance number of samples in classes."""'], {}), '("Don\'t rebalance number of samples in classes.")\n', (17306, 17355), False, 'import logging\n'), ((1744, 1796), 'logging.info', 'logging.info', (['"""Not extracting. Maybe already there?"""'], {}), "('Not extracting. Maybe already there?')\n", (1756, 1796), False, 'import logging\n'), ((2970, 3026), 'os.path.join', 'os.path.join', (['data_dir', "(file_type + '_training_list.txt')"], {}), "(data_dir, file_type + '_training_list.txt')\n", (2982, 3026), False, 'import os\n'), ((3101, 3156), 'os.path.join', 'os.path.join', (['data_dir', "(file_type + '_testing_list.txt')"], {}), "(data_dir, file_type + '_testing_list.txt')\n", (3113, 3156), False, 'import os\n'), ((3230, 3288), 'os.path.join', 'os.path.join', (['data_dir', "(file_type + '_validation_list.txt')"], {}), "(data_dir, file_type + '_validation_list.txt')\n", (3242, 3288), False, 'import os\n'), ((3877, 3919), 'os.path.join', 'os.path.join', (['data_dir', '"""testing_list.txt"""'], {}), "(data_dir, 'testing_list.txt')\n", (3889, 3919), False, 'import os\n'), ((4004, 4049), 'os.path.join', 'os.path.join', (['data_dir', '"""validation_list.txt"""'], {}), "(data_dir, 'validation_list.txt')\n", (4016, 4049), False, 'import os\n'), ((4290, 4333), 'os.path.join', 'os.path.join', (['data_dir', '"""training_list.txt"""'], {}), "(data_dir, 
'training_list.txt')\n", (4302, 4333), False, 'import os\n'), ((7825, 7841), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7835, 7841), False, 'import json\n'), ((8227, 8245), 'json.dump', 'json.dump', (['i', 'fout'], {}), '(i, fout)\n', (8236, 8245), False, 'import json\n'), ((9325, 9362), 'os.path.join', 'os.path.join', (['silence_path', 'file_name'], {}), '(silence_path, file_name)\n', (9337, 9362), False, 'import os\n'), ((9375, 9427), 'librosa.output.write_wav', 'librosa.output.write_wav', (['out_file_path', 'y_slice', 'sr'], {}), '(out_file_path, y_slice, sr)\n', (9399, 9427), False, 'import librosa\n'), ((15219, 15326), 'logging.info', 'logging.info', (['f"""Rebalancing number of samples in classes using {args.rebalance_method} sampling."""'], {}), "(\n f'Rebalancing number of samples in classes using {args.rebalance_method} sampling.'\n )\n", (15231, 15326), False, 'import logging\n'), ((15329, 15397), 'logging.info', 'logging.info', (['f"""Val: {min_val} Test: {min_test} Train: {min_train}!"""'], {}), "(f'Val: {min_val} Test: {min_test} Train: {min_train}!')\n", (15341, 15397), False, 'import logging\n'), ((15895, 16002), 'logging.info', 'logging.info', (['f"""Rebalancing number of samples in classes using {args.rebalance_method} sampling."""'], {}), "(\n f'Rebalancing number of samples in classes using {args.rebalance_method} sampling.'\n )\n", (15907, 16002), False, 'import logging\n'), ((16005, 16073), 'logging.info', 'logging.info', (['f"""Val: {max_val} Test: {max_test} Train: {max_train}!"""'], {}), "(f'Val: {max_val} Test: {max_test} Train: {max_train}!')\n", (16017, 16073), False, 'import logging\n'), ((16639, 16746), 'logging.info', 'logging.info', (['f"""Rebalancing number of samples in classes using {args.rebalance_method} sampling."""'], {}), "(\n f'Rebalancing number of samples in classes using {args.rebalance_method} sampling.'\n )\n", (16651, 16746), False, 'import logging\n'), ((16749, 16823), 'logging.info', 'logging.info', 
(['f"""Val: {fixed_val} Test: {fixed_test} Train: {fixed_train}!"""'], {}), "(f'Val: {fixed_val} Test: {fixed_test} Train: {fixed_train}!')\n", (16761, 16823), False, 'import logging\n'), ((2446, 2471), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (2458, 2471), False, 'import os\n'), ((3662, 3687), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (3674, 3687), False, 'import os\n'), ((5737, 5762), 'librosa.load', 'librosa.load', (['file'], {'sr': 'sr'}), '(file, sr=sr)\n', (5749, 5762), False, 'import librosa\n'), ((5790, 5820), 'librosa.get_duration', 'librosa.get_duration', (['x'], {'sr': 'sr'}), '(x, sr=sr)\n', (5810, 5820), False, 'import librosa\n'), ((6917, 6942), 'json.dump', 'json.dump', (['metadata', 'fout'], {}), '(metadata, fout)\n', (6926, 6942), False, 'import json\n'), ((2221, 2246), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (2233, 2246), False, 'import os\n'), ((2715, 2740), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (2727, 2740), False, 'import os\n'), ((3766, 3791), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (3778, 3791), False, 'import os\n'), ((2329, 2354), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (2341, 2354), False, 'import os\n'), ((2509, 2534), 'os.path.join', 'os.path.join', (['data_dir', 'o'], {}), '(data_dir, o)\n', (2521, 2534), False, 'import os\n')] |
# coding:utf-8
# Author: 阿财(<EMAIL>)(<EMAIL>)
# Created date: 2020-02-27
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
import empyrical
#from PyEMD import EEMD, EMD, Visualisation
from GolemQ.analysis.timeseries import *
from GolemQ.utils.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
FEATURES as FTR
)
from GolemQ.portfolio.utils import (
calc_onhold_returns_v2,
)
"""
# EEMD分解
# 经验模式分解(empirical mode decomposition, EMD)方法是Huang提出的,
# 它是一种新的时频分析方法, 而且是一种自适应的时频局部化分析方法:
# ①IMF与采样频率相关;
# ②它基于数据本身变化。
# 这点是EMD分解优于快速傅立叶(FFT)变换和小波(Wavelet)变换方法的地方
"""
def calc_eemd_func(features,):
    '''
    Decompose the MAXFACTOR indicator with Ensemble Empirical Mode
    Decomposition (EEMD).

    EEMD is an adaptive, data-driven time-frequency analysis method:
    the resulting IMFs depend on the sampling rate and on the data
    itself, which is its advantage over FFT and wavelet transforms for
    non-stationary series.

    Returns:
        tuple: ``(imfs, imfNo)`` where ``imfs`` is a 2D array left-padded
        with NaN columns so its width matches ``len(features)``, and
        ``imfNo`` is the number of IMF components found.
    '''
    max_imf = 17

    # Decompose only the non-NaN portion of the factor series.
    signal = features[FLD.MAXFACTOR].dropna().values
    timeline = range(len(signal))

    decomposer = EEMD()
    decomposer.trials = 50
    decomposer.noise_seed(12345)
    imfs = decomposer.eemd(signal, timeline, max_imf)
    imfNo = imfs.shape[0]
    #emd = EMD()
    #emd.emd(S)
    #imfs, res = emd.get_imfs_and_residue()
    #imfNo = imfs.shape[0]
    t_min, t_max = np.min(timeline), np.max(timeline)

    # Left-pad every IMF with NaN so it lines up with the full-length
    # (NaN-containing) factor column.
    full_series = features[FLD.MAXFACTOR].values
    pad_width = len(full_series) - len(timeline)
    timeline = range(len(full_series))
    pad = np.full((imfNo, pad_width), np.nan)
    imfs = np.c_[pad, imfs]
    return imfs, imfNo
def calc_best_imf_periods(features, imfs, imfNo,
                          symbol=None, display_name=None, taxfee=0.0003, annual=252, verbose=False):
    '''
    Infer trading-strategy periods from EEMD IMF components: pick the
    IMF that fits the main rally wave and the ones fitting secondary waves.

    Every IMF is scored as a naive long/flat timing signal (long while the
    IMF is rising). IMF "levels" whose simulated annual return,
    per-transaction return and turnover pass a set of empirical thresholds
    are kept, and their derived columns (BEST_IMF1..4 and friends) are
    written back into ``features``.

    Args:
        features: DataFrame of indicator columns; must contain
            FLD.PCT_CHANGE and FLD.DEA_ZERO_TURNOVER_RATIO.
        imfs: 2D array of IMF components, shape (imfNo, len(features)).
        imfNo: number of IMF rows in ``imfs``.
        symbol: instrument code; read from index level 1 when None.
        display_name: human-readable name; defaults to ``symbol``.
        taxfee: per-transaction cost used to penalize turnover.
        annual: bars per year used for annualization (252 = daily bars).
        verbose: print per-IMF diagnostics when True.

    Returns:
        ``features`` with the BEST_IMF* columns added.
    '''
    if (symbol is None):
        symbol = features.index.get_level_values(level=1)[0]
    if (display_name is None):
        display_name = symbol

    # One row per IMF "level" with its simulated strategy statistics.
    imf_periods = pd.DataFrame(columns=[AKA.CODE,
                                          'imf_num',
                                          'imf',
                                          'position',
                                          FLD.TURNOVER_RATIO,
                                          FLD.TURNOVER_RATIO_MEAN,
                                          'returns',
                                          FLD.TRANSACTION_RETURN_MEAN,
                                          FLD.ANNUAL_RETURN,
                                          FLD.ANNUAL_RETURN_MEAN])
    dea_zero_turnover_ratio_mean = features[FLD.DEA_ZERO_TURNOVER_RATIO].dropna().mean()
    best_imf4_candicate = None
    with np.errstate(invalid='ignore', divide='ignore'):
        for num in range(imfNo):
            # Long (1) while the IMF is rising relative to the previous bar.
            wavelet_cross = np.where(imfs[num] > np.r_[0, imfs[num, :-1]], 1, 0)
            # Turnover analysis, simplified: all-in/all-out positions,
            # i.e. every position flip counts as 100% turnover.
            wavelet_turnover = np.where(wavelet_cross != np.r_[0, wavelet_cross[:-1]], 1, 0)
            wavelet_turnover_ratio = rolling_sum(wavelet_turnover, annual)
            # Signal is shifted one bar: today's position earns tomorrow's return.
            wavelet_returns = features[FLD.PCT_CHANGE] * np.r_[0, wavelet_cross[:-1]]
            wavelet_annual_return = wavelet_returns.rolling(annual).apply(lambda x:
                                                                          empyrical.annual_return(x, annualization=annual),
                                                                          raw=True)
            turnover_ratio_mean = np.mean(wavelet_turnover_ratio[annual:])
            # Annual return net of transaction costs (turnover * taxfee).
            annual_return_mean = np.mean((wavelet_annual_return.values - wavelet_turnover_ratio * taxfee)[annual:])
            imf_periods = imf_periods.append(pd.Series({AKA.CODE:symbol,
                                                        'imf_num':num,
                                                        'imf':imfs[num],
                                                        ST.POSITION:wavelet_cross,
                                                        FLD.TURNOVER_RATIO:wavelet_turnover_ratio,
                                                        FLD.TURNOVER_RATIO_MEAN:turnover_ratio_mean,
                                                        'returns':wavelet_returns.values,
                                                        FLD.TRANSACTION_RETURN_MEAN: annual_return_mean / (turnover_ratio_mean if (turnover_ratio_mean > 2) else 2 / 2),
                                                        FLD.ANNUAL_RETURN:wavelet_annual_return.values - wavelet_turnover_ratio * taxfee,
                                                        FLD.ANNUAL_RETURN_MEAN:annual_return_mean,},
                                                       name=u"级别 {}".format(num + 1)))

            # Look for the "level 4" IMF (synchronized with the DEA zero axis).
            wavelet_cross_timing_lag = calc_event_timing_lag(np.where(imfs[num] > np.r_[0, imfs[num, :-1]], 1, -1))
            if (dea_zero_turnover_ratio_mean > turnover_ratio_mean) and \
                (dea_zero_turnover_ratio_mean < (turnover_ratio_mean + 1)):
                # Suspected signature of a one-sided (trending) market — unverified.
                best_imf4_candicate = u"级别 {}".format(num + 1)
            elif (dea_zero_turnover_ratio_mean < turnover_ratio_mean) and \
                (dea_zero_turnover_ratio_mean * 2 > turnover_ratio_mean):
                # 2x sampling rate should, in theory, best approximate the
                # DEA wave — not mathematically proven.
                best_imf4_candicate = u"级别 {}".format(num + 1)
            if (verbose):
                print('{} 级别{} 年化 returns:{:.2%}, 换手率 turnover:{:.0%}'.format(symbol, num + 1,
                                                                              wavelet_annual_return[-1].item(),
                                                                              wavelet_turnover_ratio[-1].item()))
    imf_periods = imf_periods.sort_values(by=FLD.ANNUAL_RETURN_MEAN,
                                          ascending=False)
    #print(imf_periods[[FLD.ANNUAL_RETURN_MEAN,
    #    FLD.TRANSACTION_RETURN_MEAN,
    #    FLD.TURNOVER_RATIO_MEAN]])

    # Keep only the 4 best IMF cycles, sorted by annualized return:
    # e.g. annual return > 9.27% with average per-transaction return > 3.82%
    # and more than one trading opportunity per year, or one of the
    # alternative threshold sets below. These become the reference periods
    # the strategy trades on; anything yielding below ~9% a year is dropped.
    best_imf_periods = imf_periods.query('({}>0.0927 & {}>0.0382 & {}>3.82) | \
        ({}>0.168 & {}>0.0168 & {}>0.618) | \
        ({}>0.168 & {}>0.00618 & {}<82) | \
        ({}>0.382 & {}>0.618 & {}>0.618) | \
        ({}>0.927 & {}>3.82)'.format(FLD.ANNUAL_RETURN_MEAN,
                                     FLD.TRANSACTION_RETURN_MEAN,
                                     FLD.TURNOVER_RATIO_MEAN,
                                     FLD.ANNUAL_RETURN_MEAN,
                                     FLD.TRANSACTION_RETURN_MEAN,
                                     FLD.TURNOVER_RATIO_MEAN,
                                     FLD.ANNUAL_RETURN_MEAN,
                                     FLD.TRANSACTION_RETURN_MEAN,
                                     FLD.TURNOVER_RATIO_MEAN,
                                     FLD.ANNUAL_RETURN_MEAN,
                                     FLD.TURNOVER_RATIO_MEAN,
                                     FLD.TRANSACTION_RETURN_MEAN,
                                     FLD.ANNUAL_RETURN_MEAN,
                                     FLD.TURNOVER_RATIO_MEAN,)).head(4).copy()
    if (len(best_imf_periods) < 4):
        # Not enough qualifying levels — top up from the best of the rest.
        print(symbol, u'入选的级别数量不足({} of 4),尝试补全'.format(len(best_imf_periods)))
        rest_imf_periods = imf_periods.loc[imf_periods.index.difference(best_imf_periods.index), :].copy()
        rest_imf_periods = rest_imf_periods.sort_values(by=FLD.ANNUAL_RETURN_MEAN,
                                                        ascending=False)
        if (verbose):
            print(rest_imf_periods[[AKA.CODE,
                                    FLD.ANNUAL_RETURN_MEAN,
                                    FLD.TRANSACTION_RETURN_MEAN,
                                    FLD.TURNOVER_RATIO_MEAN]])
        if (len(best_imf_periods) < 3):
            best_imf_periods = best_imf_periods.append(imf_periods.loc[rest_imf_periods.index[[0, 1]], :])
        else:
            best_imf_periods = best_imf_periods.append(imf_periods.loc[rest_imf_periods.index[0], :])
    if (annual > 253):
        # Intraday bars: prefer the faster (higher-turnover) cycles first.
        best_imf_periods = best_imf_periods.sort_values(by=FLD.TURNOVER_RATIO_MEAN,
                                                        ascending=False)
        if (verbose):
            print(best_imf_periods[[AKA.CODE,
                                    FLD.ANNUAL_RETURN_MEAN,
                                    FLD.TRANSACTION_RETURN_MEAN,
                                    FLD.TURNOVER_RATIO_MEAN]])
    if (len(best_imf_periods) >= 2):
        if (annual > 253) and \
            (best_imf_periods.loc[best_imf_periods.index[0], FLD.TURNOVER_RATIO_MEAN] > 200):
            # Fastest cycle churns too much (>200 turnover/yr); swap the
            # top two so IMF1 is the slower of the pair.
            features[FTR.BEST_IMF1_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[1],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF1] = best_imf_periods.loc[best_imf_periods.index[1], 'imf']
            features[FTR.BEST_IMF1_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[1], FLD.ANNUAL_RETURN]
            features[FTR.BEST_IMF1_NORM] = rolling_pctrank(features[FTR.BEST_IMF1].values, 84)
            features[FTR.BEST_IMF2_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[0],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF2] = best_imf_periods.loc[best_imf_periods.index[0], 'imf']
            features[FTR.BEST_IMF2_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[0], FLD.ANNUAL_RETURN]
            features[FTR.BEST_IMF2_NORM] = rolling_pctrank(features[FTR.BEST_IMF2].values, 84)
        else:
            features[FTR.BEST_IMF1_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[0],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF1] = best_imf_periods.loc[best_imf_periods.index[0], 'imf']
            features[FTR.BEST_IMF1_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[0], FLD.ANNUAL_RETURN]
            features[FTR.BEST_IMF1_NORM] = rolling_pctrank(features[FTR.BEST_IMF1].values, 84)
            features[FTR.BEST_IMF2_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[1],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF2] = best_imf_periods.loc[best_imf_periods.index[1], 'imf']
            features[FTR.BEST_IMF2_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[1], FLD.ANNUAL_RETURN]
            features[FTR.BEST_IMF2_NORM] = rolling_pctrank(features[FTR.BEST_IMF2].values, 84)
    if (len(best_imf_periods) >= 3):
        if (best_imf4_candicate is None) or \
            (best_imf_periods.index[2] != best_imf4_candicate) or \
            (len(best_imf_periods) < 4):
            features[FTR.BEST_IMF3_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[2],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF3] = best_imf_periods.loc[best_imf_periods.index[2], 'imf']
            features[FTR.BEST_IMF3_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[2], FLD.ANNUAL_RETURN]
        else:
            # Slot 2 is already the DEA-synchronized candidate reserved for
            # IMF4 — use slot 3 for IMF3 and duplicate it over slot 2.
            features[FTR.BEST_IMF3_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[3],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF3] = best_imf_periods.loc[best_imf_periods.index[3], 'imf']
            features[FTR.BEST_IMF3_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[3], FLD.ANNUAL_RETURN]
            best_imf_periods.loc[best_imf_periods.index[2], :] = best_imf_periods.loc[best_imf_periods.index[3], :]
        features[FTR.BEST_IMF3_NORM] = rolling_pctrank(features[FTR.BEST_IMF3].values, 84)
        if (verbose):
            print(best_imf_periods[[AKA.CODE,
                                    FLD.ANNUAL_RETURN_MEAN,
                                    FLD.TRANSACTION_RETURN_MEAN,
                                    FLD.TURNOVER_RATIO_MEAN]])
    if (len(best_imf_periods) >= 4) or \
        (best_imf4_candicate is not None):
        if (best_imf4_candicate is None):
            features[FTR.BEST_IMF4_TIMING_LAG] = calc_event_timing_lag(np.where(best_imf_periods.loc[best_imf_periods.index[3],
                                                                                                 ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF4] = best_imf_periods.loc[best_imf_periods.index[3], 'imf']
            features[FTR.BEST_IMF4_ANNUAL_RETURN] = best_imf_periods.loc[best_imf_periods.index[3], FLD.ANNUAL_RETURN]
            features[FLD.MACD_TREND_DENSITY] = best_imf_periods.loc[best_imf_periods.index[3], FLD.TURNOVER_RATIO]
            print(symbol, display_name, '1 震荡',
                  round(best_imf_periods.loc[best_imf_periods.index[3],
                                             FLD.TURNOVER_RATIO_MEAN], 3),
                  round(dea_zero_turnover_ratio_mean, 3))
        else:
            features[FTR.BEST_IMF4_TIMING_LAG] = calc_event_timing_lag(np.where(imf_periods.loc[best_imf4_candicate,
                                                                                            ST.POSITION] > 0,1,-1))
            features[FTR.BEST_IMF4] = imf_periods.loc[best_imf4_candicate, 'imf']
            features[FTR.BEST_IMF4_ANNUAL_RETURN] = imf_periods.loc[best_imf4_candicate, FLD.ANNUAL_RETURN]
            features[FLD.MACD_TREND_DENSITY] = imf_periods.loc[best_imf4_candicate]
            best_imf_periods.loc[best_imf_periods.index[3], :] = imf_periods.loc[best_imf4_candicate, :]
            print(symbol, best_imf4_candicate,
                  '2 大概率主升浪' if (imf_periods.loc[best_imf4_candicate, FLD.TURNOVER_RATIO_MEAN] < 6.18) else '2 震荡',
                  round(imf_periods.loc[best_imf4_candicate,
                                        FLD.TURNOVER_RATIO_MEAN], 3),
                  round(dea_zero_turnover_ratio_mean, 3),)
        features[FTR.BEST_IMF4_NORM] = rolling_pctrank(features[FTR.BEST_IMF4].values, 84)

    #print(best_imf_periods[[AKA.CODE,
    #    FLD.ANNUAL_RETURN_MEAN,
    #    FLD.TRANSACTION_RETURN_MEAN,
    #    FLD.TURNOVER_RATIO_MEAN]])
    features[FTR.BEST_IMF1_RETURNS] = calc_onhold_returns_v2(features[FLD.PCT_CHANGE].values,
                                                             features[FTR.BEST_IMF1_TIMING_LAG].values)
    features[FTR.BEST_IMF1_TRANSACTION_RETURNS] = np.where((features[FTR.BEST_IMF1_TIMING_LAG] <= 0) & \
        (features[FTR.BEST_IMF1_TIMING_LAG].shift() > 0),features[FTR.BEST_IMF1_RETURNS], 0)
    return features

    # NOTE(review): as transcribed, everything below follows the
    # function-level `return features` and appears unreachable — it also
    # references `calc_strategy_stats` and `QA`, which are not imported in
    # the visible file header. Possibly residue from another function;
    # confirm the intended control flow before relying on it.
    portfolio_briefs = pd.DataFrame(columns=['symbol',
                                          'name',
                                          'portfolio',
                                          'sharpe',
                                          'annual_return',
                                          'max_drawdown',
                                          'turnover_ratio'])
    if (FTR.BEST_IMF1_TIMING_LAG in features.columns) and (FTR.BEST_IMF2_TIMING_LAG in features.columns):
        # Boost signal: long when the two best IMF timing lags agree on long.
        wavelet_cross = np.where((features[FTR.BEST_IMF1_TIMING_LAG] + features[FTR.BEST_IMF2_TIMING_LAG]) > 0, 1, 0)
        features[FTR.BOOST_IMF_TIMING_LAG] = wavelet_cross
        portfolio_briefs = portfolio_briefs.append(calc_strategy_stats(symbol,
                                                                        display_name,
                                                                        'EEMD Boost',
                                                                        wavelet_cross,
                                                                        features), ignore_index=True)
    else:
        print('Code {}, {} EMD分析失败。\n'.format(symbol, display_name))
        QA.QA_util_log_info('Code {}, {} EMD分析失败。\n'.format(symbol, display_name))
| [
"pandas.DataFrame",
"GolemQ.portfolio.utils.calc_onhold_returns_v2",
"numpy.full",
"empyrical.annual_return",
"numpy.errstate",
"numpy.min",
"numpy.max",
"numpy.where",
"numpy.mean"
] | [((2924, 3124), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "[AKA.CODE, 'imf_num', 'imf', 'position', FLD.TURNOVER_RATIO, FLD.\n TURNOVER_RATIO_MEAN, 'returns', FLD.TRANSACTION_RETURN_MEAN, FLD.\n ANNUAL_RETURN, FLD.ANNUAL_RETURN_MEAN]"}), "(columns=[AKA.CODE, 'imf_num', 'imf', 'position', FLD.\n TURNOVER_RATIO, FLD.TURNOVER_RATIO_MEAN, 'returns', FLD.\n TRANSACTION_RETURN_MEAN, FLD.ANNUAL_RETURN, FLD.ANNUAL_RETURN_MEAN])\n", (2936, 3124), True, 'import pandas as pd\n'), ((16451, 16554), 'GolemQ.portfolio.utils.calc_onhold_returns_v2', 'calc_onhold_returns_v2', (['features[FLD.PCT_CHANGE].values', 'features[FTR.BEST_IMF1_TIMING_LAG].values'], {}), '(features[FLD.PCT_CHANGE].values, features[FTR.\n BEST_IMF1_TIMING_LAG].values)\n', (16473, 16554), False, 'from GolemQ.portfolio.utils import calc_onhold_returns_v2\n'), ((16855, 16973), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['symbol', 'name', 'portfolio', 'sharpe', 'annual_return', 'max_drawdown',\n 'turnover_ratio']"}), "(columns=['symbol', 'name', 'portfolio', 'sharpe',\n 'annual_return', 'max_drawdown', 'turnover_ratio'])\n", (16867, 16973), True, 'import pandas as pd\n'), ((2332, 2341), 'numpy.min', 'np.min', (['T'], {}), '(T)\n', (2338, 2341), True, 'import numpy as np\n'), ((2343, 2352), 'numpy.max', 'np.max', (['T'], {}), '(T)\n', (2349, 2352), True, 'import numpy as np\n'), ((3607, 3653), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (3618, 3653), True, 'import numpy as np\n'), ((17370, 17466), 'numpy.where', 'np.where', (['(features[FTR.BEST_IMF1_TIMING_LAG] + features[FTR.BEST_IMF2_TIMING_LAG] > 0)', '(1)', '(0)'], {}), '(features[FTR.BEST_IMF1_TIMING_LAG] + features[FTR.\n BEST_IMF2_TIMING_LAG] > 0, 1, 0)\n', (17378, 17466), True, 'import numpy as np\n'), ((2474, 2507), 'numpy.full', 'np.full', (['(imfNo, leakNum)', 'np.nan'], {}), '((imfNo, leakNum), np.nan)\n', (2481, 2507), True, 'import 
numpy as np\n'), ((3717, 3769), 'numpy.where', 'np.where', (['(imfs[num] > np.r_[0, imfs[num, :-1]])', '(1)', '(0)'], {}), '(imfs[num] > np.r_[0, imfs[num, :-1]], 1, 0)\n', (3725, 3769), True, 'import numpy as np\n'), ((3851, 3912), 'numpy.where', 'np.where', (['(wavelet_cross != np.r_[0, wavelet_cross[:-1]])', '(1)', '(0)'], {}), '(wavelet_cross != np.r_[0, wavelet_cross[:-1]], 1, 0)\n', (3859, 3912), True, 'import numpy as np\n'), ((4395, 4435), 'numpy.mean', 'np.mean', (['wavelet_turnover_ratio[annual:]'], {}), '(wavelet_turnover_ratio[annual:])\n', (4402, 4435), True, 'import numpy as np\n'), ((4469, 4556), 'numpy.mean', 'np.mean', (['(wavelet_annual_return.values - wavelet_turnover_ratio * taxfee)[annual:]'], {}), '((wavelet_annual_return.values - wavelet_turnover_ratio * taxfee)[\n annual:])\n', (4476, 4556), True, 'import numpy as np\n'), ((5674, 5727), 'numpy.where', 'np.where', (['(imfs[num] > np.r_[0, imfs[num, :-1]])', '(1)', '(-1)'], {}), '(imfs[num] > np.r_[0, imfs[num, :-1]], 1, -1)\n', (5682, 5727), True, 'import numpy as np\n'), ((10393, 10479), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[1], ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(best_imf_periods.loc[best_imf_periods.index[1], ST.POSITION] > 0, \n 1, -1)\n', (10401, 10479), True, 'import numpy as np\n'), ((10955, 11041), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[0], ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(best_imf_periods.loc[best_imf_periods.index[0], ST.POSITION] > 0, \n 1, -1)\n', (10963, 11041), True, 'import numpy as np\n'), ((11530, 11616), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[0], ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(best_imf_periods.loc[best_imf_periods.index[0], ST.POSITION] > 0, \n 1, -1)\n', (11538, 11616), True, 'import numpy as np\n'), ((12092, 12178), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[1], ST.POSITION] > 0)', '(1)', '(-1)'], {}), 
'(best_imf_periods.loc[best_imf_periods.index[1], ST.POSITION] > 0, \n 1, -1)\n', (12100, 12178), True, 'import numpy as np\n'), ((12846, 12932), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[2], ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(best_imf_periods.loc[best_imf_periods.index[2], ST.POSITION] > 0, \n 1, -1)\n', (12854, 12932), True, 'import numpy as np\n'), ((13326, 13412), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[3], ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(best_imf_periods.loc[best_imf_periods.index[3], ST.POSITION] > 0, \n 1, -1)\n', (13334, 13412), True, 'import numpy as np\n'), ((14361, 14447), 'numpy.where', 'np.where', (['(best_imf_periods.loc[best_imf_periods.index[3], ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(best_imf_periods.loc[best_imf_periods.index[3], ST.POSITION] > 0, \n 1, -1)\n', (14369, 14447), True, 'import numpy as np\n'), ((15212, 15282), 'numpy.where', 'np.where', (['(imf_periods.loc[best_imf4_candicate, ST.POSITION] > 0)', '(1)', '(-1)'], {}), '(imf_periods.loc[best_imf4_candicate, ST.POSITION] > 0, 1, -1)\n', (15220, 15282), True, 'import numpy as np\n'), ((4229, 4277), 'empyrical.annual_return', 'empyrical.annual_return', (['x'], {'annualization': 'annual'}), '(x, annualization=annual)\n', (4252, 4277), False, 'import empyrical\n')] |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
CityScapes Segmentation data-iterator code.
'''
import os
import numpy as np
import cv2
import nnabla as nn
from nnabla.utils.data_iterator import data_iterator_simple
from nnabla.utils.image_utils import imread
import image_preprocess
class CityScapesDatasetPath(object):
    """Resolve image and label file paths for the CityScapes dataset.

    The dataset root ``data_dir`` is expected to contain ``train.txt`` and
    ``val.txt`` (one sample name per line) plus the ``leftImg8bit`` and
    ``gtFine`` folders holding the actual image files.
    """

    def __init__(self, data_dir=None):
        self.data_dir = data_dir
        self.train_file = os.path.join(self.data_dir, 'train.txt')
        self.val_file = os.path.join(self.data_dir, 'val.txt')

    def get_image_path(self, name, train):
        """Return the path of the input image for sample *name*."""
        subdir = 'train' if train else 'val'
        return os.path.join(self.data_dir, 'leftImg8bit', subdir,
                            name + '_leftImg8bit.png')

    def get_label_path(self, name, train):
        """Return the path of the label image for sample *name*."""
        subdir = 'train' if train else 'val'
        return os.path.join(self.data_dir, 'gtFine', subdir,
                            name + '_gtFine_labelTrainIds.png')

    def get_image_paths(self, train=True):
        """Return all input image paths for the train (or val) split."""
        list_file = self.train_file if train else self.val_file
        return [self.get_image_path(sample, train)
                for sample in np.loadtxt(list_file, dtype=str)]

    def get_label_paths(self, train=True):
        """Return all label image paths for the train (or val) split."""
        list_file = self.train_file if train else self.val_file
        return [self.get_label_path(sample, train)
                for sample in np.loadtxt(list_file, dtype=str)]
def palette_png_reader(fname):
    """Read an indexed-color (palette) PNG and return the raw palette indices.

    Label images store the class id in the palette index, so the PIL backend
    is required: it is the only nnabla image backend that can return palette
    indices instead of decoded RGB values.

    Args:
        fname (str): path to the palette PNG file
    Returns:
        numpy array of palette indices
    """
    assert 'PilBackend' in nn.utils.image_utils.get_available_backends()
    if nn.utils.image_utils.get_backend() != 'PilBackend':
        # BUG FIX: the backend was previously set with the misspelled name
        # "PilBackEnd", which does not match the registered 'PilBackend'
        # checked two lines above.
        nn.utils.image_utils.set_backend("PilBackend")
    return imread(fname, return_palette_indices=True)
def data_iterator_segmentation(batch_size, image_paths, label_paths, rng=None, train=True):
    '''
    Returns a data iterator object for semantic image segmentation dataset.

    Args:
        batch_size (int): Batch size
        image_paths (list of str): A list of image paths
        label_paths (list of str): A list of label image paths
        rng (None or numpy.random.RandomState):
            A random number generator used in shuffling dataset and data augmentation.
        train (bool): It performs random data augmentation as preprocessing if train is True.
    '''
    assert len(image_paths) == len(label_paths)
    num_examples = len(image_paths)

    def image_label_load_func(i):
        '''
        Load the i-th sample.

        Returns:
            image: c x h x w array
            label: c x h x w array
        '''
        image = cv2.imread(image_paths[i], cv2.IMREAD_COLOR)
        label = palette_png_reader(label_paths[i])
        # Random augmentation is applied inside preprocessing when training.
        return image_preprocess.preprocess_image_and_label(image, label, rng=rng)

    return data_iterator_simple(image_label_load_func, num_examples,
                                batch_size, shuffle=train, rng=rng)
def data_iterator_cityscapes(batch_size, data_dir, rng=None, train=True):
    '''
    Returns a data iterator object for CityScapes segmentation dataset.

    args:
        data_dir (str):
            A folder containing the CityScapes dataset.

    See `data_iterator_segmentation` for other arguments.
    '''
    paths = CityScapesDatasetPath(data_dir)
    return data_iterator_segmentation(
        batch_size,
        paths.get_image_paths(train=train),
        paths.get_label_paths(train=train),
        rng,
        train,
    )
| [
"nnabla.utils.image_utils.get_backend",
"nnabla.utils.data_iterator.data_iterator_simple",
"image_preprocess.preprocess_image_and_label",
"cv2.imread",
"nnabla.utils.image_utils.set_backend",
"numpy.loadtxt",
"nnabla.utils.image_utils.get_available_backends",
"os.path.join",
"nnabla.utils.image_util... | [((2232, 2274), 'nnabla.utils.image_utils.imread', 'imread', (['fname'], {'return_palette_indices': '(True)'}), '(fname, return_palette_indices=True)\n', (2238, 2274), False, 'from nnabla.utils.image_utils import imread\n'), ((3424, 3521), 'nnabla.utils.data_iterator.data_iterator_simple', 'data_iterator_simple', (['image_label_load_func', 'num_examples', 'batch_size'], {'shuffle': 'train', 'rng': 'rng'}), '(image_label_load_func, num_examples, batch_size,\n shuffle=train, rng=rng)\n', (3444, 3521), False, 'from nnabla.utils.data_iterator import data_iterator_simple\n'), ((1063, 1103), 'os.path.join', 'os.path.join', (['self.data_dir', '"""train.txt"""'], {}), "(self.data_dir, 'train.txt')\n", (1075, 1103), False, 'import os\n'), ((1128, 1166), 'os.path.join', 'os.path.join', (['self.data_dir', '"""val.txt"""'], {}), "(self.data_dir, 'val.txt')\n", (1140, 1166), False, 'import os\n'), ((1271, 1348), 'os.path.join', 'os.path.join', (['self.data_dir', '"""leftImg8bit"""', 'folder', "(name + '_leftImg8bit.png')"], {}), "(self.data_dir, 'leftImg8bit', folder, name + '_leftImg8bit.png')\n", (1283, 1348), False, 'import os\n'), ((1453, 1538), 'os.path.join', 'os.path.join', (['self.data_dir', '"""gtFine"""', 'folder', "(name + '_gtFine_labelTrainIds.png')"], {}), "(self.data_dir, 'gtFine', folder, name +\n '_gtFine_labelTrainIds.png')\n", (1465, 1538), False, 'import os\n'), ((1659, 1691), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {'dtype': 'str'}), '(file_name, dtype=str)\n', (1669, 1691), True, 'import numpy as np\n'), ((1884, 1916), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {'dtype': 'str'}), '(file_name, dtype=str)\n', (1894, 1916), True, 'import numpy as np\n'), ((2061, 2106), 'nnabla.utils.image_utils.get_available_backends', 'nn.utils.image_utils.get_available_backends', ([], {}), '()\n', (2104, 2106), True, 'import nnabla as nn\n'), ((2114, 2148), 'nnabla.utils.image_utils.get_backend', 'nn.utils.image_utils.get_backend', 
([], {}), '()\n', (2146, 2148), True, 'import nnabla as nn\n'), ((2174, 2220), 'nnabla.utils.image_utils.set_backend', 'nn.utils.image_utils.set_backend', (['"""PilBackEnd"""'], {}), "('PilBackEnd')\n", (2206, 2220), True, 'import nnabla as nn\n'), ((3199, 3243), 'cv2.imread', 'cv2.imread', (['image_paths[i]', 'cv2.IMREAD_COLOR'], {}), '(image_paths[i], cv2.IMREAD_COLOR)\n', (3209, 3243), False, 'import cv2\n'), ((3312, 3374), 'image_preprocess.preprocess_image_and_label', 'image_preprocess.preprocess_image_and_label', (['img', 'lab'], {'rng': 'rng'}), '(img, lab, rng=rng)\n', (3355, 3374), False, 'import image_preprocess\n')] |
#!/usr/bin/env python
# coding: utf-8
import math
import numpy as np
# ### Generate the sensor environment
# generate input data streams
def gen_input(mu, mu_c, sigma, tau, L):
    '''
    Generate the observations of one episode with a change point at tau.

    The first tau samples are drawn from N(mu, sigma), the remaining
    L - tau samples from N(mu_c, sigma).

    Args:
        mu    : normal mean of the observations, all zeros
        mu_c  : abnormal mean of the observations
        sigma : assumed covariance matrix of the observations
        tau   : the time that abnormality starts
        L     : total number of observations in one episode
    Returns:
        Xn : (L, d) array of stacked observations for one episode
    '''
    normal_part = np.random.multivariate_normal(mu, sigma, tau)
    abnormal_part = np.random.multivariate_normal(mu_c, sigma, L - tau)
    return np.vstack((normal_part, abnormal_part))
def visualize(Xn):
    '''
    Plot every channel of one generated episode (debugging helper).

    Args:
        Xn : (L, d) array of observations; one line per channel
    '''
    print(Xn.shape)
    import matplotlib.pyplot as plt

    plt.figure()
    sample_idx = np.arange(Xn.shape[0])
    for channel in range(Xn.shape[1]):
        plt.plot(sample_idx, Xn[:, channel],
                 linestyle='-', marker='o', markersize=3)
    plt.xlabel('$x$', fontsize=13)
    plt.ylabel('$y = f(x)$', fontsize=13)
    plt.title('5 different function sampled from a Gaussian process')
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.random.multivariate_normal",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.vstack"
] | [((675, 720), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'sigma', 'tau'], {}), '(mu, sigma, tau)\n', (704, 720), True, 'import numpy as np\n'), ((805, 856), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu_c', 'sigma', '(L - tau)'], {}), '(mu_c, sigma, L - tau)\n', (834, 856), True, 'import numpy as np\n'), ((909, 937), 'numpy.vstack', 'np.vstack', (['(Xn, Xn_abnormal)'], {}), '((Xn, Xn_abnormal))\n', (918, 937), True, 'import numpy as np\n'), ((1196, 1208), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1206, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1239), 'numpy.arange', 'np.arange', (['Xn.shape[0]'], {}), '(Xn.shape[0])\n', (1226, 1239), True, 'import numpy as np\n'), ((1348, 1378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '(13)'}), "('$x$', fontsize=13)\n", (1358, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1383, 1420), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y = f(x)$"""'], {'fontsize': '(13)'}), "('$y = f(x)$', fontsize=13)\n", (1393, 1420), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1490), 'matplotlib.pyplot.title', 'plt.title', (['"""5 different function sampled from a Gaussian process"""'], {}), "('5 different function sampled from a Gaussian process')\n", (1434, 1490), True, 'import matplotlib.pyplot as plt\n'), ((1495, 1505), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1503, 1505), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1343), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Xn[:, i]'], {'linestyle': '"""-"""', 'marker': '"""o"""', 'markersize': '(3)'}), "(X, Xn[:, i], linestyle='-', marker='o', markersize=3)\n", (1289, 1343), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 CEA
# <NAME>
# Licensed under the terms of the CECILL License
# (see guiqwt/__init__.py for details)
# pylint: disable=C0103
"""
guiqwt.builder
--------------
The `builder` module provides a builder singleton class
used to simplify the creation of plot items.
Example
~~~~~~~
Before creating any widget, a `QApplication` must be instantiated
(that is a `Qt` internal requirement):
>>> import guidata
>>> app = guidata.qapplication()
that is mostly equivalent to the following (the only difference is that
the `guidata` helper function also installs the `Qt` translation
corresponding to the system locale):
>>> from PyQt4.QtGui import QApplication
>>> app = QApplication([])
now that a `QApplication` object exists, we may create the plotting widget:
>>> from guiqwt.plot import ImageWidget
>>> widget = ImageWidget()
create curves, images, histograms, etc. and attach them to the plot:
>>> from guiqwt.builder import make
>>> curve = make.mcure(x, y, 'r+')
>>> image = make.image(data)
>>> hist = make.histogram(data, 100)
>>> for item in (curve, image, hist):
... widget.plot.add_item()
and then show the widget to screen:
>>> widget.show()
>>> app.exec_()
Reference
~~~~~~~~~
.. autoclass:: PlotItemBuilder
:members:
"""
import os.path as osp
from numpy import arange, array, zeros, meshgrid, ndarray
from qtpy.py3compat import is_text_string
# Local imports
from guiqwt.config import _, CONF, make_title
from guiqwt.baseplot import BasePlot
from guiqwt.curve import CurveItem, ErrorBarCurveItem, GridItem
from guiqwt.histogram import HistogramItem, lut_range_threshold
from guiqwt.image import (
ImageItem,
QuadGridItem,
TrImageItem,
XYImageItem,
Histogram2DItem,
RGBImageItem,
MaskedImageItem,
)
from guiqwt.shapes import (
XRangeSelection,
RectangleShape,
EllipseShape,
SegmentShape,
Marker,
)
from guiqwt.annotations import AnnotatedRectangle, AnnotatedEllipse, AnnotatedSegment
from guiqwt.styles import (
update_style_attr,
CurveParam,
ErrorBarParam,
style_generator,
LabelParam,
LegendParam,
ImageParam,
TrImageParam,
HistogramParam,
Histogram2DParam,
RGBImageParam,
MaskedImageParam,
XYImageParam,
ImageFilterParam,
MARKERS,
COLORS,
GridParam,
LineStyleParam,
AnnotationParam,
QuadGridParam,
LabelParamWithContents,
MarkerParam,
)
from guiqwt.label import (
LabelItem,
LegendBoxItem,
RangeComputation,
RangeComputation2d,
DataInfoLabel,
RangeInfo,
SelectedLegendBoxItem,
)
# default offset positions for anchors
ANCHOR_OFFSETS = {
"TL": (5, 5),
"TR": (-5, 5),
"BL": (5, -5),
"BR": (-5, -5),
"L": (5, 0),
"R": (-5, 0),
"T": (0, 5),
"B": (0, -5),
}
CURVE_COUNT = 0
HISTOGRAM_COUNT = 0
IMAGE_COUNT = 0
LABEL_COUNT = 0
HISTOGRAM2D_COUNT = 0
class PlotItemBuilder(object):
"""
This is just a bare class used to regroup
a set of factory functions in a single object
"""
    def __init__(self):
        # Cycling generator of MATLAB-like style strings ("b-", "g-", ...)
        # used to assign a default look to each new unnamed curve.
        self.style = style_generator()
def gridparam(
self,
background=None,
major_enabled=None,
minor_enabled=None,
major_style=None,
minor_style=None,
):
"""
Make `guiqwt.styles.GridParam` instance
* background = canvas background color
* major_enabled = tuple (major_xenabled, major_yenabled)
* minor_enabled = tuple (minor_xenabled, minor_yenabled)
* major_style = tuple (major_xstyle, major_ystyle)
* minor_style = tuple (minor_xstyle, minor_ystyle)
Style: tuple (style, color, width)
"""
gridparam = GridParam(title=_("Grid"), icon="lin_lin.png")
gridparam.read_config(CONF, "plot", "grid")
if background is not None:
gridparam.background = background
if major_enabled is not None:
gridparam.maj_xenabled, gridparam.maj_yenabled = major_enabled
if minor_enabled is not None:
gridparam.min_xenabled, gridparam.min_yenabled = minor_enabled
if major_style is not None:
style = LineStyleParam()
linestyle, color, style.width = major_style
style.set_style_from_matlab(linestyle)
style.color = COLORS.get(color, color) # MATLAB-style
if minor_style is not None:
style = LineStyleParam()
linestyle, color, style.width = minor_style
style.set_style_from_matlab(linestyle)
style.color = COLORS.get(color, color) # MATLAB-style
return gridparam
def grid(
self,
background=None,
major_enabled=None,
minor_enabled=None,
major_style=None,
minor_style=None,
):
"""
Make a grid `plot item` (`guiqwt.curve.GridItem` object)
* background = canvas background color
* major_enabled = tuple (major_xenabled, major_yenabled)
* minor_enabled = tuple (minor_xenabled, minor_yenabled)
* major_style = tuple (major_xstyle, major_ystyle)
* minor_style = tuple (minor_xstyle, minor_ystyle)
Style: tuple (style, color, width)
"""
gridparam = self.gridparam(
background, major_enabled, minor_enabled, major_style, minor_style
)
return GridItem(gridparam)
def __set_curve_axes(self, curve, xaxis, yaxis):
"""Set curve axes"""
for axis in (xaxis, yaxis):
if axis not in BasePlot.AXIS_NAMES:
raise RuntimeError("Unknown axis %s" % axis)
curve.setXAxis(BasePlot.AXIS_NAMES[xaxis])
curve.setYAxis(BasePlot.AXIS_NAMES[yaxis])
    def __set_baseparam(
        self,
        param,
        color,
        linestyle,
        linewidth,
        marker,
        markersize,
        markerfacecolor,
        markeredgecolor,
    ):
        """Apply parameters to a `guiqwt.styles.CurveParam` or
        `guiqwt.styles.MarkerParam` instance.

        Only non-None arguments are applied, so callers may leave any
        subset of the style attributes untouched.
        """
        if color is not None:
            color = COLORS.get(color, color)  # MATLAB-style
            param.line.color = color
        if linestyle is not None:
            param.line.set_style_from_matlab(linestyle)
        if linewidth is not None:
            param.line.width = linewidth
        if marker is not None:
            if marker in MARKERS:
                # MATLAB-like single-character marker ('o', '+', ...)
                param.symbol.update_param(MARKERS[marker])  # MATLAB-style
            else:
                # otherwise assumed to be a Qt symbol name ("Ellipse", ...)
                param.symbol.marker = marker
        if markersize is not None:
            param.symbol.size = markersize
        if markerfacecolor is not None:
            markerfacecolor = COLORS.get(
                markerfacecolor, markerfacecolor
            )  # MATLAB-style
            param.symbol.facecolor = markerfacecolor
        if markeredgecolor is not None:
            markeredgecolor = COLORS.get(
                markeredgecolor, markeredgecolor
            )  # MATLAB-style
            param.symbol.edgecolor = markeredgecolor
    def __set_param(
        self,
        param,
        title,
        color,
        linestyle,
        linewidth,
        marker,
        markersize,
        markerfacecolor,
        markeredgecolor,
        shade,
        curvestyle,
        baseline,
    ):
        """Apply parameters to a `guiqwt.styles.CurveParam` instance.

        Line/marker attributes are delegated to `__set_baseparam`; the
        curve-specific attributes are applied here. None values are left
        untouched.
        """
        self.__set_baseparam(
            param,
            color,
            linestyle,
            linewidth,
            marker,
            markersize,
            markerfacecolor,
            markeredgecolor,
        )
        # an empty title keeps the label previously assigned to the param
        if title:
            param.label = title
        if shade is not None:
            param.shade = shade
        if curvestyle is not None:
            param.curvestyle = curvestyle
        if baseline is not None:
            param.baseline = baseline
def __get_arg_triple_plot(self, args):
"""Convert MATLAB-like arguments into x, y, style"""
def get_x_y_from_data(data):
if isinstance(data, (tuple, list)):
data = array(data)
if len(data.shape) == 1 or 1 in data.shape:
x = arange(data.size)
y = data
else:
x = arange(len(data[:, 0]))
y = [data[:, i] for i in range(len(data[0, :]))]
return x, y
if len(args) == 1:
if is_text_string(args[0]):
x = array((), float)
y = array((), float)
style = args[0]
else:
x, y = get_x_y_from_data(args[0])
y_matrix = not isinstance(y, ndarray)
if y_matrix:
style = [next(self.style) for yi in y]
else:
style = next(self.style)
elif len(args) == 2:
a1, a2 = args
if is_text_string(a2):
x, y = get_x_y_from_data(a1)
style = a2
else:
x = a1
y = a2
style = next(self.style)
elif len(args) == 3:
x, y, style = args
else:
raise TypeError("Wrong number of arguments")
if isinstance(x, (list, tuple)):
x = array(x)
if isinstance(y, (list, tuple)) and not y_matrix:
y = array(y)
return x, y, style
def __get_arg_triple_errorbar(self, args):
"""Convert MATLAB-like arguments into x, y, style"""
if len(args) == 2:
y, dy = args
x = arange(len(y))
dx = zeros(len(y))
style = next(self.style)
elif len(args) == 3:
a1, a2, a3 = args
if is_text_string(a3):
y, dy = a1, a2
x = arange(len(y))
dx = zeros(len(y))
style = a3
else:
x, y, dy = args
dx = zeros(len(y))
style = next(self.style)
elif len(args) == 4:
a1, a2, a3, a4 = args
if is_text_string(a4):
x, y, dy = a1, a2, a3
dx = zeros(len(y))
style = a4
else:
x, y, dx, dy = args
style = next(self.style)
elif len(args) == 5:
x, y, dx, dy, style = args
else:
raise TypeError("Wrong number of arguments")
return x, y, dx, dy, style
    def mcurve(self, *args, **kwargs):
        """
        Make a curve `plot item` based on MATLAB-like syntax
        (may return a list of curves if data contains more than one signal)
        (:py:class:`guiqwt.curve.CurveItem` object)

        Remaining keyword arguments (other than `label`) are forwarded
        to :py:meth:`pcurve`.

        Example::

            mcurve(x, y, 'r+')
        """
        x, y, style = self.__get_arg_triple_plot(args)
        # normalize to parallel lists: one y signal per style entry
        if isinstance(y, ndarray):
            y = [y]
        if not isinstance(style, list):
            style = [style]
        if len(y) > len(style):
            # reuse the first style for every remaining signal
            style = [style[0]] * len(y)
        basename = _("Curve")
        curves = []
        for yi, stylei in zip(y, style):
            param = CurveParam(title=basename, icon="curve.png")
            if "label" in kwargs:
                # pop so the label is not forwarded to pcurve()
                param.label = kwargs.pop("label")
            else:
                # anonymous curves get a numbered default title
                global CURVE_COUNT
                CURVE_COUNT += 1
                param.label = make_title(basename, CURVE_COUNT)
            update_style_attr(stylei, param)
            curves.append(self.pcurve(x, yi, param, **kwargs))
        # single-signal input returns a single item, not a list
        if len(curves) == 1:
            return curves[0]
        else:
            return curves
def pcurve(self, x, y, param, xaxis="bottom", yaxis="left"):
"""
Make a curve `plot item`
based on a `guiqwt.styles.CurveParam` instance
(:py:class:`guiqwt.curve.CurveItem` object)
Usage::
pcurve(x, y, param)
"""
curve = CurveItem(param)
curve.set_data(x, y)
curve.update_params()
self.__set_curve_axes(curve, xaxis, yaxis)
return curve
    def curve(
        self,
        x,
        y,
        title="",
        color=None,
        linestyle=None,
        linewidth=None,
        marker=None,
        markersize=None,
        markerfacecolor=None,
        markeredgecolor=None,
        shade=None,
        curvestyle=None,
        baseline=None,
        xaxis="bottom",
        yaxis="left",
    ):
        """
        Make a curve `plot item` from x, y, data
        (:py:class:`guiqwt.curve.CurveItem` object)

            * x: 1D NumPy array
            * y: 1D NumPy array
            * color: curve color name
            * linestyle: curve line style (MATLAB-like string or "SolidLine",
              "DashLine", "DotLine", "DashDotLine", "DashDotDotLine", "NoPen")
            * linewidth: line width (pixels)
            * marker: marker shape (MATLAB-like string or "Cross",
              "Ellipse", "Star1", "XCross", "Rect", "Diamond", "UTriangle",
              "DTriangle", "RTriangle", "LTriangle", "Star2", "NoSymbol")
            * markersize: marker size (pixels)
            * markerfacecolor: marker face color name
            * markeredgecolor: marker edge color name
            * shade: 0 <= float <= 1 (curve shade)
            * curvestyle: "Lines", "Sticks", "Steps", "Dots", "NoCurve"
            * baseline (float: default=0.0): the baseline is needed for
              filling the curve with a brush or the Sticks drawing style.
            * xaxis, yaxis: X/Y axes bound to curve

        Example::

            curve(x, y, marker='Ellipse', markerfacecolor='#ffffff')

        which is equivalent to (MATLAB-style support)::

            curve(x, y, marker='o', markerfacecolor='w')
        """
        basename = _("Curve")
        param = CurveParam(title=basename, icon="curve.png")
        if not title:
            # untitled curves get a numbered default title
            global CURVE_COUNT
            CURVE_COUNT += 1
            title = make_title(basename, CURVE_COUNT)
        self.__set_param(
            param,
            title,
            color,
            linestyle,
            linewidth,
            marker,
            markersize,
            markerfacecolor,
            markeredgecolor,
            shade,
            curvestyle,
            baseline,
        )
        return self.pcurve(x, y, param, xaxis, yaxis)
    def merror(self, *args, **kwargs):
        """
        Make an errorbar curve `plot item` based on MATLAB-like syntax
        (:py:class:`guiqwt.curve.ErrorBarCurveItem` object)

        Example::

            merror(x, y, dy, 'r+')
        """
        x, y, dx, dy, style = self.__get_arg_triple_errorbar(args)
        basename = _("Curve")
        curveparam = CurveParam(title=basename, icon="curve.png")
        errorbarparam = ErrorBarParam(title=_("Error bars"), icon="errorbar.png")
        # NOTE(review): unlike mcurve(), keyword arguments other than
        # `label` are silently ignored here (not forwarded to perror).
        if "label" in kwargs:
            curveparam.label = kwargs["label"]
        else:
            # anonymous curves get a numbered default title
            global CURVE_COUNT
            CURVE_COUNT += 1
            curveparam.label = make_title(basename, CURVE_COUNT)
        update_style_attr(style, curveparam)
        # errorbars share the curve line color by default
        errorbarparam.color = curveparam.line.color
        return self.perror(x, y, dx, dy, curveparam, errorbarparam)
def perror(
self, x, y, dx, dy, curveparam, errorbarparam, xaxis="bottom", yaxis="left"
):
"""
Make an errorbar curve `plot item`
based on a `guiqwt.styles.ErrorBarParam` instance
(:py:class:`guiqwt.curve.ErrorBarCurveItem` object)
* x: 1D NumPy array
* y: 1D NumPy array
* dx: None, or scalar, or 1D NumPy array
* dy: None, or scalar, or 1D NumPy array
* curveparam: `guiqwt.styles.CurveParam` object
* errorbarparam: `guiqwt.styles.ErrorBarParam` object
* xaxis, yaxis: X/Y axes bound to curve
Usage::
perror(x, y, dx, dy, curveparam, errorbarparam)
"""
curve = ErrorBarCurveItem(curveparam, errorbarparam)
curve.set_data(x, y, dx, dy)
curve.update_params()
self.__set_curve_axes(curve, xaxis, yaxis)
return curve
    def error(
        self,
        x,
        y,
        dx,
        dy,
        title="",
        color=None,
        linestyle=None,
        linewidth=None,
        errorbarwidth=None,
        errorbarcap=None,
        errorbarmode=None,
        errorbaralpha=None,
        marker=None,
        markersize=None,
        markerfacecolor=None,
        markeredgecolor=None,
        shade=None,
        curvestyle=None,
        baseline=None,
        xaxis="bottom",
        yaxis="left",
    ):
        """
        Make an errorbar curve `plot item`
        (:py:class:`guiqwt.curve.ErrorBarCurveItem` object)

            * x: 1D NumPy array
            * y: 1D NumPy array
            * dx: None, or scalar, or 1D NumPy array
            * dy: None, or scalar, or 1D NumPy array
            * color: curve color name
            * linestyle: curve line style (MATLAB-like string or attribute
              name from the :py:class:`PyQt4.QtCore.Qt.PenStyle` enum
              (i.e. "SolidLine" "DashLine", "DotLine", "DashDotLine",
              "DashDotDotLine" or "NoPen")
            * linewidth: line width (pixels)
            * errorbarwidth, errorbarcap, errorbarmode, errorbaralpha:
              errorbar line width, cap size, drawing mode and transparency
            * marker: marker shape (MATLAB-like string or attribute name from
              the :py:class:`PyQt4.Qwt5.QwtSymbol.Style` enum (i.e. "Cross",
              "Ellipse", "Star1", "XCross", "Rect", "Diamond", "UTriangle",
              "DTriangle", "RTriangle", "LTriangle", "Star2" or "NoSymbol")
            * markersize: marker size (pixels)
            * markerfacecolor: marker face color name
            * markeredgecolor: marker edge color name
            * shade: 0 <= float <= 1 (curve shade)
            * curvestyle: attribute name from the
              :py:class:`PyQt4.Qwt5.QwtPlotCurve.CurveStyle` enum
              (i.e. "Lines", "Sticks", "Steps", "Dots" or "NoCurve")
            * baseline (float: default=0.0): the baseline is needed for
              filling the curve with a brush or the Sticks drawing style.
            * xaxis, yaxis: X/Y axes bound to curve

        Example::

            error(x, y, None, dy, marker='Ellipse', markerfacecolor='#ffffff')

        which is equivalent to (MATLAB-style support)::

            error(x, y, None, dy, marker='o', markerfacecolor='w')
        """
        basename = _("Curve")
        curveparam = CurveParam(title=basename, icon="curve.png")
        errorbarparam = ErrorBarParam(title=_("Error bars"), icon="errorbar.png")
        if not title:
            # untitled curves get a numbered default label; __set_param
            # leaves the label untouched when `title` is empty
            global CURVE_COUNT
            CURVE_COUNT += 1
            curveparam.label = make_title(basename, CURVE_COUNT)
        self.__set_param(
            curveparam,
            title,
            color,
            linestyle,
            linewidth,
            marker,
            markersize,
            markerfacecolor,
            markeredgecolor,
            shade,
            curvestyle,
            baseline,
        )
        # errorbars share the curve line color by default
        errorbarparam.color = curveparam.line.color
        if errorbarwidth is not None:
            errorbarparam.width = errorbarwidth
        if errorbarcap is not None:
            errorbarparam.cap = errorbarcap
        if errorbarmode is not None:
            errorbarparam.mode = errorbarmode
        if errorbaralpha is not None:
            errorbarparam.alpha = errorbaralpha
        return self.perror(x, y, dx, dy, curveparam, errorbarparam, xaxis, yaxis)
    def histogram(
        self,
        data,
        bins=None,
        logscale=None,
        title="",
        color=None,
        xaxis="bottom",
        yaxis="left",
    ):
        """
        Make 1D Histogram `plot item`
        (:py:class:`guiqwt.histogram.HistogramItem` object)

            * data (1D NumPy array)
            * bins: number of bins (int)
            * logscale: Y-axis scale (bool)
            * title: histogram title (a numbered default is used if empty)
            * color: curve color name
            * xaxis, yaxis: X/Y axes bound to the histogram
        """
        basename = _("Histogram")
        histparam = HistogramParam(title=basename, icon="histogram.png")
        curveparam = CurveParam(_("Curve"), icon="curve.png")
        # start from the user configuration defaults for histogram curves
        curveparam.read_config(CONF, "histogram", "curve")
        if not title:
            # untitled histograms get a numbered default title
            global HISTOGRAM_COUNT
            HISTOGRAM_COUNT += 1
            title = make_title(basename, HISTOGRAM_COUNT)
        curveparam.label = title
        if color is not None:
            curveparam.line.color = color
        if bins is not None:
            histparam.n_bins = bins
        if logscale is not None:
            histparam.logscale = logscale
        return self.phistogram(data, curveparam, histparam, xaxis, yaxis)
def phistogram(self, data, curveparam, histparam, xaxis="bottom", yaxis="left"):
"""
Make 1D histogram `plot item`
(:py:class:`guiqwt.histogram.HistogramItem` object)
based on a `guiqwt.styles.CurveParam` and
`guiqwt.styles.HistogramParam` instances
Usage::
phistogram(data, curveparam, histparam)
"""
hist = HistogramItem(curveparam, histparam)
hist.update_params()
hist.set_hist_data(data)
self.__set_curve_axes(hist, xaxis, yaxis)
return hist
    def __set_image_param(
        self, param, title, alpha_mask, alpha, interpolation, **kwargs
    ):
        """Apply common settings to an image parameter object.

        Extra keyword arguments are set as attributes on *param* when
        their value is not None (xmin/xmax, colormap, formats, ...).
        """
        if title:
            param.label = title
        else:
            # untitled images get a numbered default label
            global IMAGE_COUNT
            IMAGE_COUNT += 1
            param.label = make_title(_("Image"), IMAGE_COUNT)
        if alpha_mask is not None:
            assert isinstance(alpha_mask, bool)
            param.alpha_mask = alpha_mask
        if alpha is not None:
            assert 0.0 <= alpha <= 1.0
            param.alpha = alpha
        # map user-friendly names to interpolation codes
        # (5 = 5x5 antialiasing filter)
        interp_methods = {"nearest": 0, "linear": 1, "antialiasing": 5}
        param.interpolation = interp_methods[interpolation]
        for key, val in list(kwargs.items()):
            if val is not None:
                setattr(param, key, val)
def _get_image_data(self, data, filename, title, to_grayscale):
if data is None:
assert filename is not None
from guiqwt import io
data = io.imread(filename, to_grayscale=to_grayscale)
if title is None and filename is not None:
title = osp.basename(filename)
return data, filename, title
@staticmethod
def compute_bounds(data, pixel_size, center_on):
"""Return image bounds from *pixel_size* (scalar or tuple)"""
if not isinstance(pixel_size, (tuple, list)):
pixel_size = [pixel_size, pixel_size]
dx, dy = pixel_size
xmin, ymin = 0.0, 0.0
xmax, ymax = data.shape[1] * dx, data.shape[0] * dy
if center_on is not None:
xc, yc = center_on
dx, dy = 0.5 * (xmax - xmin) - xc, 0.5 * (ymax - ymin) - yc
xmin -= dx
xmax -= dx
ymin -= dy
ymax -= dy
return xmin, xmax, ymin, ymax
    def image(
        self,
        data=None,
        filename=None,
        title=None,
        alpha_mask=None,
        alpha=None,
        background_color=None,
        colormap=None,
        xdata=[None, None],
        ydata=[None, None],
        pixel_size=None,
        center_on=None,
        interpolation="linear",
        eliminate_outliers=None,
        xformat="%.1f",
        yformat="%.1f",
        zformat="%.1f",
    ):
        """
        Make an image `plot item` from data
        (:py:class:`guiqwt.image.ImageItem` object or
        :py:class:`guiqwt.image.RGBImageItem` object if data has 3 dimensions)

            * data: 2D (grayscale) or 3D (RGB) NumPy array; loaded from
              `filename` when None
            * xdata, ydata: (min, max) bounds along each axis
            * pixel_size: scalar or (dx, dy); alternative to xdata/ydata
            * center_on: (x, y) center point, only valid with `pixel_size`
            * interpolation: 'nearest', 'linear' (default) or 'antialiasing'
            * eliminate_outliers: percentage of outliers to clip from the LUT

        NOTE(review): `xdata`/`ydata` use mutable list defaults; they are
        never mutated here, but tuples would be the safer idiom.
        """
        assert isinstance(xdata, (tuple, list)) and len(xdata) == 2
        assert isinstance(ydata, (tuple, list)) and len(ydata) == 2
        param = ImageParam(title=_("Image"), icon="image.png")
        data, filename, title = self._get_image_data(
            data, filename, title, to_grayscale=True
        )
        # 3D data is delegated to the RGB image factory
        if data.ndim == 3:
            return self.rgbimage(
                data=data,
                filename=filename,
                title=title,
                alpha_mask=alpha_mask,
                alpha=alpha,
            )
        assert data.ndim == 2, "Data must have 2 dimensions"
        if pixel_size is None:
            # bounds come from xdata/ydata; center_on only makes sense
            # together with pixel_size
            assert center_on is None, (
                "Ambiguous parameters: both `center_on`"
                " and `xdata`/`ydata` were specified"
            )
            xmin, xmax = xdata
            ymin, ymax = ydata
        else:
            xmin, xmax, ymin, ymax = self.compute_bounds(data, pixel_size, center_on)
        self.__set_image_param(
            param,
            title,
            alpha_mask,
            alpha,
            interpolation,
            background=background_color,
            colormap=colormap,
            xmin=xmin,
            xmax=xmax,
            ymin=ymin,
            ymax=ymax,
            xformat=xformat,
            yformat=yformat,
            zformat=zformat,
        )
        image = ImageItem(data, param)
        image.set_filename(filename)
        if eliminate_outliers is not None:
            image.set_lut_range(lut_range_threshold(image, 256, eliminate_outliers))
        return image
    def maskedimage(
        self,
        data=None,
        mask=None,
        filename=None,
        title=None,
        alpha_mask=False,
        alpha=1.0,
        xdata=[None, None],
        ydata=[None, None],
        pixel_size=None,
        center_on=None,
        background_color=None,
        colormap=None,
        show_mask=False,
        fill_value=None,
        interpolation="linear",
        eliminate_outliers=None,
        xformat="%.1f",
        yformat="%.1f",
        zformat="%.1f",
    ):
        """
        Make a masked image `plot item` from data
        (:py:class:`guiqwt.image.MaskedImageItem` object)

            * data: 2D NumPy array; loaded from `filename` when None
            * mask: boolean mask array applied to the data
            * show_mask, fill_value: mask display options
            * other parameters: see :py:meth:`image`
        """
        assert isinstance(xdata, (tuple, list)) and len(xdata) == 2
        assert isinstance(ydata, (tuple, list)) and len(ydata) == 2
        param = MaskedImageParam(title=_("Image"), icon="image.png")
        data, filename, title = self._get_image_data(
            data, filename, title, to_grayscale=True
        )
        assert data.ndim == 2, "Data must have 2 dimensions"
        if pixel_size is None:
            # bounds come from xdata/ydata; center_on only makes sense
            # together with pixel_size
            assert center_on is None, (
                "Ambiguous parameters: both `center_on`"
                " and `xdata`/`ydata` were specified"
            )
            xmin, xmax = xdata
            ymin, ymax = ydata
        else:
            xmin, xmax, ymin, ymax = self.compute_bounds(data, pixel_size, center_on)
        self.__set_image_param(
            param,
            title,
            alpha_mask,
            alpha,
            interpolation,
            background=background_color,
            colormap=colormap,
            xmin=xmin,
            xmax=xmax,
            ymin=ymin,
            ymax=ymax,
            show_mask=show_mask,
            fill_value=fill_value,
            xformat=xformat,
            yformat=yformat,
            zformat=zformat,
        )
        image = MaskedImageItem(data, mask, param)
        image.set_filename(filename)
        if eliminate_outliers is not None:
            image.set_lut_range(lut_range_threshold(image, 256, eliminate_outliers))
        return image
    def rgbimage(
        self,
        data=None,
        filename=None,
        title=None,
        alpha_mask=False,
        alpha=1.0,
        xdata=[None, None],
        ydata=[None, None],
        pixel_size=None,
        center_on=None,
        interpolation="linear",
    ):
        """
        Make a RGB image `plot item` from data
        (:py:class:`guiqwt.image.RGBImageItem` object)

            * data: 3D NumPy array (h x w x 3/4); loaded from `filename`
              when None (without grayscale conversion)
            * other parameters: see :py:meth:`image`
        """
        assert isinstance(xdata, (tuple, list)) and len(xdata) == 2
        assert isinstance(ydata, (tuple, list)) and len(ydata) == 2
        param = RGBImageParam(title=_("Image"), icon="image.png")
        data, filename, title = self._get_image_data(
            data, filename, title, to_grayscale=False
        )
        assert data.ndim == 3, "RGB data must have 3 dimensions"
        if pixel_size is None:
            # bounds come from xdata/ydata; center_on only makes sense
            # together with pixel_size
            assert center_on is None, (
                "Ambiguous parameters: both `center_on`"
                " and `xdata`/`ydata` were specified"
            )
            xmin, xmax = xdata
            ymin, ymax = ydata
        else:
            xmin, xmax, ymin, ymax = self.compute_bounds(data, pixel_size, center_on)
        self.__set_image_param(
            param,
            title,
            alpha_mask,
            alpha,
            interpolation,
            xmin=xmin,
            xmax=xmax,
            ymin=ymin,
            ymax=ymax,
        )
        image = RGBImageItem(data, param)
        image.set_filename(filename)
        return image
    def quadgrid(
        self,
        X,
        Y,
        Z,
        filename=None,
        title=None,
        alpha_mask=None,
        alpha=None,
        background_color=None,
        colormap=None,
        interpolation="linear",
    ):
        """
        Make a pseudocolor `plot item` of a 2D array
        (:py:class:`guiqwt.image.QuadGridItem` object)

            * X, Y: 2D coordinate arrays of the quadrilateral grid
            * Z: 2D value array

        NOTE(review): the `filename` and `background_color` parameters are
        accepted but currently unused (`background` is not forwarded to the
        image parameters, unlike in :py:meth:`image`) — confirm intent.
        """
        param = QuadGridParam(title=_("Image"), icon="image.png")
        self.__set_image_param(
            param, title, alpha_mask, alpha, interpolation, colormap=colormap
        )
        image = QuadGridItem(X, Y, Z, param)
        return image
def pcolor(self, *args, **kwargs):
"""
Make a pseudocolor `plot item` of a 2D array
based on MATLAB-like syntax
(:py:class:`guiqwt.image.QuadGridItem` object)
Examples::
pcolor(C)
pcolor(X, Y, C)
"""
if len(args) == 1:
(Z,) = args
M, N = Z.shape
X, Y = meshgrid(arange(N, dtype=Z.dtype), arange(M, dtype=Z.dtype))
elif len(args) == 3:
X, Y, Z = args
else:
raise RuntimeError("1 or 3 non-keyword arguments expected")
return self.quadgrid(X, Y, Z, **kwargs)
    def trimage(
        self,
        data=None,
        filename=None,
        title=None,
        alpha_mask=None,
        alpha=None,
        background_color=None,
        colormap=None,
        x0=0.0,
        y0=0.0,
        angle=0.0,
        dx=1.0,
        dy=1.0,
        interpolation="linear",
        eliminate_outliers=None,
        xformat="%.1f",
        yformat="%.1f",
        zformat="%.1f",
    ):
        """
        Make a transformable image `plot item` (image with an arbitrary
        affine transform)
        (:py:class:`guiqwt.image.TrImageItem` object)

        * data: 2D NumPy array (image pixel data)
        * filename: image filename (if data is not specified)
        * title: image title (optional)
        * x0, y0: position
        * angle: angle (radians)
        * dx, dy: pixel size along X and Y axes
        * interpolation: 'nearest', 'linear' (default), 'antialiasing' (5x5)
        * eliminate_outliers: if not None, passed to `lut_range_threshold`
          to clip the LUT range (presumably a percentage of outlier
          pixels -- confirm against `lut_range_threshold`)
        * xformat, yformat, zformat: format strings for the X/Y/Z values
        """
        param = TrImageParam(title=_("Image"), icon="image.png")
        # Load pixel data from `filename` when `data` is not given
        data, filename, title = self._get_image_data(
            data, filename, title, to_grayscale=True
        )
        # Forward every style/geometry option to the shared param setter
        self.__set_image_param(
            param,
            title,
            alpha_mask,
            alpha,
            interpolation,
            background=background_color,
            colormap=colormap,
            x0=x0,
            y0=y0,
            angle=angle,
            dx=dx,
            dy=dy,
            xformat=xformat,
            yformat=yformat,
            zformat=zformat,
        )
        image = TrImageItem(data, param)
        image.set_filename(filename)
        if eliminate_outliers is not None:
            # Clip the LUT range so extreme pixel values don't wash out
            # the displayed contrast
            image.set_lut_range(lut_range_threshold(image, 256, eliminate_outliers))
        return image
def xyimage(
self,
x,
y,
data,
title=None,
alpha_mask=None,
alpha=None,
background_color=None,
colormap=None,
interpolation="linear",
eliminate_outliers=None,
xformat="%.1f",
yformat="%.1f",
zformat="%.1f",
):
"""
Make an xyimage `plot item` (image with non-linear X/Y axes) from data
(:py:class:`guiqwt.image.XYImageItem` object)
* x: 1D NumPy array (or tuple, list: will be converted to array)
* y: 1D NumPy array (or tuple, list: will be converted to array
* data: 2D NumPy array (image pixel data)
* title: image title (optional)
* interpolation: 'nearest', 'linear' (default), 'antialiasing' (5x5)
"""
param = XYImageParam(title=_("Image"), icon="image.png")
self.__set_image_param(
param,
title,
alpha_mask,
alpha,
interpolation,
background=background_color,
colormap=colormap,
xformat=xformat,
yformat=yformat,
zformat=zformat,
)
if isinstance(x, (list, tuple)):
x = array(x)
if isinstance(y, (list, tuple)):
y = array(y)
image = XYImageItem(x, y, data, param)
if eliminate_outliers is not None:
image.set_lut_range(lut_range_threshold(image, 256, eliminate_outliers))
return image
def imagefilter(self, xmin, xmax, ymin, ymax, imageitem, filter, title=None):
"""
Make a rectangular area image filter `plot item`
(:py:class:`guiqwt.image.ImageFilterItem` object)
* xmin, xmax, ymin, ymax: filter area bounds
* imageitem: An imageitem instance
* filter: function (x, y, data) --> data
"""
param = ImageFilterParam(_("Filter"), icon="funct.png")
param.xmin, param.xmax, param.ymin, param.ymax = xmin, xmax, ymin, ymax
if title is not None:
param.label = title
filt = imageitem.get_filter(filter, param)
_m, _M = imageitem.get_lut_range()
filt.set_lut_range([_m, _M])
return filt
def histogram2D(
self,
X,
Y,
NX=None,
NY=None,
logscale=None,
title=None,
transparent=None,
Z=None,
computation=-1,
interpolation=0,
):
"""
Make a 2D Histogram `plot item`
(:py:class:`guiqwt.image.Histogram2DItem` object)
* X: data (1D array)
* Y: data (1D array)
* NX: Number of bins along x-axis (int)
* NY: Number of bins along y-axis (int)
* logscale: Z-axis scale (bool)
* title: item title (string)
* transparent: enable transparency (bool)
"""
basename = _("2D Histogram")
param = Histogram2DParam(title=basename, icon="histogram2d.png")
if NX is not None:
param.nx_bins = NX
if NY is not None:
param.ny_bins = NY
if logscale is not None:
param.logscale = int(logscale)
if title is not None:
param.label = title
else:
global HISTOGRAM2D_COUNT
HISTOGRAM2D_COUNT += 1
param.label = make_title(basename, HISTOGRAM2D_COUNT)
if transparent is not None:
param.transparent = transparent
param.computation = computation
param.interpolation = interpolation
return Histogram2DItem(X, Y, param, Z=Z)
def label(self, text, g, c, anchor, title=""):
"""
Make a label `plot item`
(:py:class:`guiqwt.label.LabelItem` object)
* text: label text (string)
* g: position in plot coordinates (tuple)
or relative position (string)
* c: position in canvas coordinates (tuple)
* anchor: anchor position in relative position (string)
* title: label name (optional)
Examples::
make.label("Relative position", (x[0], y[0]), (10, 10), "BR")
make.label("Absolute position", "R", (0,0), "R")
"""
basename = _("Label")
param = LabelParamWithContents(basename, icon="label.png")
param.read_config(CONF, "plot", "label")
if title:
param.label = title
else:
global LABEL_COUNT
LABEL_COUNT += 1
param.label = make_title(basename, LABEL_COUNT)
if isinstance(g, tuple):
param.abspos = False
param.xg, param.yg = g
else:
param.abspos = True
param.absg = g
if c is None:
c = ANCHOR_OFFSETS[anchor]
param.xc, param.yc = c
param.anchor = anchor
return LabelItem(text, param)
def legend(self, anchor="TR", c=None, restrict_items=None):
"""
Make a legend `plot item`
(:py:class:`guiqwt.label.LegendBoxItem` or
:py:class:`guiqwt.label.SelectedLegendBoxItem` object)
* anchor: legend position in relative position (string)
* c (optional): position in canvas coordinates (tuple)
* restrict_items (optional):
- None: all items are shown in legend box
- []: no item shown
- [item1, item2]: item1, item2 are shown in legend box
"""
param = LegendParam(_("Legend"), icon="legend.png")
param.read_config(CONF, "plot", "legend")
param.abspos = True
param.absg = anchor
param.anchor = anchor
if c is None:
c = ANCHOR_OFFSETS[anchor]
param.xc, param.yc = c
if restrict_items is None:
return LegendBoxItem(param)
else:
return SelectedLegendBoxItem(param, restrict_items)
    def range(self, xmin, xmax):
        """
        Make a horizontal range selection `plot item` spanning from
        *xmin* to *xmax*
        (:py:class:`guiqwt.shapes.XRangeSelection` object)
        """
        return XRangeSelection(xmin, xmax)
def vcursor(self, x, label=None, constraint_cb=None, movable=True, readonly=False):
"""
Make a vertical cursor `plot item`
Convenient function to make a vertical marker
(:py:class:`guiqwt.shapes.Marker` object)
"""
if label is None:
label_cb = lambda x, y: ""
else:
label_cb = lambda x, y: label % x
return self.marker(
position=(x, 0),
markerstyle="|",
label_cb=label_cb,
constraint_cb=constraint_cb,
movable=movable,
readonly=readonly,
)
def hcursor(self, y, label=None, constraint_cb=None, movable=True, readonly=False):
"""
Make an horizontal cursor `plot item`
Convenient function to make an horizontal marker
(:py:class:`guiqwt.shapes.Marker` object)
"""
if label is None:
label_cb = lambda x, y: ""
else:
label_cb = lambda x, y: label % y
return self.marker(
position=(0, y),
markerstyle="-",
label_cb=label_cb,
constraint_cb=constraint_cb,
movable=movable,
readonly=readonly,
)
def xcursor(
self, x, y, label=None, constraint_cb=None, movable=True, readonly=False
):
"""
Make an cross cursor `plot item`
Convenient function to make an cross marker
(:py:class:`guiqwt.shapes.Marker` object)
"""
if label is None:
label_cb = lambda x, y: ""
else:
label_cb = lambda x, y: label % (x, y)
return self.marker(
position=(x, y),
markerstyle="+",
label_cb=label_cb,
constraint_cb=constraint_cb,
movable=movable,
readonly=readonly,
)
    def marker(
        self,
        position=None,
        label_cb=None,
        constraint_cb=None,
        movable=True,
        readonly=False,
        markerstyle=None,
        markerspacing=None,
        color=None,
        linestyle=None,
        linewidth=None,
        marker=None,
        markersize=None,
        markerfacecolor=None,
        markeredgecolor=None,
    ):
        """
        Make a marker `plot item`
        (:py:class:`guiqwt.shapes.Marker` object)

        * position: tuple (x, y)
        * label_cb: function with two arguments (x, y) returning a string
        * constraint_cb: function with two arguments (x, y) returning a
          tuple (x, y) according to the marker constraint
        * movable: if True (default), marker will be movable
        * readonly: if False (default), marker can be deleted
        * markerstyle: '+', '-', '|' or None
        * markerspacing: spacing between text and marker line
        * color: marker color name
        * linestyle: marker line style (MATLAB-like string or attribute name
          from the :py:class:`PyQt4.QtCore.Qt.PenStyle` enum
          (i.e. "SolidLine" "DashLine", "DotLine", "DashDotLine",
          "DashDotDotLine" or "NoPen")
        * linewidth: line width (pixels)
        * marker: marker shape (MATLAB-like string or "Cross", "Ellipse",
          "Star1", "XCross", "Rect", "Diamond", "UTriangle", "DTriangle",
          "RTriangle", "LTriangle", "Star2", "NoSymbol")
        * markersize: marker size (pixels)
        * markerfacecolor: marker face color name
        * markeredgecolor: marker edge color name
        """
        param = MarkerParam(_("Marker"), icon="marker.png")
        param.read_config(CONF, "plot", "marker/cursor")
        # If any explicit style override was supplied, apply it to the
        # selected-state attributes too: copy sel_* into the base slots,
        # update them through __set_baseparam, then copy the result back so
        # base and selected styles stay in sync
        if (
            color
            or linestyle
            or linewidth
            or marker
            or markersize
            or markerfacecolor
            or markeredgecolor
        ):
            param.line = param.sel_line
            param.symbol = param.sel_symbol
            param.text = param.sel_text
            self.__set_baseparam(
                param,
                color,
                linestyle,
                linewidth,
                marker,
                markersize,
                markerfacecolor,
                markeredgecolor,
            )
            param.sel_line = param.line
            param.sel_symbol = param.symbol
            param.sel_text = param.text
        if markerstyle:
            param.set_markerstyle(markerstyle)
        if markerspacing:
            param.spacing = markerspacing
        if not movable:
            # Immovable markers display no symbol
            param.symbol.marker = param.sel_symbol.marker = "NoSymbol"
        # NOTE: rebinds the `marker` parameter -- the shape string is no
        # longer needed past this point
        marker = Marker(
            label_cb=label_cb, constraint_cb=constraint_cb, markerparam=param
        )
        if position is not None:
            x, y = position
            marker.set_pos(x, y)
        marker.set_readonly(readonly)
        if not movable:
            marker.set_movable(False)
            marker.set_resizable(False)
        return marker
def __shape(self, shapeclass, x0, y0, x1, y1, title=None):
shape = shapeclass(x0, y0, x1, y1)
shape.set_style("plot", "shape/drag")
if title is not None:
shape.setTitle(title)
return shape
    def rectangle(self, x0, y0, x1, y1, title=None):
        """
        Make a rectangle shape `plot item`
        (:py:class:`guiqwt.shapes.RectangleShape` object)

        * x0, y0, x1, y1: rectangle coordinates
        * title: label name (optional)
        """
        # Delegate to the shared private shape factory
        return self.__shape(RectangleShape, x0, y0, x1, y1, title)
def ellipse(self, x0, y0, x1, y1, title=None):
"""
Make an ellipse shape `plot item`
(:py:class:`guiqwt.shapes.EllipseShape` object)
* x0, y0, x1, y1: ellipse x-axis coordinates
* title: label name (optional)
"""
shape = EllipseShape(x0, y0, x1, y1)
shape.set_style("plot", "shape/drag")
if title is not None:
shape.setTitle(title)
return shape
    def circle(self, x0, y0, x1, y1, title=None):
        """
        Make a circle shape `plot item`
        (:py:class:`guiqwt.shapes.EllipseShape` object)

        * x0, y0, x1, y1: circle diameter coordinates
        * title: label name (optional)
        """
        # A circle is built as an ellipse from its diameter coordinates
        return self.ellipse(x0, y0, x1, y1, title=title)
    def segment(self, x0, y0, x1, y1, title=None):
        """
        Make a segment shape `plot item`
        (:py:class:`guiqwt.shapes.SegmentShape` object)

        * x0, y0, x1, y1: segment coordinates
        * title: label name (optional)
        """
        # Delegate to the shared private shape factory
        return self.__shape(SegmentShape, x0, y0, x1, y1, title)
def __get_annotationparam(self, title, subtitle):
param = AnnotationParam(_("Annotation"), icon="annotation.png")
if title is not None:
param.title = title
if subtitle is not None:
param.subtitle = subtitle
return param
def __annotated_shape(self, shapeclass, x0, y0, x1, y1, title, subtitle):
param = self.__get_annotationparam(title, subtitle)
shape = shapeclass(x0, y0, x1, y1, param)
shape.set_style("plot", "shape/drag")
return shape
    def annotated_rectangle(self, x0, y0, x1, y1, title=None, subtitle=None):
        """
        Make an annotated rectangle `plot item`
        (:py:class:`guiqwt.annotations.AnnotatedRectangle` object)

        * x0, y0, x1, y1: rectangle coordinates
        * title, subtitle: strings
        """
        # Delegate to the shared private annotated-shape factory
        return self.__annotated_shape(
            AnnotatedRectangle, x0, y0, x1, y1, title, subtitle
        )
def annotated_ellipse(self, x0, y0, x1, y1, ratio, title=None, subtitle=None):
"""
Make an annotated ellipse `plot item`
(:py:class:`guiqwt.annotations.AnnotatedEllipse` object)
* x0, y0, x1, y1: ellipse rectangle coordinates
* ratio: ratio between y-axis and x-axis lengths
* title, subtitle: strings
"""
param = self.__get_annotationparam(title, subtitle)
shape = AnnotatedEllipse(x0, y0, x1, y1, ratio, param)
shape.set_style("plot", "shape/drag")
return shape
    def annotated_circle(self, x0, y0, x1, y1, ratio, title=None, subtitle=None):
        """
        Make an annotated circle `plot item`
        (:py:class:`guiqwt.annotations.AnnotatedCircle` object)

        * x0, y0, x1, y1: circle diameter coordinates
        * title, subtitle: strings
        """
        # A circle is an annotated ellipse with a fixed 1.0 axis ratio;
        # NOTE(review): the `ratio` parameter is accepted but ignored here
        return self.annotated_ellipse(x0, y0, x1, y1, 1.0, title, subtitle)
    def annotated_segment(self, x0, y0, x1, y1, title=None, subtitle=None):
        """
        Make an annotated segment `plot item`
        (:py:class:`guiqwt.annotations.AnnotatedSegment` object)

        * x0, y0, x1, y1: segment coordinates
        * title, subtitle: strings
        """
        # Delegate to the shared private annotated-shape factory
        return self.__annotated_shape(AnnotatedSegment, x0, y0, x1, y1, title, subtitle)
def info_label(self, anchor, comps, title=None):
"""
Make an info label `plot item`
(:py:class:`guiqwt.label.DataInfoLabel` object)
"""
basename = _("Computation")
param = LabelParam(basename, icon="label.png")
param.read_config(CONF, "plot", "info_label")
if title is not None:
param.label = title
else:
global LABEL_COUNT
LABEL_COUNT += 1
param.label = make_title(basename, LABEL_COUNT)
param.abspos = True
param.absg = anchor
param.anchor = anchor
c = ANCHOR_OFFSETS[anchor]
param.xc, param.yc = c
return DataInfoLabel(param, comps)
def range_info_label(self, range, anchor, label, function=None, title=None):
"""
Make an info label `plot item` showing an XRangeSelection object infos
(:py:class:`guiqwt.label.DataInfoLabel` object)
(see example: :py:mod:`guiqwt.tests.computations`)
Default function is `lambda x, dx: (x, dx)`.
Example::
x = linspace(-10, 10, 10)
y = sin(sin(sin(x)))
range = make.range(-2, 2)
disp = make.range_info_label(range, 'BL', "x = %.1f ± %.1f cm",
lambda x, dx: (x, dx))
"""
info = RangeInfo(label, range, function)
return make.info_label(anchor, info, title=title)
def computation(self, range, anchor, label, curve, function, title=None):
"""
Make a computation label `plot item`
(:py:class:`guiqwt.label.DataInfoLabel` object)
(see example: :py:mod:`guiqwt.tests.computations`)
"""
if title is None:
title = curve.curveparam.label
return self.computations(range, anchor, [(curve, label, function)], title=title)
def computations(self, range, anchor, specs, title=None):
"""
Make computation labels `plot item`
(:py:class:`guiqwt.label.DataInfoLabel` object)
(see example: :py:mod:`guiqwt.tests.computations`)
"""
comps = []
same_curve = True
curve0 = None
for curve, label, function in specs:
comp = RangeComputation(label, curve, range, function)
comps.append(comp)
if curve0 is None:
curve0 = curve
same_curve = same_curve and curve is curve0
if title is None and same_curve:
title = curve.curveparam.label
return self.info_label(anchor, comps, title=title)
    def computation2d(self, rect, anchor, label, image, function, title=None):
        """
        Make a 2D computation label `plot item`
        (:py:class:`guiqwt.label.RangeComputation2d` object)
        (see example: :py:mod:`guiqwt.tests.computations`)
        """
        # Single-spec convenience wrapper around `computations2d`
        return self.computations2d(
            rect, anchor, [(image, label, function)], title=title
        )
def computations2d(self, rect, anchor, specs, title=None):
"""
Make 2D computation labels `plot item`
(:py:class:`guiqwt.label.RangeComputation2d` object)
(see example: :py:mod:`guiqwt.tests.computations`)
"""
comps = []
same_image = True
image0 = None
for image, label, function in specs:
comp = RangeComputation2d(label, image, rect, function)
comps.append(comp)
if image0 is None:
image0 = image
same_image = same_image and image is image0
if title is None and same_image:
title = image.imageparam.label
return self.info_label(anchor, comps, title=title)
#: Module-level singleton of :py:class:`PlotItemBuilder`: the `make` factory
make = PlotItemBuilder()
| [
"guiqwt.histogram.lut_range_threshold",
"guiqwt.styles.CurveParam",
"guiqwt.image.MaskedImageItem",
"guiqwt.shapes.XRangeSelection",
"guiqwt.label.LabelItem",
"guiqwt.image.RGBImageItem",
"qtpy.py3compat.is_text_string",
"guiqwt.label.RangeComputation",
"guiqwt.styles.HistogramParam",
"numpy.arang... | [((3136, 3153), 'guiqwt.styles.style_generator', 'style_generator', ([], {}), '()\n', (3151, 3153), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((5491, 5510), 'guiqwt.curve.GridItem', 'GridItem', (['gridparam'], {}), '(gridparam)\n', (5499, 5510), False, 'from guiqwt.curve import CurveItem, ErrorBarCurveItem, GridItem\n'), ((11169, 11179), 'guiqwt.config._', '_', (['"""Curve"""'], {}), "('Curve')\n", (11170, 11179), False, 'from guiqwt.config import _, CONF, make_title\n'), ((12063, 12079), 'guiqwt.curve.CurveItem', 'CurveItem', (['param'], {}), '(param)\n', (12072, 12079), False, 'from guiqwt.curve import CurveItem, ErrorBarCurveItem, GridItem\n'), ((13945, 13955), 'guiqwt.config._', '_', (['"""Curve"""'], {}), "('Curve')\n", (13946, 13955), False, 'from guiqwt.config import _, CONF, make_title\n'), ((13972, 14016), 'guiqwt.styles.CurveParam', 'CurveParam', ([], {'title': 'basename', 'icon': '"""curve.png"""'}), "(title=basename, icon='curve.png')\n", (13982, 14016), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((14865, 14875), 'guiqwt.config._', '_', (['"""Curve"""'], {}), "('Curve')\n", (14866, 14875), False, 'from guiqwt.config import _, CONF, make_title\n'), ((14897, 14941), 'guiqwt.styles.CurveParam', 'CurveParam', ([], {'title': 'basename', 'icon': '"""curve.png"""'}), "(title=basename, icon='curve.png')\n", 
(14907, 14941), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((15248, 15284), 'guiqwt.styles.update_style_attr', 'update_style_attr', (['style', 'curveparam'], {}), '(style, curveparam)\n', (15265, 15284), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((16163, 16207), 'guiqwt.curve.ErrorBarCurveItem', 'ErrorBarCurveItem', (['curveparam', 'errorbarparam'], {}), '(curveparam, errorbarparam)\n', (16180, 16207), False, 'from guiqwt.curve import CurveItem, ErrorBarCurveItem, GridItem\n'), ((18659, 18669), 'guiqwt.config._', '_', (['"""Curve"""'], {}), "('Curve')\n", (18660, 18669), False, 'from guiqwt.config import _, CONF, make_title\n'), ((18691, 18735), 'guiqwt.styles.CurveParam', 'CurveParam', ([], {'title': 'basename', 'icon': '"""curve.png"""'}), "(title=basename, icon='curve.png')\n", (18701, 18735), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((20190, 20204), 'guiqwt.config._', '_', (['"""Histogram"""'], {}), "('Histogram')\n", (20191, 20204), False, 'from guiqwt.config import _, CONF, make_title\n'), ((20225, 20277), 
'guiqwt.styles.HistogramParam', 'HistogramParam', ([], {'title': 'basename', 'icon': '"""histogram.png"""'}), "(title=basename, icon='histogram.png')\n", (20239, 20277), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((21281, 21317), 'guiqwt.histogram.HistogramItem', 'HistogramItem', (['curveparam', 'histparam'], {}), '(curveparam, histparam)\n', (21294, 21317), False, 'from guiqwt.histogram import HistogramItem, lut_range_threshold\n'), ((25244, 25266), 'guiqwt.image.ImageItem', 'ImageItem', (['data', 'param'], {}), '(data, param)\n', (25253, 25266), False, 'from guiqwt.image import ImageItem, QuadGridItem, TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((27326, 27360), 'guiqwt.image.MaskedImageItem', 'MaskedImageItem', (['data', 'mask', 'param'], {}), '(data, mask, param)\n', (27341, 27360), False, 'from guiqwt.image import ImageItem, QuadGridItem, TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((28958, 28983), 'guiqwt.image.RGBImageItem', 'RGBImageItem', (['data', 'param'], {}), '(data, param)\n', (28970, 28983), False, 'from guiqwt.image import ImageItem, QuadGridItem, TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((29623, 29651), 'guiqwt.image.QuadGridItem', 'QuadGridItem', (['X', 'Y', 'Z', 'param'], {}), '(X, Y, Z, param)\n', (29635, 29651), False, 'from guiqwt.image import ImageItem, QuadGridItem, TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((31880, 31904), 'guiqwt.image.TrImageItem', 'TrImageItem', (['data', 'param'], {}), '(data, param)\n', (31891, 31904), False, 'from guiqwt.image import ImageItem, QuadGridItem, 
TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((33434, 33464), 'guiqwt.image.XYImageItem', 'XYImageItem', (['x', 'y', 'data', 'param'], {}), '(x, y, data, param)\n', (33445, 33464), False, 'from guiqwt.image import ImageItem, QuadGridItem, TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((35036, 35053), 'guiqwt.config._', '_', (['"""2D Histogram"""'], {}), "('2D Histogram')\n", (35037, 35053), False, 'from guiqwt.config import _, CONF, make_title\n'), ((35070, 35126), 'guiqwt.styles.Histogram2DParam', 'Histogram2DParam', ([], {'title': 'basename', 'icon': '"""histogram2d.png"""'}), "(title=basename, icon='histogram2d.png')\n", (35086, 35126), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((35712, 35745), 'guiqwt.image.Histogram2DItem', 'Histogram2DItem', (['X', 'Y', 'param'], {'Z': 'Z'}), '(X, Y, param, Z=Z)\n', (35727, 35745), False, 'from guiqwt.image import ImageItem, QuadGridItem, TrImageItem, XYImageItem, Histogram2DItem, RGBImageItem, MaskedImageItem\n'), ((36410, 36420), 'guiqwt.config._', '_', (['"""Label"""'], {}), "('Label')\n", (36411, 36420), False, 'from guiqwt.config import _, CONF, make_title\n'), ((36437, 36487), 'guiqwt.styles.LabelParamWithContents', 'LabelParamWithContents', (['basename'], {'icon': '"""label.png"""'}), "(basename, icon='label.png')\n", (36459, 36487), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, 
QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((37032, 37054), 'guiqwt.label.LabelItem', 'LabelItem', (['text', 'param'], {}), '(text, param)\n', (37041, 37054), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((38126, 38153), 'guiqwt.shapes.XRangeSelection', 'XRangeSelection', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (38141, 38153), False, 'from guiqwt.shapes import XRangeSelection, RectangleShape, EllipseShape, SegmentShape, Marker\n'), ((42840, 42913), 'guiqwt.shapes.Marker', 'Marker', ([], {'label_cb': 'label_cb', 'constraint_cb': 'constraint_cb', 'markerparam': 'param'}), '(label_cb=label_cb, constraint_cb=constraint_cb, markerparam=param)\n', (42846, 42913), False, 'from guiqwt.shapes import XRangeSelection, RectangleShape, EllipseShape, SegmentShape, Marker\n'), ((44065, 44093), 'guiqwt.shapes.EllipseShape', 'EllipseShape', (['x0', 'y0', 'x1', 'y1'], {}), '(x0, y0, x1, y1)\n', (44077, 44093), False, 'from guiqwt.shapes import XRangeSelection, RectangleShape, EllipseShape, SegmentShape, Marker\n'), ((46306, 46352), 'guiqwt.annotations.AnnotatedEllipse', 'AnnotatedEllipse', (['x0', 'y0', 'x1', 'y1', 'ratio', 'param'], {}), '(x0, y0, x1, y1, ratio, param)\n', (46322, 46352), False, 'from guiqwt.annotations import AnnotatedRectangle, AnnotatedEllipse, AnnotatedSegment\n'), ((47394, 47410), 'guiqwt.config._', '_', (['"""Computation"""'], {}), "('Computation')\n", (47395, 47410), False, 'from guiqwt.config import _, CONF, make_title\n'), ((47427, 47465), 'guiqwt.styles.LabelParam', 'LabelParam', (['basename'], {'icon': '"""label.png"""'}), "(basename, icon='label.png')\n", (47437, 47465), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, 
GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((47883, 47910), 'guiqwt.label.DataInfoLabel', 'DataInfoLabel', (['param', 'comps'], {}), '(param, comps)\n', (47896, 47910), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((48573, 48606), 'guiqwt.label.RangeInfo', 'RangeInfo', (['label', 'range', 'function'], {}), '(label, range, function)\n', (48582, 48606), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((4256, 4272), 'guiqwt.styles.LineStyleParam', 'LineStyleParam', ([], {}), '()\n', (4270, 4272), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((4406, 4430), 'guiqwt.styles.COLORS.get', 'COLORS.get', (['color', 'color'], {}), '(color, color)\n', (4416, 4430), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((4503, 4519), 'guiqwt.styles.LineStyleParam', 'LineStyleParam', ([], {}), '()\n', (4517, 4519), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, 
AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((4653, 4677), 'guiqwt.styles.COLORS.get', 'COLORS.get', (['color', 'color'], {}), '(color, color)\n', (4663, 4677), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((6204, 6228), 'guiqwt.styles.COLORS.get', 'COLORS.get', (['color', 'color'], {}), '(color, color)\n', (6214, 6228), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((6798, 6842), 'guiqwt.styles.COLORS.get', 'COLORS.get', (['markerfacecolor', 'markerfacecolor'], {}), '(markerfacecolor, markerfacecolor)\n', (6808, 6842), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((7012, 7056), 'guiqwt.styles.COLORS.get', 'COLORS.get', (['markeredgecolor', 'markeredgecolor'], {}), '(markeredgecolor, markeredgecolor)\n', (7022, 7056), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, 
LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((8518, 8541), 'qtpy.py3compat.is_text_string', 'is_text_string', (['args[0]'], {}), '(args[0])\n', (8532, 8541), False, 'from qtpy.py3compat import is_text_string\n'), ((9381, 9389), 'numpy.array', 'array', (['x'], {}), '(x)\n', (9386, 9389), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((9464, 9472), 'numpy.array', 'array', (['y'], {}), '(y)\n', (9469, 9472), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((11261, 11305), 'guiqwt.styles.CurveParam', 'CurveParam', ([], {'title': 'basename', 'icon': '"""curve.png"""'}), "(title=basename, icon='curve.png')\n", (11271, 11305), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((11552, 11584), 'guiqwt.styles.update_style_attr', 'update_style_attr', (['stylei', 'param'], {}), '(stylei, param)\n', (11569, 11584), False, 'from guiqwt.styles import update_style_attr, CurveParam, ErrorBarParam, style_generator, LabelParam, LegendParam, ImageParam, TrImageParam, HistogramParam, Histogram2DParam, RGBImageParam, MaskedImageParam, XYImageParam, ImageFilterParam, MARKERS, COLORS, GridParam, LineStyleParam, AnnotationParam, QuadGridParam, LabelParamWithContents, MarkerParam\n'), ((14119, 14152), 'guiqwt.config.make_title', 'make_title', (['basename', 'CURVE_COUNT'], {}), '(basename, CURVE_COUNT)\n', (14129, 14152), False, 'from guiqwt.config import _, CONF, make_title\n'), ((15206, 15239), 'guiqwt.config.make_title', 'make_title', (['basename', 'CURVE_COUNT'], {}), '(basename, CURVE_COUNT)\n', (15216, 15239), False, 'from guiqwt.config import _, CONF, make_title\n'), ((18931, 18964), 
'guiqwt.config.make_title', 'make_title', (['basename', 'CURVE_COUNT'], {}), '(basename, CURVE_COUNT)\n', (18941, 18964), False, 'from guiqwt.config import _, CONF, make_title\n'), ((20310, 20320), 'guiqwt.config._', '_', (['"""Curve"""'], {}), "('Curve')\n", (20311, 20320), False, 'from guiqwt.config import _, CONF, make_title\n'), ((20509, 20546), 'guiqwt.config.make_title', 'make_title', (['basename', 'HISTOGRAM_COUNT'], {}), '(basename, HISTOGRAM_COUNT)\n', (20519, 20546), False, 'from guiqwt.config import _, CONF, make_title\n'), ((22407, 22453), 'guiqwt.io.imread', 'io.imread', (['filename'], {'to_grayscale': 'to_grayscale'}), '(filename, to_grayscale=to_grayscale)\n', (22416, 22453), False, 'from guiqwt import io\n'), ((22525, 22547), 'os.path.basename', 'osp.basename', (['filename'], {}), '(filename)\n', (22537, 22547), True, 'import os.path as osp\n'), ((33343, 33351), 'numpy.array', 'array', (['x'], {}), '(x)\n', (33348, 33351), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((33409, 33417), 'numpy.array', 'array', (['y'], {}), '(y)\n', (33414, 33417), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((34027, 34038), 'guiqwt.config._', '_', (['"""Filter"""'], {}), "('Filter')\n", (34028, 34038), False, 'from guiqwt.config import _, CONF, make_title\n'), ((35493, 35532), 'guiqwt.config.make_title', 'make_title', (['basename', 'HISTOGRAM2D_COUNT'], {}), '(basename, HISTOGRAM2D_COUNT)\n', (35503, 35532), False, 'from guiqwt.config import _, CONF, make_title\n'), ((36687, 36720), 'guiqwt.config.make_title', 'make_title', (['basename', 'LABEL_COUNT'], {}), '(basename, LABEL_COUNT)\n', (36697, 36720), False, 'from guiqwt.config import _, CONF, make_title\n'), ((37664, 37675), 'guiqwt.config._', '_', (['"""Legend"""'], {}), "('Legend')\n", (37665, 37675), False, 'from guiqwt.config import _, CONF, make_title\n'), ((37978, 37998), 'guiqwt.label.LegendBoxItem', 'LegendBoxItem', (['param'], {}), '(param)\n', (37991, 
37998), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((38032, 38076), 'guiqwt.label.SelectedLegendBoxItem', 'SelectedLegendBoxItem', (['param', 'restrict_items'], {}), '(param, restrict_items)\n', (38053, 38076), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((41784, 41795), 'guiqwt.config._', '_', (['"""Marker"""'], {}), "('Marker')\n", (41785, 41795), False, 'from guiqwt.config import _, CONF, make_title\n'), ((44976, 44991), 'guiqwt.config._', '_', (['"""Annotation"""'], {}), "('Annotation')\n", (44977, 44991), False, 'from guiqwt.config import _, CONF, make_title\n'), ((47682, 47715), 'guiqwt.config.make_title', 'make_title', (['basename', 'LABEL_COUNT'], {}), '(basename, LABEL_COUNT)\n', (47692, 47715), False, 'from guiqwt.config import _, CONF, make_title\n'), ((49466, 49513), 'guiqwt.label.RangeComputation', 'RangeComputation', (['label', 'curve', 'range', 'function'], {}), '(label, curve, range, function)\n', (49482, 49513), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((50578, 50626), 'guiqwt.label.RangeComputation2d', 'RangeComputation2d', (['label', 'image', 'rect', 'function'], {}), '(label, image, rect, function)\n', (50596, 50626), False, 'from guiqwt.label import LabelItem, LegendBoxItem, RangeComputation, RangeComputation2d, DataInfoLabel, RangeInfo, SelectedLegendBoxItem\n'), ((3810, 3819), 'guiqwt.config._', '_', (['"""Grid"""'], {}), "('Grid')\n", (3811, 3819), False, 'from guiqwt.config import _, CONF, make_title\n'), ((8193, 8204), 'numpy.array', 'array', (['data'], {}), '(data)\n', (8198, 8204), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((8281, 8298), 'numpy.arange', 'arange', (['data.size'], {}), 
'(data.size)\n', (8287, 8298), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((8563, 8579), 'numpy.array', 'array', (['()', 'float'], {}), '((), float)\n', (8568, 8579), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((8600, 8616), 'numpy.array', 'array', (['()', 'float'], {}), '((), float)\n', (8605, 8616), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((8996, 9014), 'qtpy.py3compat.is_text_string', 'is_text_string', (['a2'], {}), '(a2)\n', (9010, 9014), False, 'from qtpy.py3compat import is_text_string\n'), ((9834, 9852), 'qtpy.py3compat.is_text_string', 'is_text_string', (['a3'], {}), '(a3)\n', (9848, 9852), False, 'from qtpy.py3compat import is_text_string\n'), ((11506, 11539), 'guiqwt.config.make_title', 'make_title', (['basename', 'CURVE_COUNT'], {}), '(basename, CURVE_COUNT)\n', (11516, 11539), False, 'from guiqwt.config import _, CONF, make_title\n'), ((14986, 15001), 'guiqwt.config._', '_', (['"""Error bars"""'], {}), "('Error bars')\n", (14987, 15001), False, 'from guiqwt.config import _, CONF, make_title\n'), ((18780, 18795), 'guiqwt.config._', '_', (['"""Error bars"""'], {}), "('Error bars')\n", (18781, 18795), False, 'from guiqwt.config import _, CONF, make_title\n'), ((21717, 21727), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (21718, 21727), False, 'from guiqwt.config import _, CONF, make_title\n'), ((24027, 24037), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (24028, 24037), False, 'from guiqwt.config import _, CONF, make_title\n'), ((25379, 25430), 'guiqwt.histogram.lut_range_threshold', 'lut_range_threshold', (['image', '(256)', 'eliminate_outliers'], {}), '(image, 256, eliminate_outliers)\n', (25398, 25430), False, 'from guiqwt.histogram import HistogramItem, lut_range_threshold\n'), ((26275, 26285), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (26276, 26285), False, 'from guiqwt.config import _, CONF, make_title\n'), 
((27473, 27524), 'guiqwt.histogram.lut_range_threshold', 'lut_range_threshold', (['image', '(256)', 'eliminate_outliers'], {}), '(image, 256, eliminate_outliers)\n', (27492, 27524), False, 'from guiqwt.histogram import HistogramItem, lut_range_threshold\n'), ((28129, 28139), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (28130, 28139), False, 'from guiqwt.config import _, CONF, make_title\n'), ((29457, 29467), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (29458, 29467), False, 'from guiqwt.config import _, CONF, make_title\n'), ((30067, 30091), 'numpy.arange', 'arange', (['N'], {'dtype': 'Z.dtype'}), '(N, dtype=Z.dtype)\n', (30073, 30091), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((30093, 30117), 'numpy.arange', 'arange', (['M'], {'dtype': 'Z.dtype'}), '(M, dtype=Z.dtype)\n', (30099, 30117), False, 'from numpy import arange, array, zeros, meshgrid, ndarray\n'), ((31307, 31317), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (31308, 31317), False, 'from guiqwt.config import _, CONF, make_title\n'), ((32017, 32068), 'guiqwt.histogram.lut_range_threshold', 'lut_range_threshold', (['image', '(256)', 'eliminate_outliers'], {}), '(image, 256, eliminate_outliers)\n', (32036, 32068), False, 'from guiqwt.histogram import HistogramItem, lut_range_threshold\n'), ((32947, 32957), 'guiqwt.config._', '_', (['"""Image"""'], {}), "('Image')\n", (32948, 32957), False, 'from guiqwt.config import _, CONF, make_title\n'), ((33540, 33591), 'guiqwt.histogram.lut_range_threshold', 'lut_range_threshold', (['image', '(256)', 'eliminate_outliers'], {}), '(image, 256, eliminate_outliers)\n', (33559, 33591), False, 'from guiqwt.histogram import HistogramItem, lut_range_threshold\n'), ((10186, 10204), 'qtpy.py3compat.is_text_string', 'is_text_string', (['a4'], {}), '(a4)\n', (10200, 10204), False, 'from qtpy.py3compat import is_text_string\n')] |
import numpy as np
import math
def quantization(sample, sf, ba, QCa, QCb):
    """Uniformly quantize a (scaled) sample.

    Arguments:
        sample: the sample to quantize
        sf: the scale factor used to normalize the sample
        ba: the bit allocation
        QCa: the multiplicative uniform quantization parameter
        QCb: the additive uniform quantization parameter
    Returns:
        The uniformly quantized sample.
    """
    # 2**(ba - 1) levels available for the (signed) quantizer.
    n_levels = 2.0 ** (ba - 1)
    normalized = sample / sf
    return np.floor((QCa * normalized + QCb) * n_levels)
return q | [
"numpy.floor",
"math.pow"
] | [((418, 437), 'math.pow', 'math.pow', (['(2)', '(ba - 1)'], {}), '(2, ba - 1)\n', (426, 437), False, 'import math\n'), ((469, 507), 'numpy.floor', 'np.floor', (['((QCa * scaled + QCb) * power)'], {}), '((QCa * scaled + QCb) * power)\n', (477, 507), True, 'import numpy as np\n')] |
"""AtomNumber module.
An ndarray to store atom densities with string, integer, or slice indexing.
"""
import numpy as np
class AtomNumber(object):
    """Dense storage of total atom counts, indexed by material and nuclide.

    Wraps a 2-D numpy array so entries can be addressed either by integer /
    slice indices or by material / nuclide name (translated through lookup
    dictionaries).

    Parameters
    ----------
    mat_to_ind : OrderedDict of str to int
        Maps a material ID (as string) to its row index.
    nuc_to_ind : OrderedDict of str to int
        Maps a nuclide name to its column index.
    volume : OrderedDict of int to float
        Volume of each material in the geometry.
    n_mat_burn : int
        Number of materials to be burned.
    n_nuc_burn : int
        Number of nuclides to be burned.

    Attributes
    ----------
    mat_to_ind : OrderedDict of str to int
        A dictionary mapping material ID as string to index.
    nuc_to_ind : OrderedDict of str to int
        A dictionary mapping nuclide name as string to index.
    volume : numpy.ndarray
        Volumes indexed by material; defaults to 1 for materials without a
        recorded volume so density conversions still work.
    n_mat_burn : int
        Number of materials to be burned.
    n_nuc_burn : int
        Number of nuclides to be burned.
    n_mat : int
        Number of materials.
    n_nuc : int
        Number of nuclides.
    number : numpy.ndarray
        (n_mat, n_nuc) array of total atom counts.
    burn_nuc_list : list of str
        All burned nuclide names, ordered by index.
    burn_mat_list : list of str
        All burned material names, ordered by index.
    """

    def __init__(self, mat_to_ind, nuc_to_ind, volume, n_mat_burn, n_nuc_burn):
        self.mat_to_ind = mat_to_ind
        self.nuc_to_ind = nuc_to_ind

        # Default every volume to 1 so density conversion never divides by a
        # missing entry.
        self.volume = np.ones(self.n_mat)
        for mat_id, vol in volume.items():
            key = str(mat_id)
            if key in self.mat_to_ind:
                self.volume[self.mat_to_ind[key]] = vol

        self.n_mat_burn = n_mat_burn
        self.n_nuc_burn = n_nuc_burn

        self.number = np.zeros((self.n_mat, self.n_nuc))

        # Lazily-built caches for the name lists (performance).
        self._burn_nuc_list = None
        self._burn_mat_list = None

    def _mat_index(self, mat):
        """Translate a material key (name, index or slice) to an index."""
        return self.mat_to_ind[mat] if isinstance(mat, str) else mat

    def _nuc_index(self, nuc):
        """Translate a nuclide key (name, index or slice) to an index."""
        return self.nuc_to_ind[nuc] if isinstance(nuc, str) else nuc

    def __getitem__(self, pos):
        """Return total atom count(s) at ``pos = (material, nuclide)``."""
        mat, nuc = pos
        return self.number[self._mat_index(mat), self._nuc_index(nuc)]

    def __setitem__(self, pos, val):
        """Set total atom count(s) at ``pos = (material, nuclide)``."""
        mat, nuc = pos
        self.number[self._mat_index(mat), self._nuc_index(nuc)] = val

    def get_atom_density(self, mat, nuc):
        """Return atom density (total atoms / volume) instead of total atoms."""
        mat = self._mat_index(mat)
        return self[mat, self._nuc_index(nuc)] / self.volume[mat]

    def set_atom_density(self, mat, nuc, val):
        """Set atom density; internally converted to total atoms."""
        mat = self._mat_index(mat)
        self[mat, self._nuc_index(nuc)] = val * self.volume[mat]

    def get_mat_slice(self, mat):
        """Return atom counts of all burned nuclides for one material."""
        return self[self._mat_index(mat), 0:self.n_nuc_burn]

    def set_mat_slice(self, mat, val):
        """Set atom counts of all burned nuclides for one material."""
        self[self._mat_index(mat), 0:self.n_nuc_burn] = val

    @property
    def n_mat(self):
        """Number of materials."""
        return len(self.mat_to_ind)

    @property
    def n_nuc(self):
        """Number of nuclides."""
        return len(self.nuc_to_ind)

    @property
    def burn_nuc_list(self):
        """Burned nuclide names ordered by index (computed once, cached)."""
        if self._burn_nuc_list is None:
            names = [None] * self.n_nuc_burn
            for name, idx in self.nuc_to_ind.items():
                if idx < self.n_nuc_burn:
                    names[idx] = name
            self._burn_nuc_list = names
        return self._burn_nuc_list

    @property
    def burn_mat_list(self):
        """Burned material names ordered by index (computed once, cached)."""
        if self._burn_mat_list is None:
            names = [None] * self.n_mat_burn
            for name, idx in self.mat_to_ind.items():
                if idx < self.n_mat_burn:
                    names[idx] = name
            self._burn_mat_list = names
        return self._burn_mat_list
| [
"numpy.zeros",
"numpy.ones"
] | [((1816, 1835), 'numpy.ones', 'np.ones', (['self.n_mat'], {}), '(self.n_mat)\n', (1823, 1835), True, 'import numpy as np\n'), ((2101, 2135), 'numpy.zeros', 'np.zeros', (['(self.n_mat, self.n_nuc)'], {}), '((self.n_mat, self.n_nuc))\n', (2109, 2135), True, 'import numpy as np\n')] |
import logging
import math
import os
import random
from functools import partial
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import scipy
import seaborn as sns
import torch
from scipy.interpolate import griddata
from skorch.dataset import unpack_data
from torchvision.utils import make_grid
from npf import GridConvCNP, LatentNeuralProcessFamily
from npf.utils.datasplit import GridCntxtTrgtGetter
from npf.utils.helpers import MultivariateNormalDiag, channels_to_2nd_dim, prod
from npf.utils.predict import SamplePredictor
from utils.data import cntxt_trgt_collate
from utils.helpers import mean, set_seed, tuple_cont_to_cont_tuple
from utils.train import EVAL_FILENAME
# Public API of this visualization module.
__all__ = [
    "plot_dataset_samples_imgs",
    "plot_qualitative_with_kde",
    "get_posterior_samples",
    "plot_posterior_samples",
    "plot_img_marginal_pred",
]
# Default (width, height) in inches for figures created by this module.
DFLT_FIGSIZE = (17, 9)
logger = logging.getLogger(__name__)
def plot_dataset_samples_imgs(
    dataset, n_plots=4, figsize=DFLT_FIGSIZE, ax=None, pad_value=1, seed=123, title=None
):
    """Show `n_plots` randomly-drawn images from `dataset` on a single axis."""
    set_seed(seed)

    if ax is None:
        _, ax = plt.subplots(figsize=figsize)

    # Draw random samples (image only, discarding the target) and stack them.
    samples = []
    for _ in range(n_plots):
        idx = random.randint(0, len(dataset) - 1)
        samples.append(dataset[idx][0])
    grid = make_grid(torch.stack(samples, dim=0), nrow=2, pad_value=pad_value)

    ax.imshow(grid.permute(1, 2, 0).numpy())
    ax.axis("off")
    if title is not None:
        ax.set_title(title)
def get_posterior_samples(
    data,
    get_cntxt_trgt,
    model,
    is_uniform_grid=True,
    img_indcs=None,
    n_plots=4,
    seed=123,
    n_samples=3,  # if None selects all
    is_select_different=False,
):
    """Sample the model's posterior predictive on images.

    Parameters
    ----------
    data : Dataset
        Dataset from which to sample images (ignored when `get_cntxt_trgt`
        is a dict of precomputed features/masks).
    get_cntxt_trgt : callable or dict
        Context/target getter, or a dict containing precomputed
        `X_cntxt, Y_cntxt, X_trgt, Y_trgt`.
    model : nn.Module
        Model used to compute the posterior predictive.
    is_uniform_grid : bool, optional
        Whether inputs are image + masks (e.g. for `GridConvCNP`).
    img_indcs : list of int, optional
        Images to use; when None, `n_plots` indices are drawn at random.
    n_plots : int, optional
        Number of images to sample when `img_indcs` is None.
    seed : int, optional
    n_samples : int or None, optional
        Number of posterior samples to keep; None keeps all of them.
    is_select_different : bool, optional
        Keep the `n_samples` most different samples (average pixel L2 on the
        means) instead of simply the first ones.

    Returns
    -------
    y_pred, mask_cntxt, Y_cntxt, mask_trgt
    """
    set_seed(seed)
    model.eval()
    if isinstance(get_cntxt_trgt, dict):
        # Precomputed context/targets: only move them to the model's device.
        device = next(model.parameters()).device
        mask_cntxt = get_cntxt_trgt["X_cntxt"].to(device)
        Y_cntxt = get_cntxt_trgt["Y_cntxt"].to(device)
        mask_trgt = get_cntxt_trgt["X_trgt"].to(device)
        n_plots = mask_cntxt.size(0)
    else:
        if img_indcs is None:
            img_indcs = [random.randint(0, len(data) - 1) for _ in range(n_plots)]
        n_plots = len(img_indcs)
        imgs = [data[i] for i in img_indcs]
        cntxt_trgt = cntxt_trgt_collate(
            get_cntxt_trgt, is_return_masks=is_uniform_grid
        )(imgs)[0]
        mask_cntxt, Y_cntxt, mask_trgt, _ = (
            cntxt_trgt["X_cntxt"],
            cntxt_trgt["Y_cntxt"],
            cntxt_trgt["X_trgt"],
            cntxt_trgt["Y_trgt"],
        )
    y_pred = SamplePredictor(model, is_dist=True)(mask_cntxt, Y_cntxt, mask_trgt)
    if is_select_different:
        # select the most different in average pixel L2 distance
        keep_most_different_samples_(y_pred, n_samples)
    elif isinstance(n_samples, int):
        # select first n_samples
        y_pred.base_dist.loc = y_pred.base_dist.loc[:n_samples, ...]
        y_pred.base_dist.scale = y_pred.base_dist.scale[:n_samples, ...]
    elif n_samples is None:
        pass  # keep all samples
    else:
        # Bug fix: the exception was previously constructed but never raised.
        raise ValueError(f"Unkown n_samples={n_samples}.")
    return y_pred, mask_cntxt, Y_cntxt, mask_trgt
# TO DOC
def plot_img_marginal_pred(
    model,
    data,
    get_cntxt_trgt,
    figsize=(11, 5),
    n_samples=5,
    is_uniform_grid=True,
    seed=123,
    n_plots_loop=1,
    wspace=0.3,
    n_marginals=5,
    n_columns=2,
    **kwargs,
):
    """Plot posterior samples next to the per-pixel marginal predictive.

    Considers `n_plots_loop` candidate images, keeps the one whose marginal
    predictive looks most multimodal (smallest median Sarle coefficient), and
    shows `n_samples` posterior samples of it (left panel) together with its
    `n_marginals` most multimodal pixel marginals (right panel).

    Parameters
    ----------
    model : nn.Module
    data : Dataset
    get_cntxt_trgt : callable or dict
        See `plot_posterior_samples`.
    figsize : tuple, optional
    n_samples : int, optional
        Number of posterior samples shown in the left panel.
    is_uniform_grid : bool, optional
        Whether the model works on masked grids (e.g. `GridConvCNP`).
    seed : int, optional
    n_plots_loop : int, optional
        Number of candidate images to consider.
    wspace : float, optional
        Horizontal spacing between the two panels.
    n_marginals : int, optional
        Number of pixel marginals plotted in the right panel.
    n_columns : int, optional
        Number of image columns in the left panel.
    kwargs :
        Additional arguments to `plot_posterior_samples`.

    Returns
    -------
    matplotlib.figure.Figure
    """
    f, (ax0, ax1) = plt.subplots(
        1, 2, gridspec_kw={"width_ratios": [1, 1], "wspace": wspace}, figsize=figsize
    )
    # Keep all latent samples (n_samples=None) so marginals are well estimated.
    predictive_all, mask_cntxt, X, mask_trgt = get_posterior_samples(
        data,
        get_cntxt_trgt,
        model,
        n_plots=n_plots_loop,
        is_uniform_grid=is_uniform_grid,
        seed=seed,
        n_samples=None,
    )
    if predictive_all.base_dist.loc.shape[0] == 1:
        logger.warning(
            "There's only a single sample of the posterior predictive, we treat it as the marginal."
        )
    # Grid of pixel intensities at which the marginal density is evaluated.
    arange = torch.linspace(0, 1, 1000)
    if is_uniform_grid:
        arange_marg = arange.view(1, 1000, 1, 1, 1)
    else:
        arange_marg = arange.view(1, 1000, 1, 1)
    best = float("inf")
    for i in range(n_plots_loop):
        # Predictive restricted to the i-th image of the batch.
        predictive = MultivariateNormalDiag(
            predictive_all.base_dist.loc[:, i : i + 1, ...],
            predictive_all.base_dist.scale[:, i : i + 1, ...],
        )
        out = (
            marginal_log_like(predictive, arange_marg)
            .detach()
            .reshape(1000, -1)
            .numpy()
        )
        new_sarle = sarle(out)
        # Lower Sarle coefficient <=> more multimodal marginals: keep the best.
        if np.median(new_sarle) < best:
            best_out = out
            best_sarles = new_sarle
            best_mask_cntxt = mask_cntxt[i : i + 1, ...]
            best_X = X[i : i + 1, ...]
            best_mask_trgt = mask_trgt[i : i + 1, ...]
            best = np.median(best_sarles)
            best_pred = predictive
    # Only keep `n_samples` samples of the *selected* predictive for plotting.
    # Bug fix: this previously truncated with the last loop iteration's
    # `predictive`, mixing images whenever the best candidate was not last.
    best_pred.base_dist.loc = best_pred.base_dist.loc[:n_samples, ...]
    best_pred.base_dist.scale = best_pred.base_dist.scale[:n_samples, ...]
    # Plot the `n_marginals` most multimodal pixel marginals.
    idx = np.argsort(best_sarles)[:n_marginals]
    ax1.plot(arange, best_out[:, idx], alpha=0.7)
    sns.despine(top=True, right=True, left=False, bottom=False)
    ax1.set_yticks([])
    ax1.set_ylabel("Marginal Predictive")
    ax1.set_xlabel("Pixel Intensity")
    ax1.set_xlim(-0.1, 1)
    ax1.set_xticks([0, 0.5, 1])
    plot_posterior_samples(
        data,
        get_cntxt_trgt,
        model,
        is_uniform_grid=is_uniform_grid,
        seed=seed,
        n_samples=n_samples,
        ax=ax0,
        outs=[best_pred, best_mask_cntxt, best_X, best_mask_trgt],
        is_add_annot=False,
        n_plots=n_columns,
        **kwargs,
    )
    return f
def plot_posterior_samples(
    data,
    get_cntxt_trgt,
    model,
    is_uniform_grid=True,
    img_indcs=None,
    n_plots=4,
    imgsize=(7, 4),
    ax=None,
    seed=123,
    is_return=False,
    is_hrztl_cat=False,
    n_samples=1,
    outs=None,
    is_select_different=False,
    is_plot_std=False,
    interp_baselines=[],
    is_add_annot=True,
    rotate_annot=None,
    is_mask_cntxt=True,
    labels=dict(mean="Pred. Mean", std="Pred. Std.", baseline="{baseline} Interp."),
):
    """
    Plot the mean of the estimated posterior for images.

    Parameters
    ----------
    data : Dataset
        Dataset from which to sample the images.
    get_cntxt_trgt : callable or dict
        Function that takes as input the features and tagrets `X`, `y` and return
        the corresponding `X_cntxt, Y_cntxt, X_trgt, Y_trgt`. If dict should contain the correct
        `X_cntxt, Y_cntxt, X_trgt, Y_trgt`.
    model : nn.Module
        Model used for plotting.
    is_uniform_grid : bool, optional
        Whether the input are the image and corresponding masks rather than
        the slected pixels. Typically used for `GridConvCNP`.
    img_indcs : list of int, optional
        Indices of the images to plot. If `None` will randomly sample `n_plots`
        of them.
    n_plots : int, optional
        Number of images to samples. They will be plotted in different columns.
        Only used if `img_indcs` is `None`.
    imgsize : tuple, optional
        Figsize for each subimage. Will be multiplied by the number of plotted images.
    ax : plt.axes.Axes, optional
    seed : int, optional
    is_return : bool, optional
        Whether to return the grid instead of plotting it.
    is_hrztl_cat : bool, optional
        Whether to concatenate the plots horizontally instead of vertically. Only works well for
        n_plots=1.
    n_samples : int, optional
        Number of samples to plot.
    outs : tuple of tensors, optional
        Samples `(y_pred, mask_cntxt, Y_cntxt, mask_trgt)` to plot instead of using `get_posterior_samples`.
    is_select_different : bool, optional
        Whether to select the `n_samples` most different samples (in average L2 dist) instead of random.
    is_plot_std : bool, optional
        Whether to plot the standard deviation of the posterior predictive instead of only the mean.
        Note that the std is the average std across channels and is only shown for the last sample.
    interp_baselines : list of {"linear","nearest","cubic"}, optional
        List of interpolating baselines to plot in addition to the prediction from the model.
    is_add_annot : bool, optional
        Whether to add annotations *context, mean, ...).
    rotate_annot : float or {'vertical', 'horizontal'} or str, optional
        Rotation of annotation. If None automatic.
    is_mask_cntxt : bool, optional
        Whether to mask the context. If false plors the entire image, this is especially usefull
        when doing superresolution as all the image corresponds to the downscaled image. In this
        case, `get_cntxt_trgt` should be `SuperresolutionCntxtTrgtGetter`.
    labels : dict, optional
        Labels to use.
    """
    # NOTE(review): `interp_baselines` and `labels` are mutable defaults; they
    # are only read here, but tuple/None defaults would be safer.
    if outs is None:
        y_pred, mask_cntxt, X, mask_trgt = get_posterior_samples(
            data,
            get_cntxt_trgt,
            model,
            is_uniform_grid=is_uniform_grid,
            img_indcs=img_indcs,
            n_plots=n_plots,
            seed=seed,
            n_samples=n_samples,
            is_select_different=is_select_different,
        )
    else:
        # Caller supplied precomputed predictions (e.g. plot_img_marginal_pred).
        y_pred, mask_cntxt, X, mask_trgt = outs
    if n_samples > 1 and not isinstance(model, LatentNeuralProcessFamily):
        if is_plot_std:
            raise ValueError("Cannot plot std when sampling from a CNPF.")
        # sampling for CNPFS
        mean_ys = y_pred.sample_n(n_samples)[:, 0, ...]
    else:
        # LNPF: latent samples already live in the first dim of the mean.
        mean_ys = y_pred.base_dist.loc
    # NOTE(review): `mean_y` appears unused below.
    mean_y = mean_ys[0]
    if n_samples > mean_ys.size(0):
        raise ValueError(
            f"Trying to plot more samples {n_samples} than the number of latent samples {mean_ys.size(0)}."
        )
    if isinstance(get_cntxt_trgt, dict):
        n_plots = get_cntxt_trgt["X_cntxt"].size(0)
    dim_grid = 2 if is_uniform_grid else 1
    if is_uniform_grid:
        mean_ys = mean_ys.view(n_samples, *X.shape)
    if X.shape[-1] == 1:
        # Grayscale: replicate the single channel so plotting uses RGB.
        X = X.expand(-1, *[-1] * dim_grid, 3)
        mean_ys = mean_ys.expand(n_samples, -1, *[-1] * dim_grid, 3)
        # make sure uses 3 channels
    std_ys = y_pred.base_dist.scale.expand(*mean_ys.shape)
    # First row/column: the context pixels on the missing-pixel background.
    out_cntxt, mask_cntxt = get_img_toplot(
        data,
        X,
        mask_cntxt,
        is_uniform_grid,
        downscale_factor=get_downscale_factor(get_cntxt_trgt),
        is_mask=is_mask_cntxt,
    )
    outs = [out_cntxt]
    y_ticks_labels = ["Context"]
    # One row/column per posterior sample.
    for i in range(n_samples):
        out_pred, _ = get_img_toplot(
            data,
            mean_ys[i],
            mask_trgt,
            is_uniform_grid,
            downscale_factor=get_downscale_factor(get_cntxt_trgt),
        )
        outs.append(out_pred)
        if n_samples > 1:
            y_ticks_labels += [f"Sample {i+1}"]
        else:
            y_ticks_labels += [labels["mean"]]
    if is_plot_std:
        out_std, _ = get_img_toplot(
            data,
            std_ys[n_samples - 1],  # only plot last std
            mask_trgt,
            is_uniform_grid,
            downscale_factor=get_downscale_factor(get_cntxt_trgt),
        )
        outs.append(out_std)
        if n_samples > 1:
            y_ticks_labels += [f"Std {n_samples}"]
        else:
            y_ticks_labels += [labels["std"]]
    # One row/column per classical interpolation baseline (scipy griddata).
    for interp in interp_baselines:
        # removing batch and channel from mask
        out_interps = []
        # loop over all context plots
        for i in range(mask_cntxt.shape[0]):
            single_mask_cntxt = mask_cntxt[i, :, :, 0]
            coord_y, coord_x = single_mask_cntxt.nonzero().unbind(1)
            grid_x, grid_y = np.meshgrid(
                np.arange(0, out_cntxt.shape[2]), np.arange(0, out_cntxt.shape[1])
            )
            out_interp = griddata(
                (coord_x, coord_y),
                out_cntxt[i, coord_y, coord_x],
                (grid_x, grid_y),
                method=interp,
            )
            out_interps.append(torch.from_numpy(out_interp))
        out_interp = torch.stack(out_interps, dim=0)
        outs.append(out_interp)
        y_ticks_labels += [labels["baseline"].format(baseline=interp.title())]
    outs = channels_to_2nd_dim(torch.cat(outs, dim=0)).detach()
    if is_hrztl_cat:
        # Interleave so each image's rows become columns.
        tmp = []
        for i in range(n_plots):
            tmp.extend(outs[i::n_plots])
        outs = tmp
    n_fig_per_row = n_plots
    n_fig_per_col = len(y_ticks_labels)
    if is_hrztl_cat:
        n_fig_per_row, n_fig_per_col = n_fig_per_col, n_fig_per_row
    grid = make_grid(outs, nrow=n_fig_per_row, pad_value=1.0)
    if is_return:
        return grid
    if ax is None:
        figsize = (imgsize[0] * n_fig_per_row, imgsize[1] * n_fig_per_col)
        fig, ax = plt.subplots(figsize=figsize)
    ax.imshow(grid.permute(1, 2, 0).numpy())
    if is_add_annot:
        # Place one tick at the center of each sub-image row/column.
        idx_text = 2 if is_hrztl_cat else 1
        middle_img = data.shape[idx_text] // 2 + 1  # half height
        y_ticks = [middle_img * (2 * i + 1) for i in range(len(y_ticks_labels))]
        if is_hrztl_cat:
            if rotate_annot is None:
                rotate_annot = 20
            # to test
            ax.xaxis.set_major_locator(ticker.FixedLocator(y_ticks))
            ax.set_xticklabels(y_ticks_labels, rotation=rotate_annot, ha="right")
            ax.set_yticks([])
        else:
            if rotate_annot is None:
                rotate_annot = "vertical"
            ax.yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
            ax.set_yticklabels(y_ticks_labels, rotation=rotate_annot, va="center")
            ax.set_xticks([])
        remove_axis(ax)
    else:
        ax.axis("off")
# TO CLEAN
def plot_qualitative_with_kde(
    named_trainer,
    dataset,
    named_trainer_compare=None,
    n_images=8,
    percentiles=None,  # if None uses uniform linspace from n_images
    figsize=DFLT_FIGSIZE,
    title=None,
    seed=123,
    height_ratios=[1, 3],
    font_size=12,
    h_pad=-3,
    x_lim={},
    is_smallest_xrange=False,
    kdeplot_kwargs={},
    n_samples=1,
    upscale_factor=1,
    **kwargs,
):
    """
    Plot qualitative samples using `plot_posterior_samples` but select the samples and mask to plot
    given the score at test time.

    Parameters
    ----------
    named_trainer : list [name, NeuralNet]
        Trainer (model outputted of training) and the name under which it should be displayed.

    dataset :

    named_trainer_compare : list [name, NeuralNet], optional
        Like `named_trainer` but for a model against which to compare.

    n_images : int, optional
        Number of images to plot (at uniform interval of log like). Only used if `percentiles` is None.

    percentiles : list of float, optional
        Percentiles of log likelihood of the main model for which to select an image. The length
        of the list will correspond to the number fo images.

    figsize : tuple, optional

    title : str, optional

    seed : int, optional

    height_ratios : int iterable of length = nrows, optional
        Height ratios of the rows.

    font_size : int, optional

    h_pad : int, optional
        Padding between kde plot and images

    x_lim : dict, optional
        Dictionary containing one (or both) of "left", "right" correspomding to the x limit of kde plot.

    is_smallest_xrange : bool, optional
        Whether to rescale the x axis based on the range of percentils.

    kdeplot_kwargs : dict, optional
        Additional arguments to `sns.kdeplot`

    upscale_factor : float, optional
        Whether to upscale the image => extrapolation. Only if not uniform grid.

    kwargs
        !VERY DIRTY
    """
    # NOTE(review): `kwargs` is mutated here and read by the nested closures
    # below — be careful when refactoring.
    kwargs["n_samples"] = n_samples
    kwargs["is_plot_std"] = False
    kwargs["is_add_annot"] = False
    if percentiles is not None:
        n_images = len(percentiles)
    plt.rcParams.update({"font.size": font_size})
    fig, axes = plt.subplots(
        2, 1, figsize=figsize, gridspec_kw={"height_ratios": height_ratios}
    )
    # a dictionary that has "upscale_factor" which is needed for downscaling when plotting
    # only is not grided
    CntxtTrgtDictUpscale = partial(CntxtTrgtDict, upscale_factor=upscale_factor)
    def _plot_kde_loglike(name, trainer):
        # KDE of the per-image test log-likelihoods saved at evaluation time.
        chckpnt_dirname = dict(trainer.callbacks_)["Checkpoint"].dirname
        test_eval_file = os.path.join(chckpnt_dirname, EVAL_FILENAME)
        test_loglike = np.loadtxt(test_eval_file, delimiter=",")
        sns.kdeplot(
            test_loglike, ax=axes[0], shade=True, label=name, cut=0, **kdeplot_kwargs
        )
        sns.despine()
        return test_loglike
    def _grid_to_points(selected_data):
        # Convert grid-style (image + mask) data to point-style (coords, values).
        cntxt_trgt_getter = GridCntxtTrgtGetter(upscale_factor=upscale_factor)
        for i in range(n_images):
            X = selected_data["Y_cntxt"][i]
            X_cntxt, Y_cntxt = cntxt_trgt_getter.select(
                X, None, selected_data["X_cntxt"][i]
            )
            X_trgt, Y_trgt = cntxt_trgt_getter.select(
                X, None, selected_data["X_trgt"][i]
            )
            yield CntxtTrgtDictUpscale(
                X_cntxt=X_cntxt, Y_cntxt=Y_cntxt, X_trgt=X_trgt, Y_trgt=Y_trgt
            )
    def _plot_posterior_img_selected(name, trainer, selected_data, is_grided_trainer):
        # Render the selected images, converting between point- and grid-style
        # data depending on whether the model expects masked grids.
        is_uniform_grid = isinstance(trainer.module_, GridConvCNP)
        kwargs["img_indcs"] = []
        kwargs["is_uniform_grid"] = is_uniform_grid
        kwargs["is_return"] = True
        if not is_uniform_grid:
            if is_grided_trainer:
                grids = [
                    plot_posterior_samples(
                        dataset, data, trainer.module_.cpu(), **kwargs
                    )
                    for i, data in enumerate(_grid_to_points(selected_data))
                ]
            else:
                grids = [
                    plot_posterior_samples(
                        dataset,
                        CntxtTrgtDictUpscale(
                            **{k: v[i] for k, v in selected_data.items()}
                        ),
                        trainer.module_.cpu(),
                        **kwargs,
                    )
                    for i in range(n_images)
                ]
            # images are padded by 2 pixels inbetween each but here you concatenate => will pad twice
            # => remove all the rleft padding for each besides first
            grids = [g[..., 2:] if i != 0 else g for i, g in enumerate(grids)]
            return torch.cat(grids, axis=-1)
        elif is_uniform_grid:
            if not is_grided_trainer:
                grids = []
                for i in range(n_images):
                    _, X_cntxt = points_to_grid(
                        selected_data["X_cntxt"][i],
                        selected_data["Y_cntxt"][i],
                        dataset.shape[1:],
                        background=torch.tensor([0.0] * dataset.shape[0]),
                    )
                    Y_trgt, X_trgt = points_to_grid(
                        selected_data["X_trgt"][i],
                        selected_data["Y_trgt"][i],
                        dataset.shape[1:],
                        background=torch.tensor([0.0] * dataset.shape[0]),
                    )
                    grids.append(
                        plot_posterior_samples(
                            dataset,
                            dict(
                                X_cntxt=X_cntxt,
                                Y_cntxt=Y_trgt,  # Y_trgt is all X because no masking for target (assumption)
                                X_trgt=X_trgt,
                                Y_trgt=Y_trgt,
                            ),
                            trainer.module_.cpu(),
                            **kwargs,
                        )
                    )
                grids = [g[..., 2:] if i != 0 else g for i, g in enumerate(grids)]
                return torch.cat(grids, axis=-1)
            else:
                return plot_posterior_samples(
                    dataset,
                    {k: torch.cat(v, dim=0) for k, v in selected_data.items()},
                    trainer.module_.cpu(),
                    **kwargs,
                )
    name, trainer = named_trainer
    test_loglike = _plot_kde_loglike(name, trainer)
    if named_trainer_compare is not None:
        left = axes[0].get_xlim()[0]
        _ = _plot_kde_loglike(*named_trainer_compare)
        axes[0].set_xlim(left=left)  # left bound by first model to not look strange
    if len(x_lim) != 0:
        axes[0].set_xlim(**x_lim)
    if percentiles is not None:
        idcs = []
        values = []
        for i, p in enumerate(percentiles):
            # value closest to percentile
            # NOTE(review): `interpolation=` is deprecated in recent numpy in
            # favor of `method=` — confirm the pinned numpy version.
            percentile_val = np.percentile(test_loglike, p, interpolation="nearest")
            idcs.append(np.argwhere(test_loglike == percentile_val).item())
            values.append(percentile_val)
        sorted_idcs = list(np.sort(idcs))[::-1]
        if is_smallest_xrange:
            axes[0].set_xlim(left=values[0] - 0.05, right=values[-1] + 0.05)
    else:
        # find indices such that same space between all
        values = np.linspace(test_loglike.min(), test_loglike.max(), n_images)
        idcs = [(np.abs(test_loglike - v)).argmin() for v in values]
        sorted_idcs = list(np.sort(idcs))[::-1]
    axes[0].set_ylabel("Density")
    axes[0].set_xlabel("Test Log-Likelihood")
    selected_data = []
    set_seed(seed)  # make sure same order and indices for cntxt and trgt
    i = -1
    # NOTE(review): `saved_values` appears unused.
    saved_values = []
    queue = sorted_idcs.copy()
    next_idx = queue.pop()
    # Walk the test iterator once, grabbing the batches at the selected indices
    # (indices are consumed in descending order from `queue`).
    for data in trainer.get_iterator(dataset, training=False):
        Xi, yi = unpack_data(data)
        for cur_idx in range(yi.size(0)):
            i += 1
            if next_idx != i:
                continue
            selected_data.append(
                {k: v[cur_idx : cur_idx + 1, ...] for k, v in Xi.items()}
            )
            if len(queue) == 0:
                break
            else:
                next_idx = queue.pop()
    # puts back to non sorted array
    selected_data = [selected_data[sorted_idcs[::-1].index(idx)] for idx in idcs]
    selected_data = {k: v for k, v in tuple_cont_to_cont_tuple(selected_data).items()}
    for v in values:
        axes[0].axvline(v, linestyle=":", alpha=0.7, c="tab:green")
    axes[0].legend(loc="upper left")
    if title is not None:
        axes[0].set_title(title, fontsize=18)
    is_grided_trainer = isinstance(trainer.module_, GridConvCNP)
    grid = _plot_posterior_img_selected(name, trainer, selected_data, is_grided_trainer)
    middle_img = dataset.shape[1] // 2 + 1  # half height
    y_ticks = [middle_img, middle_img * 3]
    y_ticks_labels = ["Context", name]
    if named_trainer_compare is not None:
        # Stack the comparison model's rows below, dropping its context row.
        grid_compare = _plot_posterior_img_selected(
            *named_trainer_compare, selected_data, is_grided_trainer
        )
        grid = torch.cat(
            (grid, grid_compare[:, grid_compare.size(1) // (n_samples + 1) + 1 :, :]),
            dim=1,
        )
        y_ticks += [middle_img * (3 + 2 * n_samples)]
        y_ticks_labels += [named_trainer_compare[0]]
    axes[1].imshow(grid.permute(1, 2, 0).numpy())
    axes[1].yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
    axes[1].set_yticklabels(y_ticks_labels, rotation="vertical", va="center")
    remove_axis(axes[1])
    if percentiles is not None:
        axes[1].xaxis.set_major_locator(
            ticker.FixedLocator(
                [
                    (dataset.shape[2] // 2 + 1) * (i * 2 + 1)
                    for i, p in enumerate(percentiles)
                ]
            )
        )
        axes[1].set_xticklabels(["{}%".format(p) for p in percentiles])
    else:
        axes[1].set_xticks([])
    fig.tight_layout(h_pad=h_pad)
# HELPERS
def get_img_toplot(
    data, to_plot, mask, is_uniform_grid, downscale_factor=1, is_mask=True
):
    """Return `(image, mask)` ready for plotting.

    Pixels outside the mask are filled with the dataset's missing-pixel
    color. When `is_mask` is False the whole image is shown, but the true
    mask is still returned.
    """
    # When not masking, pretend every pixel is observed.
    effective_mask = mask if is_mask else torch.ones_like(mask).bool()
    dim_grid = 2 if is_uniform_grid else 1

    if is_uniform_grid:
        # Broadcast the fill color over the whole image, then overlay.
        fill = (
            data.missing_px_color.view(1, *[1] * dim_grid, 3)
            .expand(*to_plot.shape)
            .clone()
        )
        if mask.size(-1) == 1:
            out = torch.where(effective_mask, to_plot, fill)
        else:
            fill[effective_mask.squeeze(-1)] = to_plot.reshape(-1, 3)
            out = fill.clone()
    else:
        out, _ = points_to_grid(
            effective_mask,
            to_plot,
            data.shape[1:],
            background=data.missing_px_color,
            downscale_factor=downscale_factor,
        )
        # Second call recovers the *true* mask even when `is_mask` is False.
        _, mask = points_to_grid(
            mask, to_plot, data.shape[1:], downscale_factor=downscale_factor
        )
    return out, mask
def keep_most_different_samples_(samples, n_samples, p=2):
    """Keep the `n_samples` most different samples (of the posterior predictive) using Lp distance of mean."""
    n_available = samples.batch_shape[0]
    assert n_samples <= n_available
    loc = samples.base_dist.loc
    scale = samples.base_dist.scale

    # Greedy farthest-point selection, seeded with the first sample.
    chosen = [0]
    candidates = set(range(1, n_available))
    for _ in range(n_samples - 1):
        # Average Lp distance from each candidate mean to all chosen means.
        avg_dist = {
            cand: mean(
                [torch.dist(loc[sel], loc[cand], p=p) for sel in chosen]
            )
            for cand in candidates
        }
        best = max(avg_dist, key=avg_dist.get)
        chosen.append(best)
        candidates.remove(best)

    # Mutate the distribution in place to keep only the selected samples.
    samples.base_dist.loc = loc[chosen]
    samples.base_dist.scale = scale[chosen]
def marginal_log_like(predictive, samples):
    """Estimate the marginal likelihood of `samples` under `predictive`.

    The likelihood is averaged over the z-samples (first batch dimension) in
    log space for numerical stability and returned in probability space.
    """
    # size = [n_z_samples, batch_size, *]
    log_probs = predictive.log_prob(samples)
    n_z_samples = predictive.batch_shape[0]
    # log( (1/n) * sum_i exp(log_p_i) ), computed stably with logsumexp.
    marginal_ll = torch.logsumexp(log_probs, dim=0) - math.log(n_z_samples)
    return marginal_ll.exp()
def sarle(out, axis=0):
    """Return Sarle's bimodality coefficient along `axis`.

    BC = (g^2 + 1) / (k + 3*(n-1)^2 / ((n-2)*(n-3)))

    where `g` is the sample skewness, `k` the excess (Fisher) kurtosis, and
    `n` the number of observations along `axis`. Values close to 1 suggest
    multimodality; a uniform distribution gives ~5/9.

    Parameters
    ----------
    out : array-like of observations.
    axis : int, axis along which the statistics are computed.
    """
    k = scipy.stats.kurtosis(out, axis=axis, fisher=True)
    g = scipy.stats.skew(out, axis=axis)
    # n is the number of observations along the reduced axis
    # (was hard-coded to out.shape[1], which is wrong for axis=0).
    n = out.shape[axis]
    # Sarle's finite-sample correction uses (n-2)*(n-3), not (n-2)^2.
    denom = k + 3 * (n - 1) ** 2 / ((n - 2) * (n - 3))
    return (g ** 2 + 1) / denom
def get_downscale_factor(get_cntxt_trgt):
    """Return the scaling factor for the test set (used when extrapolating)."""
    # Getters without an `upscale_factor` attribute default to no scaling.
    return getattr(get_cntxt_trgt, "upscale_factor", 1)
def remove_axis(ax, is_rm_ticks=True, is_rm_spines=True):
    """Hide the axis decorations (spines and/or ticks) while keeping the labels."""
    if is_rm_spines:
        # Hide every spine and the surrounding frame.
        for side in ("right", "top", "bottom", "left"):
            ax.spines[side].set_visible(False)
        ax.set_frame_on(False)
    if is_rm_ticks:
        ax.tick_params(bottom="off", left="off")
def idcs_grid_to_idcs_flatten(idcs, grid_shape):
    """Convert a tensor containing indices of a grid to indices on the flatten grid."""
    # Row-major (C-order) flattening: each coordinate is scaled by the number
    # of elements in the remaining trailing dimensions, then summed.
    # NOTE: mutates `idcs` in place — callers that reuse `idcs` must clone first.
    for i, size in enumerate(grid_shape):
        idcs[:, :, i] *= prod(grid_shape[i + 1 :])
    return idcs.sum(-1)
def points_to_grid(
    X, Y, grid_shape, background=torch.tensor([0.0, 0.0, 0.0]), downscale_factor=1
):
    """Converts points to a grid (undo mask select from datasplit).

    Parameters
    ----------
    X : coordinates of the points, assumed in [-1, 1] per grid dimension
        (scaled by `downscale_factor` when extrapolating) — TODO confirm range.
    Y : values at those points, shape [batch_size, n_points, y_dim].
    grid_shape : spatial shape of the output grid.
    background : fill value for grid cells without a point. NOTE: mutable
        default tensor is safe here because it is only read, never mutated.
    downscale_factor : divides the coordinates before gridding.

    Returns
    -------
    (grid, mask) of shapes [batch_size, *grid_shape, y_dim] and
    [batch_size, *grid_shape, 1].
    """
    batch_size, _, y_dim = Y.shape
    # Work on a copy: the coordinate rescaling below is done in place.
    X = X.clone()
    # Background canvas broadcast to [batch_size, *grid_shape, y_dim].
    background = background.view(1, *(1 for _ in grid_shape), y_dim).repeat(
        batch_size, *grid_shape, 1
    )
    X /= downscale_factor
    # Map coordinates from [-1, 1] to pixel indices [0, size - 1].
    for i, size in enumerate(grid_shape):
        X[:, :, i] += 1 # in [0,2]
        X[:, :, i] /= 2 / (size - 1) # in [0,size]
    X = X.round().long()
    idcs = idcs_grid_to_idcs_flatten(X, grid_shape)
    background = background.view(batch_size, -1, y_dim)
    mask = torch.zeros(batch_size, background.size(1), 1).bool()
    # Scatter each batch's point values onto the flattened canvas.
    for b in range(batch_size):
        background[b, idcs[b], :] = Y[b]
        mask[b, idcs[b], :] = True
    background = background.view(batch_size, *grid_shape, y_dim)
    return background, mask.view(batch_size, *grid_shape, 1)
class CntxtTrgtDict(dict):
    """A ``dict`` subclass that carries an ``upscale_factor`` attribute.

    All positional and keyword arguments besides ``upscale_factor`` are
    forwarded to the ``dict`` constructor.
    """

    def __init__(self, *args, upscale_factor=1, **kwargs):
        super().__init__(*args, **kwargs)
        self.upscale_factor = upscale_factor
| [
"seaborn.kdeplot",
"numpy.abs",
"torch.cat",
"logging.getLogger",
"numpy.argsort",
"npf.utils.predict.SamplePredictor",
"npf.utils.helpers.MultivariateNormalDiag",
"numpy.arange",
"os.path.join",
"torch.dist",
"npf.utils.datasplit.GridCntxtTrgtGetter",
"matplotlib.ticker.FixedLocator",
"matp... | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (935, 945), False, 'import logging\n'), ((1127, 1141), 'utils.helpers.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (1135, 1141), False, 'from utils.helpers import mean, set_seed, tuple_cont_to_cont_tuple\n'), ((1347, 1397), 'torchvision.utils.make_grid', 'make_grid', (['img_tensor'], {'nrow': '(2)', 'pad_value': 'pad_value'}), '(img_tensor, nrow=2, pad_value=pad_value)\n', (1356, 1397), False, 'from torchvision.utils import make_grid\n'), ((1742, 1756), 'utils.helpers.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (1750, 1756), False, 'from utils.helpers import mean, set_seed, tuple_cont_to_cont_tuple\n'), ((3567, 3662), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'gridspec_kw': "{'width_ratios': [1, 1], 'wspace': wspace}", 'figsize': 'figsize'}), "(1, 2, gridspec_kw={'width_ratios': [1, 1], 'wspace': wspace},\n figsize=figsize)\n", (3579, 3662), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4144), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (4132, 4144), False, 'import torch\n'), ((5383, 5442), 'seaborn.despine', 'sns.despine', ([], {'top': '(True)', 'right': '(True)', 'left': '(False)', 'bottom': '(False)'}), '(top=True, right=True, left=False, bottom=False)\n', (5394, 5442), True, 'import seaborn as sns\n'), ((12906, 12956), 'torchvision.utils.make_grid', 'make_grid', (['outs'], {'nrow': 'n_fig_per_row', 'pad_value': '(1.0)'}), '(outs, nrow=n_fig_per_row, pad_value=1.0)\n', (12915, 12956), False, 'from torchvision.utils import make_grid\n'), ((16208, 16253), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': font_size}"], {}), "({'font.size': font_size})\n", (16227, 16253), True, 'import matplotlib.pyplot as plt\n'), ((16270, 16355), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': 'figsize', 'gridspec_kw': "{'height_ratios': 
height_ratios}"}), "(2, 1, figsize=figsize, gridspec_kw={'height_ratios':\n height_ratios})\n", (16282, 16355), True, 'import matplotlib.pyplot as plt\n'), ((16510, 16563), 'functools.partial', 'partial', (['CntxtTrgtDict'], {'upscale_factor': 'upscale_factor'}), '(CntxtTrgtDict, upscale_factor=upscale_factor)\n', (16517, 16563), False, 'from functools import partial\n'), ((21852, 21866), 'utils.helpers.set_seed', 'set_seed', (['seed'], {}), '(seed)\n', (21860, 21866), False, 'from utils.helpers import mean, set_seed, tuple_cont_to_cont_tuple\n'), ((26767, 26816), 'scipy.stats.kurtosis', 'scipy.stats.kurtosis', (['out'], {'axis': 'axis', 'fisher': '(True)'}), '(out, axis=axis, fisher=True)\n', (26787, 26816), False, 'import scipy\n'), ((26825, 26857), 'scipy.stats.skew', 'scipy.stats.skew', (['out'], {'axis': 'axis'}), '(out, axis=axis)\n', (26841, 26857), False, 'import scipy\n'), ((27970, 27999), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (27982, 27999), False, 'import torch\n'), ((1180, 1209), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1192, 1209), True, 'import matplotlib.pyplot as plt\n'), ((2700, 2736), 'npf.utils.predict.SamplePredictor', 'SamplePredictor', (['model'], {'is_dist': '(True)'}), '(model, is_dist=True)\n', (2715, 2736), False, 'from npf.utils.predict import SamplePredictor\n'), ((4360, 4482), 'npf.utils.helpers.MultivariateNormalDiag', 'MultivariateNormalDiag', (['predictive_all.base_dist.loc[:, i:i + 1, ...]', 'predictive_all.base_dist.scale[:, i:i + 1, ...]'], {}), '(predictive_all.base_dist.loc[:, i:i + 1, ...],\n predictive_all.base_dist.scale[:, i:i + 1, ...])\n', (4382, 4482), False, 'from npf.utils.helpers import MultivariateNormalDiag, channels_to_2nd_dim, prod\n'), ((5290, 5313), 'numpy.argsort', 'np.argsort', (['best_sarles'], {}), '(best_sarles)\n', (5300, 5313), True, 'import numpy as np\n'), ((12395, 12426), 'torch.stack', 'torch.stack', 
(['out_interps'], {'dim': '(0)'}), '(out_interps, dim=0)\n', (12406, 12426), False, 'import torch\n'), ((13109, 13138), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13121, 13138), True, 'import matplotlib.pyplot as plt\n'), ((16705, 16749), 'os.path.join', 'os.path.join', (['chckpnt_dirname', 'EVAL_FILENAME'], {}), '(chckpnt_dirname, EVAL_FILENAME)\n', (16717, 16749), False, 'import os\n'), ((16773, 16814), 'numpy.loadtxt', 'np.loadtxt', (['test_eval_file'], {'delimiter': '""","""'}), "(test_eval_file, delimiter=',')\n", (16783, 16814), True, 'import numpy as np\n'), ((16823, 16914), 'seaborn.kdeplot', 'sns.kdeplot', (['test_loglike'], {'ax': 'axes[0]', 'shade': '(True)', 'label': 'name', 'cut': '(0)'}), '(test_loglike, ax=axes[0], shade=True, label=name, cut=0, **\n kdeplot_kwargs)\n', (16834, 16914), True, 'import seaborn as sns\n'), ((16940, 16953), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (16951, 16953), True, 'import seaborn as sns\n'), ((17051, 17101), 'npf.utils.datasplit.GridCntxtTrgtGetter', 'GridCntxtTrgtGetter', ([], {'upscale_factor': 'upscale_factor'}), '(upscale_factor=upscale_factor)\n', (17070, 17101), False, 'from npf.utils.datasplit import GridCntxtTrgtGetter\n'), ((22095, 22112), 'skorch.dataset.unpack_data', 'unpack_data', (['data'], {}), '(data)\n', (22106, 22112), False, 'from skorch.dataset import unpack_data\n'), ((23683, 23711), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['y_ticks'], {}), '(y_ticks)\n', (23702, 23711), True, 'import matplotlib.ticker as ticker\n'), ((26601, 26626), 'torch.logsumexp', 'torch.logsumexp', (['log_p', '(0)'], {}), '(log_p, 0)\n', (26616, 26626), False, 'import torch\n'), ((26629, 26664), 'math.log', 'math.log', (['predictive.batch_shape[0]'], {}), '(predictive.batch_shape[0])\n', (26637, 26664), False, 'import math\n'), ((27865, 27889), 'npf.utils.helpers.prod', 'prod', (['grid_shape[i + 1:]'], {}), '(grid_shape[i + 1:])\n', (27869, 
27889), False, 'from npf.utils.helpers import MultivariateNormalDiag, channels_to_2nd_dim, prod\n'), ((4717, 4737), 'numpy.median', 'np.median', (['new_sarle'], {}), '(new_sarle)\n', (4726, 4737), True, 'import numpy as np\n'), ((5057, 5079), 'numpy.median', 'np.median', (['best_sarles'], {}), '(best_sarles)\n', (5066, 5079), True, 'import numpy as np\n'), ((12140, 12237), 'scipy.interpolate.griddata', 'griddata', (['(coord_x, coord_y)', 'out_cntxt[i, coord_y, coord_x]', '(grid_x, grid_y)'], {'method': 'interp'}), '((coord_x, coord_y), out_cntxt[i, coord_y, coord_x], (grid_x,\n grid_y), method=interp)\n', (12148, 12237), False, 'from scipy.interpolate import griddata\n'), ((18867, 18892), 'torch.cat', 'torch.cat', (['grids'], {'axis': '(-1)'}), '(grids, axis=-1)\n', (18876, 18892), False, 'import torch\n'), ((21149, 21204), 'numpy.percentile', 'np.percentile', (['test_loglike', 'p'], {'interpolation': '"""nearest"""'}), "(test_loglike, p, interpolation='nearest')\n", (21162, 21204), True, 'import numpy as np\n'), ((24697, 24743), 'torch.where', 'torch.where', (['mask_toapply', 'to_plot', 'background'], {}), '(mask_toapply, to_plot, background)\n', (24708, 24743), False, 'import torch\n'), ((2393, 2460), 'utils.data.cntxt_trgt_collate', 'cntxt_trgt_collate', (['get_cntxt_trgt'], {'is_return_masks': 'is_uniform_grid'}), '(get_cntxt_trgt, is_return_masks=is_uniform_grid)\n', (2411, 2460), False, 'from utils.data import cntxt_trgt_collate\n'), ((12034, 12066), 'numpy.arange', 'np.arange', (['(0)', 'out_cntxt.shape[2]'], {}), '(0, out_cntxt.shape[2])\n', (12043, 12066), True, 'import numpy as np\n'), ((12068, 12100), 'numpy.arange', 'np.arange', (['(0)', 'out_cntxt.shape[1]'], {}), '(0, out_cntxt.shape[1])\n', (12077, 12100), True, 'import numpy as np\n'), ((12344, 12372), 'torch.from_numpy', 'torch.from_numpy', (['out_interp'], {}), '(out_interp)\n', (12360, 12372), False, 'import torch\n'), ((12571, 12593), 'torch.cat', 'torch.cat', (['outs'], {'dim': '(0)'}), '(outs, 
dim=0)\n', (12580, 12593), False, 'import torch\n'), ((13557, 13585), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['y_ticks'], {}), '(y_ticks)\n', (13576, 13585), True, 'import matplotlib.ticker as ticker\n'), ((13833, 13861), 'matplotlib.ticker.FixedLocator', 'ticker.FixedLocator', (['y_ticks'], {}), '(y_ticks)\n', (13852, 13861), True, 'import matplotlib.ticker as ticker\n'), ((21350, 21363), 'numpy.sort', 'np.sort', (['idcs'], {}), '(idcs)\n', (21357, 21363), True, 'import numpy as np\n'), ((21721, 21734), 'numpy.sort', 'np.sort', (['idcs'], {}), '(idcs)\n', (21728, 21734), True, 'import numpy as np\n'), ((24399, 24420), 'torch.ones_like', 'torch.ones_like', (['mask'], {}), '(mask)\n', (24414, 24420), False, 'import torch\n'), ((20307, 20332), 'torch.cat', 'torch.cat', (['grids'], {'axis': '(-1)'}), '(grids, axis=-1)\n', (20316, 20332), False, 'import torch\n'), ((21642, 21666), 'numpy.abs', 'np.abs', (['(test_loglike - v)'], {}), '(test_loglike - v)\n', (21648, 21666), True, 'import numpy as np\n'), ((22623, 22662), 'utils.helpers.tuple_cont_to_cont_tuple', 'tuple_cont_to_cont_tuple', (['selected_data'], {}), '(selected_data)\n', (22647, 22662), False, 'from utils.helpers import mean, set_seed, tuple_cont_to_cont_tuple\n'), ((25953, 25995), 'torch.dist', 'torch.dist', (['loc[selected_idx]', 'loc[i]'], {'p': 'p'}), '(loc[selected_idx], loc[i], p=p)\n', (25963, 25995), False, 'import torch\n'), ((21229, 21272), 'numpy.argwhere', 'np.argwhere', (['(test_loglike == percentile_val)'], {}), '(test_loglike == percentile_val)\n', (21240, 21272), True, 'import numpy as np\n'), ((20451, 20470), 'torch.cat', 'torch.cat', (['v'], {'dim': '(0)'}), '(v, dim=0)\n', (20460, 20470), False, 'import torch\n'), ((19265, 19303), 'torch.tensor', 'torch.tensor', (['([0.0] * dataset.shape[0])'], {}), '([0.0] * dataset.shape[0])\n', (19277, 19303), False, 'import torch\n'), ((19562, 19600), 'torch.tensor', 'torch.tensor', (['([0.0] * dataset.shape[0])'], {}), '([0.0] * 
dataset.shape[0])\n', (19574, 19600), False, 'import torch\n')] |
from hls4ml.converters.keras_to_hls import keras_to_hls
import pytest
import hls4ml
import numpy as np
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.models import model_from_json
import yaml
@pytest.fixture(scope='module')
def data():
  """Random input of shape (n_samples, length, channels) for the Conv1D model."""
  return np.random.rand(100, 100, 7)
@pytest.fixture(scope='module')
def keras_model():
  """Load the example Keras Conv1D architecture and its trained weights."""
  # Use a context manager so the architecture file is closed promptly
  # (the original `open(...).read()` leaked the file handle).
  with open('../../example-models/keras/KERAS_conv1d.json', 'r') as f:
    jsons = f.read()
  model = model_from_json(jsons)
  model.load_weights('../../example-models/keras/KERAS_conv1d_weights.h5')
  return model
# NOTE: `@pytest.mark.parametrize` is silently ignored on fixtures, which left
# `settings` as an undefined fixture dependency. Fixture parametrization must
# use `params=` and `request.param` instead.
@pytest.fixture(params=[('io_parallel', 'latency'),
                        ('io_parallel', 'resource'),
                        ('io_stream', 'latency'),
                        ('io_stream', 'resource')])
def hls_model(request):
  """Build and compile an hls4ml model for each (io_type, strategy) combination."""
  io_type, strategy = request.param
  config = hls4ml.converters.create_config(output_dir = 'hls4mlprj_conv1d_{}_{}'.format(io_type, strategy))
  config['KerasJson'] = '../../example-models/keras/KERAS_conv1d.json'
  config['KerasH5'] = '../../example-models/keras/KERAS_conv1d_weights.h5'
  config['OutputDir'] = 'hls4mlprj_conv1d_{}_{}'.format(io_type, strategy)
  config['IOType'] = io_type
  hls_config = {'Model' : {'Strategy' : strategy,
                           'ReuseFactor' : 1,
                           'Precision' : 'ap_fixed<16,3,AP_RND_CONV,AP_SAT>'}}
  # Some model specific precision tuning
  config['LayerName'] = {}
  config['LayerName']['fc1_relu'] = {'Precision':{'weight' : 'ap_fixed<16,3>', 'result' : 'ap_fixed<16,6,AP_RND_CONV,AP_SAT>'}}
  config['LayerName']['output_softmax'] = {'Precision':{'weight' : 'ap_fixed<16,6>', 'result' : 'ap_fixed<16,6,AP_RND_CONV,AP_SAT>'}}
  config['LayerName']['output_softmax_softmax'] = {'Strategy':'Stable'}
  config['HLSConfig'] = hls_config
  hls_model = keras_to_hls(config)
  hls_model.compile()
  return hls_model
def test_accuracy(data, keras_model, hls_model):
  """Check that hls4ml predictions match Keras predictions on >98% of samples.

  Parametrization over (io_type, strategy) is driven by the `hls_model`
  fixture; the previous `pytest.mark.parametrize('settings', ...)` here was
  invalid because this test takes no `settings` argument (collection error).
  """
  X = data
  model = keras_model
  # model under test predictions and accuracy
  y_keras = model.predict(X)
  y_hls4ml = hls_model.predict(X)
  # "accuracy" of hls4ml predictions vs keras
  rel_acc = accuracy_score(np.argmax(y_keras, axis=1), np.argmax(y_hls4ml, axis=1))
  print('hls4ml accuracy relative to keras: {}'.format(rel_acc))
  assert rel_acc > 0.98
| [
"hls4ml.converters.keras_to_hls.keras_to_hls",
"numpy.argmax",
"pytest.fixture",
"numpy.random.rand",
"pytest.mark.parametrize",
"tensorflow.keras.models.model_from_json"
] | [((236, 266), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (250, 266), False, 'import pytest\n'), ((328, 358), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (342, 358), False, 'import pytest\n'), ((607, 763), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""settings"""', "[('io_parallel', 'latency'), ('io_parallel', 'resource'), ('io_stream',\n 'latency'), ('io_stream', 'resource')]"], {}), "('settings', [('io_parallel', 'latency'), (\n 'io_parallel', 'resource'), ('io_stream', 'latency'), ('io_stream',\n 'resource')])\n", (630, 763), False, 'import pytest\n'), ((2034, 2190), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""settings"""', "[('io_parallel', 'latency'), ('io_parallel', 'resource'), ('io_stream',\n 'latency'), ('io_stream', 'resource')]"], {}), "('settings', [('io_parallel', 'latency'), (\n 'io_parallel', 'resource'), ('io_stream', 'latency'), ('io_stream',\n 'resource')])\n", (2057, 2190), False, 'import pytest\n'), ((287, 314), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(7)'], {}), '(100, 100, 7)\n', (301, 314), True, 'import numpy as np\n'), ((466, 488), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['jsons'], {}), '(jsons)\n', (481, 488), False, 'from tensorflow.keras.models import model_from_json\n'), ((1966, 1986), 'hls4ml.converters.keras_to_hls.keras_to_hls', 'keras_to_hls', (['config'], {}), '(config)\n', (1978, 1986), False, 'from hls4ml.converters.keras_to_hls import keras_to_hls\n'), ((2576, 2602), 'numpy.argmax', 'np.argmax', (['y_keras'], {'axis': '(1)'}), '(y_keras, axis=1)\n', (2585, 2602), True, 'import numpy as np\n'), ((2604, 2631), 'numpy.argmax', 'np.argmax', (['y_hls4ml'], {'axis': '(1)'}), '(y_hls4ml, axis=1)\n', (2613, 2631), True, 'import numpy as np\n')] |
"""
.. _dynamictable-howtoguide:
DynamicTable How-To Guide
=========================
This is a user guide to interacting with ``DynamicTable`` objects.
"""
###############################################################################
# Introduction
# ------------
# The :py:class:`~hdmf.common.table.DynamicTable` class represents a column-based table
# to which you can add custom columns. It consists of a name, a description, a list of
# row IDs, and a list of columns. Columns are represented by objects of the class
# :py:class:`~hdmf.common.table.VectorData`, including subclasses of
# :py:class:`~hdmf.common.table.VectorData`, such as :py:class:`~hdmf.common.table.VectorIndex`,
# and :py:class:`~hdmf.common.table.DynamicTableRegion`.
###############################################################################
# Constructing a table
# --------------------
# To create a :py:class:`~hdmf.common.table.DynamicTable`, call the constructor for
# :py:class:`~hdmf.common.table.DynamicTable` with a string ``name`` and string
# ``description``. Specifying the arguments with keywords is recommended.
# sphinx_gallery_thumbnail_path = 'figures/gallery_thumbnail_dynamictable.png'
from hdmf.common import DynamicTable
# An empty table: columns and rows can be added afterwards.
table = DynamicTable(
    name='my_table',
    description='an example table',
)
###############################################################################
# Initializing columns
# --------------------
# You can create a :py:class:`~hdmf.common.table.DynamicTable` with particular
# columns by passing a list or tuple of
# :py:class:`~hdmf.common.table.VectorData` objects for the ``columns`` argument
# in the constructor.
#
# If the :py:class:`~hdmf.common.table.VectorData` objects contain data values,
# then each :py:class:`~hdmf.common.table.VectorData` object must contain the
# same number of rows as each other. A list of row IDs may be passed into the
# :py:class:`~hdmf.common.table.DynamicTable` constructor using the ``id``
# argument. If IDs are passed in, there should be the same number of rows as
# the column data. If IDs are not passed in, then the IDs will be set to
# ``range(len(column_data))`` by default.
from hdmf.common import VectorData, VectorIndex
# Both columns must have the same number of rows.
col1 = VectorData(
    name='col1',
    description='column #1',
    data=[1, 2],
)
col2 = VectorData(
    name='col2',
    description='column #2',
    data=['a', 'b'],
)
# this table will have two rows with ids 0 and 1 (default: range(len(data)))
table = DynamicTable(
    name='my table',
    description='an example table',
    columns=[col1, col2],
)
# this table will have two rows with the explicitly supplied ids 0 and 1
table_set_ids = DynamicTable(
    name='my table',
    description='an example table',
    columns=[col1, col2],
    id=[0, 1],
)
###############################################################################
# If a list of integers is passed to ``id``,
# :py:class:`~hdmf.common.table.DynamicTable` automatically creates
# an :py:class:`~hdmf.common.table.ElementIdentifiers` object, which is the data type
# that stores row IDs. The above command is equivalent to:
from hdmf.common.table import ElementIdentifiers
# Equivalent to passing id=[0, 1] directly.
table_set_ids = DynamicTable(
    name='my table',
    description='an example table',
    columns=[col1, col2],
    id=ElementIdentifiers(name='id', data=[0, 1]),
)
###############################################################################
# Adding rows
# -----------
# You can also add rows to a :py:class:`~hdmf.common.table.DynamicTable` using
# :py:meth:`DynamicTable.add_row <hdmf.common.table.DynamicTable.add_row>`.
# A keyword argument for every column in the table must be supplied.
# You may also supply an optional row ID.
# Every column must receive a value; the row ID (`id`) is optional.
table.add_row(
    col1=3,
    col2='c',
    id=2,
)
###############################################################################
# .. note::
# If no ID is supplied, the row ID is automatically set to the number of rows of the table prior to adding the new
# row. This can result in duplicate IDs. In general, IDs should be unique, but this is not enforced by default.
# Pass `enforce_unique_id=True` to :py:meth:`DynamicTable.add_row <hdmf.common.table.DynamicTable.add_row>`
# to raise an error if the ID is set to an existing ID value.
# this row will have ID 3 by default (the current number of rows in the table)
table.add_row(
    col1=4,
    col2='d',
)
###############################################################################
# Adding columns
# --------------
# You can add columns to a :py:class:`~hdmf.common.table.DynamicTable` using
# :py:meth:`DynamicTable.add_column <hdmf.common.table.DynamicTable.add_column>`.
# If the table already has rows, then the ``data`` argument must be supplied
# as a list of values, one for each row already in the table.
# Adding a column to a non-empty table requires one value per existing row.
table.add_column(
    name='col3',
    description='column #3',
    data=[True, True, False, True], # specify data for the 4 rows in the table
)
###############################################################################
# Enumerated (categorical) data
# -----------------------------
# :py:class:`~hdmf.common.table.EnumData` is a special type of column for storing
# an enumerated data type. This way each unique value is stored once, and the data
# references those values by index. Using this method is more efficient than storing
# a single value many times, and has the advantage of communicating to downstream
# tools that the data is categorical in nature.
from hdmf.common.table import EnumData
# this column has a length of 5, not 3. the first row has value "aa"
# (`data` stores indices into the `elements` list)
enum_col = EnumData(
    name='cell_type',
    description='this column holds categorical variables',
    data=[0, 1, 2, 1, 0],
    elements=['aa', 'bb', 'cc']
)
my_table = DynamicTable(
    name='my_table',
    description='an example table',
    columns=[enum_col],
)
###############################################################################
# Ragged array columns
# --------------------
# A table column with a different number of elements for each row is called a
# "ragged array column". To initialize a :py:class:`~hdmf.common.table.DynamicTable`
# with a ragged array column, pass both
# the :py:class:`~hdmf.common.table.VectorIndex` and its target
# :py:class:`~hdmf.common.table.VectorData` in for the ``columns``
# argument in the constructor. For instance, the following code creates a column
# called ``col1`` where the first cell is ['1a', '1b', '1c'] and the second cell
# is ['2a'].
# The flat data of the ragged column; the index below partitions it into rows.
col1 = VectorData(
    name='col1',
    description='column #1',
    data=['1a', '1b', '1c', '2a'],
)
# the 3 signifies that elements 0 to 3 (exclusive) of the target column belong to the first row
# the 4 signifies that elements 3 to 4 (exclusive) of the target column belong to the second row
col1_ind = VectorIndex(
    name='col1_index',
    target=col1,
    data=[3, 4],
)
table_ragged_col = DynamicTable(
    name='my table',
    description='an example table',
    columns=[col1, col1_ind],
)
####################################################################################
# .. note::
# By convention, the name of the :py:class:`~hdmf.common.table.VectorIndex` should be
# the name of the target column with the added suffix "_index".
####################################################################################
# VectorIndex.data provides the indices for how to break VectorData.data into cells
#
# You can add an empty ragged array column to an existing
# :py:class:`~hdmf.common.table.DynamicTable` by specifying ``index=True``
# to :py:meth:`DynamicTable.add_column <hdmf.common.table.DynamicTable.add_column>`.
# This method only works if run before any rows have been added to the table.
new_table = DynamicTable(
    name='my_table',
    description='an example table',
)
# `index=True` makes col4 a ragged array column (works only on an empty table).
new_table.add_column(
    name='col4',
    description='column #4',
    index=True,
)
###############################################################################
# If the table already contains data, you must specify the new column values for
# the existing rows using the ``data`` argument and you must specify the end indices of
# the ``data`` argument that correspond to each row as a list/tuple/array of values for
# the ``index`` argument.
# rows get data[0:3], data[3:4], data[4:6], data[6:8] respectively
table.add_column( # <-- this table already has 4 rows
    name='col4',
    description='column #4',
    data=[1, 0, -1, 0, -1, 1, 1, -1],
    index=[3, 4, 6, 8], # specify the end indices (exclusive) of data for each row
)
###############################################################################
# Referencing rows of other tables
# --------------------------------
# You can create a column that references rows of another table by adding a
# :py:class:`~hdmf.common.table.DynamicTableRegion` object as a column of your
# :py:class:`~hdmf.common.table.DynamicTable`. This is analogous to
# a foreign key in a relational database.
from hdmf.common.table import DynamicTableRegion
# Each row of table2 points at exactly one row (by index) of `table`.
dtr_col = DynamicTableRegion(
    name='table1_ref',
    description='references rows of earlier table',
    data=[0, 1, 0, 0], # refers to row indices of the 'table' variable
    table=table
)
data_col = VectorData(
    name='col2',
    description='column #2',
    data=['a', 'a', 'a', 'b'],
)
table2 = DynamicTable(
    name='my_table',
    description='an example table',
    columns=[dtr_col, data_col],
)
###############################################################################
# Here, the ``data`` of ``dtr_col`` maps to rows of ``table`` (0-indexed).
#
# .. note::
# The ``data`` values of :py:class:`~hdmf.common.table.DynamicTableRegion` map to the row
# index, not the row ID, though if you are using default IDs, these values will be the
# same.
#
# Reference more than one row of another table with a
# :py:class:`~hdmf.common.table.DynamicTableRegion` indexed by a
# :py:class:`~hdmf.common.table.VectorIndex`.
# An indexed (ragged) region: each row of table3 references several rows of `table`.
indexed_dtr_col = DynamicTableRegion(
    name='table1_ref2',
    description='references multiple rows of earlier table',
    data=[0, 0, 1, 1, 0, 0, 1],
    table=table
)
# row 0 refers to rows [0, 0], row 1 refers to rows [1], row 2 refers to rows [1, 0], row 3 refers to rows [0, 1] of
# the "table" variable
dtr_idx = VectorIndex(
    name='table1_ref2_index',
    target=indexed_dtr_col,
    data=[2, 3, 5, 7],
)
table3 = DynamicTable(
    name='my_table',
    description='an example table',
    columns=[dtr_idx, indexed_dtr_col],
)
###############################################################################
# Creating an expandable table
# ----------------------------
# When using the default HDF5 backend, each column of these tables is an HDF5 Dataset,
# which by default are fixed in size. This means that once a file is written, it is not
# possible to add a new row. If you want to be able to save this file, load it, and add
# more rows to the table, you will need to set this up when you create the
# :py:class:`~hdmf.common.table.DynamicTable`. You do this by wrapping the data with
# :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO` and the argument ``maxshape=(None, )``.
from hdmf.backends.hdf5.h5_utils import H5DataIO
# Wrap each column's data with H5DataIO so the HDF5 datasets stay resizable.
col1 = VectorData(
    name='expandable_col1',
    description='column #1',
    data=H5DataIO(data=[1, 2], maxshape=(None,)),
)
col2 = VectorData(
    name='expandable_col2',
    description='column #2',
    data=H5DataIO(data=['a', 'b'], maxshape=(None,)),
)
# don't forget to wrap the row IDs too!
ids = ElementIdentifiers(
    name='id',
    data=H5DataIO(data=[0, 1], maxshape=(None,)),
)
expandable_table = DynamicTable(
    name='expandable_table',
    description='an example table that can be expanded after being saved to a file',
    columns=[col1, col2],
    id=ids,
)
###############################################################################
# Now you can write the file, read it back, and run ``expandable_table.add_row()``.
# In this example, we are setting ``maxshape`` to ``(None,)``, which means this is a
# 1-dimensional matrix that can expand indefinitely along its single dimension. You
# could also use an integer in place of ``None``. For instance, ``maxshape=(8,)`` would
# allow the column to grow up to a length of 8. Whichever ``maxshape`` you choose,
# it should be the same for all :py:class:`~hdmf.common.table.VectorData` and
# :py:class:`~hdmf.common.table.ElementIdentifiers` objects in the
# :py:class:`~hdmf.common.table.DynamicTable`, since they must always be the same
# length. The default :py:class:`~hdmf.common.table.ElementIdentifiers` automatically
# generated when you pass a list of integers to the ``id`` argument of the
# :py:class:`~hdmf.common.table.DynamicTable` constructor is not expandable, so do not
# forget to create a :py:class:`~hdmf.common.table.ElementIdentifiers` object, and wrap
# that data as well. If any of the columns are indexed, the ``data`` argument of
# :py:class:`~hdmf.common.table.VectorIndex` will also need to be wrapped with
# :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO`.
#
#
# Converting the table to a pandas ``DataFrame``
# ----------------------------------------------
# `pandas`_ is a popular data analysis tool, especially for working with tabular data.
# You can convert your :py:class:`~hdmf.common.table.DynamicTable` to a
# :py:class:`~pandas.DataFrame` using
# :py:meth:`DynamicTable.to_dataframe <hdmf.common.table.DynamicTable.to_dataframe>`.
# Accessing the table as a :py:class:`~pandas.DataFrame` provides you with powerful,
# standard methods for indexing, selecting, and querying tabular data from `pandas`_.
# This is the recommended method of reading data from your table. See also the `pandas indexing documentation`_.
# Printing a :py:class:`~hdmf.common.table.DynamicTable` as a :py:class:`~pandas.DataFrame`
# or displaying the :py:class:`~pandas.DataFrame` in Jupyter shows a more intuitive
# tabular representation of the data than printing the
# :py:class:`~hdmf.common.table.DynamicTable` object.
#
# .. _pandas: https://pandas.pydata.org/
# .. _`pandas indexing documentation`: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html
# Convert the DynamicTable to a pandas DataFrame for analysis.
df = table.to_dataframe()
###############################################################################
# .. note::
#
# Changes to the ``DataFrame`` will not be saved in the ``DynamicTable``.
###############################################################################
# Converting the table from a pandas ``DataFrame``
# ------------------------------------------------
# If your data is already in a :py:class:`~pandas.DataFrame`, you can convert the
# ``DataFrame`` to a :py:class:`~hdmf.common.table.DynamicTable` using the class method
# :py:meth:`DynamicTable.from_dataframe <hdmf.common.table.DynamicTable.from_dataframe>`.
# Build a DynamicTable whose columns come from the DataFrame's columns.
table_from_df = DynamicTable.from_dataframe(
    name='my_table',
    df=df,
)
###############################################################################
# Accessing elements
# ------------------
# To access an element in the i-th row in the column with name "col_name" in a
# :py:class:`~hdmf.common.table.DynamicTable`, use square brackets notation:
# ``table[i, col_name]``. You can also use a tuple of row index and column
# name within the square brackets.
# Single-element access: (row index, column name).
table[0, 'col1']  # returns 1
table[(0, 'col1')]  # returns 1
###############################################################################
# If the column is a ragged array, instead of a single value being returned,
# a list of values for that element is returned.
# Ragged column: returns the list of elements of that row.
table[0, 'col4'] # returns [1, 0, -1]
###############################################################################
# Standard Python and numpy slicing can be used for the row index.
import numpy as np
table[:2, 'col1'] # get a list of elements from the first two rows at column 'col1'
table[0:3:2, 'col1'] # get a list of elements from rows 0 to 3 (exclusive) in steps of 2 at column 'col1'
table[3::-1, 'col1'] # get a list of elements from rows 3 to 0 in reverse order at column 'col1'
# the following are equivalent to table[0:3:2, 'col1']
table[slice(0, 3, 2), 'col1']
table[np.s_[0:3:2], 'col1']
table[[0, 2], 'col1']
table[np.array([0, 2]), 'col1']
###############################################################################
# If the column is a ragged array, instead of a list of row values being returned,
# a list of list elements for the selected rows is returned.
table[:2, 'col4'] # returns [[1, 0, -1], [0]]
###############################################################################
# .. note::
#
# You cannot supply a list/tuple for the column name. For this
# kind of access, first convert the :py:class:`~hdmf.common.table.DynamicTable`
# to a :py:class:`~pandas.DataFrame`.
###############################################################################
# Accessing columns
# -----------------
# To access all the values in a column, use square brackets with a colon for the
# row index: ``table[:, col_name]``. If the column is a ragged array, a list of
# list elements is returned.
table[:, 'col1'] # returns [1, 2, 3, 4]
table[:, 'col4'] # returns [[1, 0, -1], [0], [-1, 1], [1, -1]]
###############################################################################
# Accessing rows
# --------------
# To access the i-th row in a :py:class:`~hdmf.common.table.DynamicTable`, returned
# as a :py:class:`~pandas.DataFrame`, use the syntax ``table[i]``. Standard Python
# and numpy slicing can be used for the row index.
table[0] # get the 0th row of the table as a DataFrame
table[:2] # get the first two rows
table[0:3:2] # get rows 0 to 3 (exclusive) in steps of 2
table[3::-1] # get rows 3 to 0 in reverse order
# the following are equivalent to table[0:3:2]
table[slice(0, 3, 2)]
table[np.s_[0:3:2]]
table[[0, 2]]
table[np.array([0, 2])]
###############################################################################
# .. note::
#
# The syntax ``table[i]`` returns the i-th row, NOT the row with ID of `i`.
###############################################################################
# Iterating over rows
# --------------------
# To iterate over the rows of a :py:class:`~hdmf.common.table.DynamicTable`,
# first convert the :py:class:`~hdmf.common.table.DynamicTable` to a
# :py:class:`~pandas.DataFrame` using
# :py:meth:`DynamicTable.to_dataframe <hdmf.common.table.DynamicTable.to_dataframe>`.
# For more information on iterating over a :py:class:`~pandas.DataFrame`,
# see https://pandas.pydata.org/pandas-docs/stable/user_guide/basics.html#iteration
df = table.to_dataframe()
for row in df.itertuples():
print(row)
###############################################################################
# Accessing the column data types
# -------------------------------
# To access the :py:class:`~hdmf.common.table.VectorData` or
# :py:class:`~hdmf.common.table.VectorIndex` object representing a column, you
# can use three different methods. Use the column name in square brackets, e.g.,
# ``table[col_name]``, use the
# :py:meth:`DynamicTable.get <hdmf.common.table.DynamicTable.get>` method, or
# use the column name as an attribute, e.g., ``table.col_name``.
table['col1']
table.get('col1') # equivalent to table['col1'] except this returns None if 'col1' is not found
table.get('col1', default=0) # you can change the default return value
table.col1
###############################################################################
# .. note::
#
# Using the column name as an attribute does NOT work if the column name is
# the same as a non-column name attribute or method of the
# :py:class:`~hdmf.common.table.DynamicTable` class,
# e.g., ``name``, ``description``, ``object_id``, ``parent``, ``modified``.
###############################################################################
# If the column is a ragged array, then the methods above will return the
# :py:class:`~hdmf.common.table.VectorIndex` associated with the ragged array.
table['col4']
table.get('col4') # equivalent to table['col4'] except this returns None if 'col4' is not found
table.get('col4', default=0) # you can change the default return value
###############################################################################
# .. note::
#
# The attribute syntax ``table.col_name`` currently returns the ``VectorData``
# instead of the ``VectorIndex`` for a ragged array. This is a known
# issue and will be fixed in a future version of HDMF.
###############################################################################
# Accessing elements from column data types
# -----------------------------------------
# Standard Python and numpy slicing can be used on the
# :py:class:`~hdmf.common.table.VectorData` or
# :py:class:`~hdmf.common.table.VectorIndex` objects to access elements from
# column data. If the column is a ragged array, then instead of a list of row
# values being returned, a list of list elements for the selected rows is returned.
table['col1'][0] # get the 0th element from column 'col1'
table['col1'][:2] # get a list of the 0th and 1st elements
table['col1'][0:3:2] # get a list of the 0th to 3rd (exclusive) elements in steps of 2
table['col1'][3::-1] # get a list of the 3rd to 0th elements in reverse order
# the following are equivalent to table['col1'][0:3:2]
table['col1'][slice(0, 3, 2)]
table['col1'][np.s_[0:3:2]]
table['col1'][[0, 2]]
table['col1'][np.array([0, 2])]
# this slicing and indexing works for ragged array columns as well
table['col4'][:2] # get a list of the 0th and 1st list elements
###############################################################################
# .. note::
#
# The syntax ``table[col_name][i]`` is equivalent to ``table[i, col_name]``.
###############################################################################
# Multi-dimensional columns
# -------------------------
# A column can be represented as a multi-dimensional rectangular array or a list of lists, each containing the
# same number of elements.
col5 = VectorData(
name='col5',
description='column #5',
data=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],
)
###############################################################################
# Ragged multi-dimensional columns
# ---------------------------------
# Each element within a column can be an n-dimensional array or list or lists.
# This is true for ragged array columns as well.
col6 = VectorData(
name='col6',
description='column #6',
data=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],
)
col6_ind = VectorIndex(
name='col6_index',
target=col6,
data=[2, 3],
)
###############################################################################
# Nested ragged array columns
# ---------------------------
# In the example above, the ragged array column above has two rows. The first row has two elements,
# where each element has 3 sub-elements. This can be thought of as a 2x3 array.
# The second row has one element with 3 sub-elements, or a 1x3 array. This
# works only if the data for ``col6`` is a rectangular array, that is, each row
# element contains the same number of sub-elements. If each row element does
# not contain the same number of sub-elements, then a nested ragged array
# approach must be used instead.
#
# A :py:class:`~hdmf.common.table.VectorIndex` object can index another
# :py:class:`~hdmf.common.table.VectorIndex` object. For example, the first row
# of a table might be a 2x3 array, the second row might be a 3x2 array, and the
# third row might be a 1x1 array. This cannot be represented by a singly
# indexed column, but can be represented by a nested ragged array column.
col7 = VectorData(
name='col7',
description='column #6',
data=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm'],
)
col7_ind = VectorIndex(
name='col7_index',
target=col7,
data=[3, 6, 8, 10, 12, 13],
)
col7_ind_ind = VectorIndex(
name='col7_index_index',
target=col7_ind,
data=[2, 5, 6],
)
# all indices must be added to the table
table_double_ragged_col = DynamicTable(
name='my table',
description='an example table',
columns=[col7, col7_ind, col7_ind_ind],
)
###############################################################################
# Access the first row using the same syntax as before, except now a list of
# lists is returned. You can then index the resulting list of lists to access
# the individual elements.
table_double_ragged_col[0, 'col7'] # returns [['a', 'b', 'c'], ['d', 'e', 'f']]
table_double_ragged_col['col7'][0] # same as line above
###############################################################################
# Accessing the column named 'col7' using square bracket notation will return
# the top-level :py:class:`~hdmf.common.table.VectorIndex` for the column.
# Accessing the column named 'col7' using dot notation will return the
# :py:class:`~hdmf.common.table.VectorData` object
table_double_ragged_col['col7'] # returns col7_ind_ind
table_double_ragged_col.col7 # returns the col7 VectorData object
###############################################################################
# Accessing data from a ``DynamicTable`` that contain references to rows of other ``DynamicTable`` objects
# --------------------------------------------------------------------------------------------------------
# By default, when
# :py:meth:`DynamicTable.__getitem__ <hdmf.common.table.DynamicTable.__getitem__>`
# and :py:meth:`DynamicTable.get <hdmf.common.table.DynamicTable.get>` are supplied
# with an int, list of ints, numpy array, or a slice representing rows to return,
# a pandas :py:class:`~pandas.DataFrame` is returned. If the
# :py:class:`~hdmf.common.table.DynamicTable` contains a
# :py:class:`~hdmf.common.table.DynamicTableRegion` column that references rows
# of other ``DynamicTable`` objects, then by default, the
# :py:meth:`DynamicTable.__getitem__ <hdmf.common.table.DynamicTable.__getitem__>`
# and :py:meth:`DynamicTable.get <hdmf.common.table.DynamicTable.get>` methods will
# return row indices of the referenced table, and not the contents of the referenced
# table. To return the contents of the referenced table as a nested
# :py:class:`~pandas.DataFrame` containing only the referenced rows, use
# :py:meth:`DynamicTable.get <hdmf.common.table.DynamicTable.get>` with ``index=False``.
# create a new table of users
users_table = DynamicTable(
name='users',
description='a table containing data/metadata about users, one user per row',
)
# add simple columns to this table
users_table.add_column(
name='first_name',
description='the first name of the user',
)
users_table.add_column(
name='last_name',
description='the last name of the user',
)
# create a new table of addresses to reference
addresses_table = DynamicTable(
name='addresses',
description='a table containing data/metadata about addresses, one address per row',
)
addresses_table.add_column(
name='street_address',
description='the street number and address',
)
addresses_table.add_column(
name='city',
description='the city of the address',
)
# add rows to the addresses table
addresses_table.add_row(
street_address='123 Main St',
city='Springfield'
)
addresses_table.add_row(
street_address='45 British Way',
city='London'
)
# add a column to the users table that references rows of the addresses table
users_table.add_column(
name='address',
description='the address of the user',
table=addresses_table
)
# add rows to the users table
users_table.add_row(
first_name='Grace',
last_name='Hopper',
address=0 # <-- row index of the address table
)
users_table.add_row(
first_name='Alan',
last_name='Turing',
address=1 # <-- row index of the address table
)
# get the first row of the users table
users_table.get(0)
###############################################################################
#
# get the first row of the users table with a nested dataframe
users_table.get(0, index=False)
###############################################################################
#
# get the first two rows of the users table
users_table.get([0, 1])
###############################################################################
#
# get the first two rows of the users table with nested dataframes
# of the addresses table in the address column
users_table.get([0, 1], index=False)
###############################################################################
# .. note::
# You can also get rows from a :py:class:`~hdmf.common.table.DynamicTable` as a list of
# lists where the i-th nested list contains the values for the i-th row. This method is
# generally not recommended.
###############################################################################
# Displaying the contents of a table with references to another table
# -------------------------------------------------------------------
# Earlier, we converted a :py:class:`~hdmf.common.table.DynamicTable` to a
# :py:class:`~pandas.DataFrame` using
# :py:meth:`DynamicTable.to_dataframe <hdmf.common.table.DynamicTable.to_dataframe>`
# and printed the :py:class:`~pandas.DataFrame` to see its contents.
# This also works when the :py:class:`~hdmf.common.table.DynamicTable` contains a column
# that references another table. However, the entries for this column for each row
# will be printed as a nested :py:class:`~pandas.DataFrame`. This can be difficult to
# read, so to view only the row indices of the referenced table, pass
# ``index=True`` to
# :py:meth:`DynamicTable.to_dataframe <hdmf.common.table.DynamicTable.to_dataframe>`.
users_df = users_table.to_dataframe(index=True)
users_df
###############################################################################
# You can then access the referenced table using the ``table`` attribute of the
# column object. This is useful when reading a table from a file where you may not have
# a variable to access the referenced table.
#
# First, use :py:meth:`DynamicTable.__getitem__ <hdmf.common.table.DynamicTable.__getitem__>`
# (square brackets notation) to get the
# :py:class:`~hdmf.common.table.DynamicTableRegion` object representing the column.
# Then access its ``table`` attribute to get the addresses table and convert the table
# to a :py:class:`~pandas.DataFrame`.
address_column = users_table['address']
read_addresses_table = address_column.table
addresses_df = read_addresses_table.to_dataframe()
###############################################################################
# Get the addresses corresponding to the rows of the users table:
address_indices = users_df['address'] # pandas Series of row indices into the addresses table
addresses_df.iloc[address_indices] # use .iloc because these are row indices not ID values
###############################################################################
# .. note::
# The indices returned by ``users_df['address']`` are row indices and not
# the ID values of the table. However, if you are using default IDs, these
# values will be the same.
###############################################################################
# Creating custom DynamicTable subclasses
# ---------------------------------------
# TODO
###############################################################################
# Defining ``__columns__``
# ^^^^^^^^^^^^^^^^^^^^^^^^
# TODO
| [
"hdmf.common.VectorIndex",
"hdmf.common.VectorData",
"hdmf.common.table.DynamicTableRegion",
"hdmf.backends.hdf5.h5_utils.H5DataIO",
"hdmf.common.DynamicTable.from_dataframe",
"hdmf.common.table.EnumData",
"hdmf.common.DynamicTable",
"numpy.array",
"hdmf.common.table.ElementIdentifiers"
] | [((1242, 1303), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my_table"""', 'description': '"""an example table"""'}), "(name='my_table', description='an example table')\n", (1254, 1303), False, 'from hdmf.common import DynamicTable\n'), ((2226, 2287), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col1"""', 'description': '"""column #1"""', 'data': '[1, 2]'}), "(name='col1', description='column #1', data=[1, 2])\n", (2236, 2287), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((2310, 2375), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col2"""', 'description': '"""column #2"""', 'data': "['a', 'b']"}), "(name='col2', description='column #2', data=['a', 'b'])\n", (2320, 2375), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((2449, 2536), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my table"""', 'description': '"""an example table"""', 'columns': '[col1, col2]'}), "(name='my table', description='an example table', columns=[col1,\n col2])\n", (2461, 2536), False, 'from hdmf.common import DynamicTable\n'), ((2614, 2712), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my table"""', 'description': '"""an example table"""', 'columns': '[col1, col2]', 'id': '[0, 1]'}), "(name='my table', description='an example table', columns=[col1,\n col2], id=[0, 1])\n", (2626, 2712), False, 'from hdmf.common import DynamicTable\n'), ((5498, 5639), 'hdmf.common.table.EnumData', 'EnumData', ([], {'name': '"""cell_type"""', 'description': '"""this column holds categorical variables"""', 'data': '[0, 1, 2, 1, 0]', 'elements': "['aa', 'bb', 'cc']"}), "(name='cell_type', description=\n 'this column holds categorical variables', data=[0, 1, 2, 1, 0],\n elements=['aa', 'bb', 'cc'])\n", (5506, 5639), False, 'from hdmf.common.table import EnumData\n'), ((5661, 5747), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my_table"""', 'description': '"""an example table"""', 'columns': 
'[enum_col]'}), "(name='my_table', description='an example table', columns=[\n enum_col])\n", (5673, 5747), False, 'from hdmf.common import DynamicTable\n'), ((6402, 6481), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col1"""', 'description': '"""column #1"""', 'data': "['1a', '1b', '1c', '2a']"}), "(name='col1', description='column #1', data=['1a', '1b', '1c', '2a'])\n", (6412, 6481), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((6701, 6757), 'hdmf.common.VectorIndex', 'VectorIndex', ([], {'name': '"""col1_index"""', 'target': 'col1', 'data': '[3, 4]'}), "(name='col1_index', target=col1, data=[3, 4])\n", (6712, 6757), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((6793, 6884), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my table"""', 'description': '"""an example table"""', 'columns': '[col1, col1_ind]'}), "(name='my table', description='an example table', columns=[col1,\n col1_ind])\n", (6805, 6884), False, 'from hdmf.common import DynamicTable\n'), ((7629, 7690), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my_table"""', 'description': '"""an example table"""'}), "(name='my_table', description='an example table')\n", (7641, 7690), False, 'from hdmf.common import DynamicTable\n'), ((8856, 8978), 'hdmf.common.table.DynamicTableRegion', 'DynamicTableRegion', ([], {'name': '"""table1_ref"""', 'description': '"""references rows of earlier table"""', 'data': '[0, 1, 0, 0]', 'table': 'table'}), "(name='table1_ref', description=\n 'references rows of earlier table', data=[0, 1, 0, 0], table=table)\n", (8874, 8978), False, 'from hdmf.common.table import DynamicTableRegion\n'), ((9053, 9128), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col2"""', 'description': '"""column #2"""', 'data': "['a', 'a', 'a', 'b']"}), "(name='col2', description='column #2', data=['a', 'a', 'a', 'b'])\n", (9063, 9128), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((9154, 9249), 
'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my_table"""', 'description': '"""an example table"""', 'columns': '[dtr_col, data_col]'}), "(name='my_table', description='an example table', columns=[\n dtr_col, data_col])\n", (9166, 9249), False, 'from hdmf.common import DynamicTable\n'), ((9807, 9952), 'hdmf.common.table.DynamicTableRegion', 'DynamicTableRegion', ([], {'name': '"""table1_ref2"""', 'description': '"""references multiple rows of earlier table"""', 'data': '[0, 0, 1, 1, 0, 0, 1]', 'table': 'table'}), "(name='table1_ref2', description=\n 'references multiple rows of earlier table', data=[0, 0, 1, 1, 0, 0, 1],\n table=table)\n", (9825, 9952), False, 'from hdmf.common.table import DynamicTableRegion\n'), ((10113, 10198), 'hdmf.common.VectorIndex', 'VectorIndex', ([], {'name': '"""table1_ref2_index"""', 'target': 'indexed_dtr_col', 'data': '[2, 3, 5, 7]'}), "(name='table1_ref2_index', target=indexed_dtr_col, data=[2, 3, 5, 7]\n )\n", (10124, 10198), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((10219, 10321), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my_table"""', 'description': '"""an example table"""', 'columns': '[dtr_idx, indexed_dtr_col]'}), "(name='my_table', description='an example table', columns=[\n dtr_idx, indexed_dtr_col])\n", (10231, 10321), False, 'from hdmf.common import DynamicTable\n'), ((11453, 11610), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""expandable_table"""', 'description': '"""an example table that can be expanded after being saved to a file"""', 'columns': '[col1, col2]', 'id': 'ids'}), "(name='expandable_table', description=\n 'an example table that can be expanded after being saved to a file',\n columns=[col1, col2], id=ids)\n", (11465, 11610), False, 'from hdmf.common import DynamicTable\n'), ((14666, 14717), 'hdmf.common.DynamicTable.from_dataframe', 'DynamicTable.from_dataframe', ([], {'name': '"""my_table"""', 'df': 'df'}), "(name='my_table', df=df)\n", 
(14693, 14717), False, 'from hdmf.common import DynamicTable\n'), ((21855, 21966), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col5"""', 'description': '"""column #5"""', 'data': "[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]"}), "(name='col5', description='column #5', data=[['a', 'b', 'c'], [\n 'd', 'e', 'f'], ['g', 'h', 'i']])\n", (21865, 21966), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((22265, 22376), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col6"""', 'description': '"""column #6"""', 'data': "[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]"}), "(name='col6', description='column #6', data=[['a', 'b', 'c'], [\n 'd', 'e', 'f'], ['g', 'h', 'i']])\n", (22275, 22376), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((22398, 22454), 'hdmf.common.VectorIndex', 'VectorIndex', ([], {'name': '"""col6_index"""', 'target': 'col6', 'data': '[2, 3]'}), "(name='col6_index', target=col6, data=[2, 3])\n", (22409, 22454), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((23519, 23643), 'hdmf.common.VectorData', 'VectorData', ([], {'name': '"""col7"""', 'description': '"""column #6"""', 'data': "['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm']"}), "(name='col7', description='column #6', data=['a', 'b', 'c', 'd',\n 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm'])\n", (23529, 23643), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((23666, 23737), 'hdmf.common.VectorIndex', 'VectorIndex', ([], {'name': '"""col7_index"""', 'target': 'col7', 'data': '[3, 6, 8, 10, 12, 13]'}), "(name='col7_index', target=col7, data=[3, 6, 8, 10, 12, 13])\n", (23677, 23737), False, 'from hdmf.common import VectorData, VectorIndex\n'), ((23768, 23837), 'hdmf.common.VectorIndex', 'VectorIndex', ([], {'name': '"""col7_index_index"""', 'target': 'col7_ind', 'data': '[2, 5, 6]'}), "(name='col7_index_index', target=col7_ind, data=[2, 5, 6])\n", (23779, 23837), False, 'from hdmf.common import 
VectorData, VectorIndex\n'), ((23921, 24026), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""my table"""', 'description': '"""an example table"""', 'columns': '[col7, col7_ind, col7_ind_ind]'}), "(name='my table', description='an example table', columns=[col7,\n col7_ind, col7_ind_ind])\n", (23933, 24026), False, 'from hdmf.common import DynamicTable\n'), ((26266, 26375), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""users"""', 'description': '"""a table containing data/metadata about users, one user per row"""'}), "(name='users', description=\n 'a table containing data/metadata about users, one user per row')\n", (26278, 26375), False, 'from hdmf.common import DynamicTable\n'), ((26672, 26792), 'hdmf.common.DynamicTable', 'DynamicTable', ([], {'name': '"""addresses"""', 'description': '"""a table containing data/metadata about addresses, one address per row"""'}), "(name='addresses', description=\n 'a table containing data/metadata about addresses, one address per row')\n", (26684, 26792), False, 'from hdmf.common import DynamicTable\n'), ((17670, 17686), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (17678, 17686), True, 'import numpy as np\n'), ((21247, 21263), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (21255, 21263), True, 'import numpy as np\n'), ((3238, 3280), 'hdmf.common.table.ElementIdentifiers', 'ElementIdentifiers', ([], {'name': '"""id"""', 'data': '[0, 1]'}), "(name='id', data=[0, 1])\n", (3256, 3280), False, 'from hdmf.common.table import ElementIdentifiers\n'), ((11124, 11163), 'hdmf.backends.hdf5.h5_utils.H5DataIO', 'H5DataIO', ([], {'data': '[1, 2]', 'maxshape': '(None,)'}), '(data=[1, 2], maxshape=(None,))\n', (11132, 11163), False, 'from hdmf.backends.hdf5.h5_utils import H5DataIO\n'), ((11252, 11295), 'hdmf.backends.hdf5.h5_utils.H5DataIO', 'H5DataIO', ([], {'data': "['a', 'b']", 'maxshape': '(None,)'}), "(data=['a', 'b'], maxshape=(None,))\n", (11260, 11295), False, 'from 
hdmf.backends.hdf5.h5_utils import H5DataIO\n'), ((11390, 11429), 'hdmf.backends.hdf5.h5_utils.H5DataIO', 'H5DataIO', ([], {'data': '[0, 1]', 'maxshape': '(None,)'}), '(data=[0, 1], maxshape=(None,))\n', (11398, 11429), False, 'from hdmf.backends.hdf5.h5_utils import H5DataIO\n'), ((16029, 16045), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (16037, 16045), True, 'import numpy as np\n')] |
import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import classification_report, confusion_matrix
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers import AdamW, AutoTokenizer, get_linear_schedule_with_warmup
from data import BertEntityPairDataset, bert_collate_func
from EntityPairItem import BertEntityPairItem
from model import *
from args import get_args
from args import print_args
from utils import (AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy,
set_all_seed)
def train(train_loader,
          model,
          criterion,
          optimizer,
          scheduler,
          accumulate_step,
          epoch,
          figure_writer,
          phase="train"):
    """Run one training epoch over ``train_loader``.

    Gradients are accumulated over ``accumulate_step`` mini-batches before
    each optimizer/scheduler step, so the effective batch size is
    ``accumulate_step * batch_size``.

    Args:
        train_loader: yields ``((cui1, cui2, sentences, split,
            entity_1_begin_idxs, entity_2_begin_idxs), labels)`` tuples
            (produced by ``bert_collate_func``).
        model: scoring model, called as
            ``model(sentences, split, entity_1_begin_idxs, entity_2_begin_idxs)``.
        criterion: loss over (score, one-hot labels), e.g. ``nn.BCELoss``.
        optimizer: stepped every ``accumulate_step`` batches.
        scheduler: LR scheduler, stepped together with the optimizer.
        accumulate_step: number of mini-batches per optimizer step.
        epoch: current epoch index, used only for TensorBoard global steps.
        figure_writer: ``SummaryWriter`` receiving per-iteration scalars.
        phase: tag prefix for the TensorBoard scalars.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()
    model.train()
    model.zero_grad()
    end = time.time()
    for i, ((cui1, cui2, sentences, split, entity_1_begin_idxs,
             entity_2_begin_idxs), labels) in enumerate(train_loader):
        data_time.update(time.time() - end)
        sentences = {key: value.to(device) for key, value in sentences.items()}
        labels = labels.to(device)
        score = model(sentences, split, entity_1_begin_idxs, entity_2_begin_idxs)
        # Labels take values in {0..3}; class 0 appears to be the negative
        # class (see validate()), so only the 3 positive classes are kept in
        # the one-hot target.
        one_hot_labels = F.one_hot(labels, 4)[:, 1:].float()
        # Scale the loss so the accumulated gradient matches one large batch.
        loss = criterion(score, one_hot_labels) / accumulate_step
        losses.update(loss.item(), labels.size(0))
        # Compute the batch accuracy once and reuse it for both the meter and
        # the TensorBoard scalar (the original recomputed it).
        batch_acc = accuracy(labels.detach(), score.detach())
        acc.update(batch_acc, labels.size(0))
        figure_writer.add_scalar('%s/loss' % phase,
                                 loss.item(),
                                 global_step=epoch * len(train_loader) + i)
        figure_writer.add_scalar('%s/accuracy' % phase,
                                 batch_acc,
                                 global_step=epoch * len(train_loader) + i)
        loss.backward()
        # BUG FIX: the original condition ``i % accumulate_step == 0`` fired
        # at i == 0, stepping after a single (down-scaled) batch.  Using
        # ``i + 1`` steps only after a full group of ``accumulate_step``
        # batches has been accumulated.
        if ((i + 1) % accumulate_step) == 0:
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        # Drop GPU references promptly to keep memory usage flat.
        labels = labels.cpu()
        sentences = {key: value.cpu() for key, value in sentences.items()}
        del labels, sentences
        batch_time.update(time.time() - end)
        end = time.time()
        if i % 10 == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=acc))
def validate(val_loader, model, criterion, epoch, summary, figure_writer,
             phase):
    """Evaluate ``model`` on ``val_loader`` and record results in ``summary``.

    Runs a full pass with gradients disabled, collecting per-example scores,
    hard predictions, and gold labels, then hands everything to
    ``summary.update`` for reporting/serialization.

    Args:
        val_loader: yields the same batch structure as the training loader.
        model: scoring model; put into eval mode for the duration.
        criterion: loss over (score, one-hot labels); logged only.
        epoch: epoch index, used for TensorBoard global steps and the summary.
        summary: ``Summary`` accumulator; receives scores/preds/labels/loss.
        figure_writer: ``SummaryWriter`` for per-iteration scalars.
        phase: tag prefix for the TensorBoard scalars (e.g. "test").
    """
    losses = AverageMeter()
    pred = []
    label = []
    scores = []
    model.eval()
    with torch.no_grad():
        with tqdm(total=len(val_loader), ncols=50) as pbar:
            pbar.set_description("Validation iter:")
            for i, ((cui1, cui2, sentences, split, entity_1_begin_idxs,
                     entity_2_begin_idxs), labels) in enumerate(val_loader):
                sentences = {
                    key: value.to(device)
                    for key, value in sentences.items()
                }
                labels = labels.to(device)
                score = model(sentences, split, entity_1_begin_idxs,
                              entity_2_begin_idxs)
                # Drop class 0 from the one-hot target: the model scores only
                # the 3 positive classes (see the negative handling below).
                one_hot_labels = F.one_hot(labels, 4)[:, 1:].float()
                loss = criterion(score, one_hot_labels)
                # If no positive class clears the 0.1 threshold, predict the
                # negative class 0; otherwise argmax over the 3 positive
                # classes (shifted by +1 to map back to label space).
                neg_index = (score.max(dim=1).values < 0.1)
                preds = score.argmax(dim=1) + 1
                preds[neg_index] = 0
                scores.append(score.detach().cpu().numpy())
                pred += list(preds.detach().cpu().numpy())
                label += list(labels.detach().cpu().numpy())
                losses.update(loss.item(), labels.size(0))
                figure_writer.add_scalar('%s/cls_loss' % phase,
                                         loss.item(),
                                         global_step=epoch * len(val_loader) +
                                         i)
                figure_writer.add_scalar('%s/accuracy' % phase,
                                         accuracy(labels.detach(),
                                                  score.detach()),
                                         global_step=epoch * len(val_loader) +
                                         i)
                # Move tensors back to CPU and drop references to free GPU
                # memory between batches.
                labels = labels.cpu()
                sentences = {
                    key: value.cpu()
                    for key, value in sentences.items()
                }
                del labels, sentences
                pbar.update(1)
    summary.update(epoch, np.vstack(scores), pred, label, losses.avg)
if __name__ == "__main__":
    # --- Setup: CLI arguments, device, and reproducibility -------------------
    args = get_args()
    print_args(args)
    device = torch.device("cuda", args.cuda)
    set_all_seed(args.seed)
    print("loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    # Semantic-type codes (UMLS-style "tNNN" identifiers, presumably) used to
    # build typed entity-marker tokens below -- TODO confirm against the data.
    subtypes = [
        "t020", "t190", "t049", "t019", "t047", "t050", "t033", "t037", "t048",
        "t191", "t046", "t184"
    ]
    # One begin/end marker pair per subtype for each of the two entities,
    # plus generic separator/placeholder tokens.
    custom_tokens = ["<sep>", "<empty_title>"]
    entity_1_begin_tokens = ["<entity-%s-1>" % subtype for subtype in subtypes]
    entity_2_begin_tokens = ["<entity-%s-2>" % subtype for subtype in subtypes]
    entity_1_end_tokens = ["</entity-%s-1>" % subtype for subtype in subtypes]
    entity_2_end_tokens = ["</entity-%s-2>" % subtype for subtype in subtypes]
    special_tokens_dict = {'additional_special_tokens':custom_tokens+\
        entity_1_begin_tokens+entity_1_end_tokens+entity_2_begin_tokens+entity_2_end_tokens}
    tokenizer.add_special_tokens(special_tokens_dict)
    print("building model...")
    model = BertScoreEncoder(args.model, score_func="TuckER",
                    label_num=3, hidden_dim=100, dropout=0.5).to(device)
    # Grow the embedding matrix to cover the special tokens added above.
    model.text_encoder.resize_token_embeddings(len(tokenizer))
    # --- Data: train loader always; eval/pred loaders are optional -----------
    print("loading dataset...")
    train_dataset = BertEntityPairDataset(
        args.train_pkl,
        args.dict,
        sample_num=args.sampleNum,
        max_length=args.maxLength,
        tokenizer=tokenizer,
        entity_1_begin_tokens=entity_1_begin_tokens,
        entity_2_begin_tokens=entity_2_begin_tokens)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.trainBatchSize,
                                  shuffle=True,
                                  drop_last=True,
                                  collate_fn=bert_collate_func,
                                  num_workers=args.nworkers,
                                  pin_memory=args.pinMemory)
    if args.do_eval:
        test_dataset = BertEntityPairDataset(
            args.valid_pkl,
            args.dict,
            sample_num=args.sampleNum,
            max_length=args.maxLength,
            tokenizer=tokenizer,
            entity_1_begin_tokens=entity_1_begin_tokens,
            entity_2_begin_tokens=entity_2_begin_tokens)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=args.testBatchSize,
                                     shuffle=False,
                                     collate_fn=bert_collate_func,
                                     num_workers=args.nworkers,
                                     pin_memory=args.pinMemory)
    if args.do_pred:
        pred_dataset = BertEntityPairDataset(
            args.pred_pkl,
            args.dict,
            sample_num=args.sampleNum,
            max_length=args.maxLength,
            tokenizer=tokenizer,
            entity_1_begin_tokens=entity_1_begin_tokens,
            entity_2_begin_tokens=entity_2_begin_tokens)
        pred_dataloader = DataLoader(pred_dataset,
                                     batch_size=args.testBatchSize,
                                     shuffle=False,
                                     collate_fn=bert_collate_func,
                                     num_workers=args.nworkers,
                                     pin_memory=args.pinMemory)
    # --- Optimization: AdamW + linear warmup/decay schedule ------------------
    # NOTE(review): t_total counts raw batches, while train() steps the
    # scheduler only once per accumulate_step batches -- TODO confirm this
    # mismatch is intended.
    t_total = len(train_dataloader) * args.epoch
    optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()),
                      lr=args.lr)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_rate * t_total,
        num_training_steps=t_total)
    criterion = nn.BCELoss().to(device)
    # --- Output: TensorBoard, result summaries, and checkpoints --------------
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    figure_writer = SummaryWriter(comment=str(model))
    testWriter = Summary(args.output_path, str(model), "test")
    predWriter = Summary(args.output_path, str(model), "pred")
    checkpoint = ModelCheckpoint(args.output_path, str(model))
    # --- Main loop: optional train / eval / predict phases per epoch --------
    for e in range(args.epoch):
        if args.do_train:
            train(train_dataloader, model, criterion, optimizer, scheduler,
                  args.accumulate_step, e, figure_writer)
            torch.cuda.empty_cache()
        if args.do_eval:
            validate(test_dataloader,
                     model,
                     criterion,
                     e,
                     testWriter,
                     figure_writer,
                     phase="test")
        if args.do_pred:
            validate(pred_dataloader,
                     model,
                     criterion,
                     e,
                     predWriter,
                     figure_writer,
                     phase="annotated")
        # Save the encoder (HF format) and the scorer head every epoch.
        modelname = "%s/%s_epoch_%d/" % (args.output_path, str(model), e)
        scorer_modelname = "%s/%s_epoch_%d_scorer.pth" % (args.output_path, str(model), e)
        if not os.path.exists(modelname):
            os.makedirs(modelname)
        model.text_encoder.save_pretrained(modelname)
        torch.save(model.text_scorer.state_dict(), scorer_modelname)
    # Persist the accumulated eval/pred summaries once training finishes.
    testWriter.save()
    predWriter.save()
| [
"args.print_args",
"os.makedirs",
"torch.utils.data.DataLoader",
"utils.AverageMeter",
"torch.nn.BCELoss",
"os.path.exists",
"args.get_args",
"torch.nn.functional.one_hot",
"time.time",
"data.BertEntityPairDataset",
"transformers.AutoTokenizer.from_pretrained",
"transformers.get_linear_schedul... | [((975, 989), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (987, 989), False, 'from utils import AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy, set_all_seed\n'), ((1006, 1020), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1018, 1020), False, 'from utils import AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy, set_all_seed\n'), ((1034, 1048), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1046, 1048), False, 'from utils import AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy, set_all_seed\n'), ((1059, 1073), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1071, 1073), False, 'from utils import AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy, set_all_seed\n'), ((1126, 1137), 'time.time', 'time.time', ([], {}), '()\n', (1135, 1137), False, 'import time\n'), ((3219, 3233), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3231, 3233), False, 'from utils import AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy, set_all_seed\n'), ((5332, 5342), 'args.get_args', 'get_args', ([], {}), '()\n', (5340, 5342), False, 'from args import get_args\n'), ((5347, 5363), 'args.print_args', 'print_args', (['args'], {}), '(args)\n', (5357, 5363), False, 'from args import print_args\n'), ((5378, 5409), 'torch.device', 'torch.device', (['"""cuda"""', 'args.cuda'], {}), "('cuda', args.cuda)\n", (5390, 5409), False, 'import torch\n'), ((5414, 5437), 'utils.set_all_seed', 'set_all_seed', (['args.seed'], {}), '(args.seed)\n', (5426, 5437), False, 'from utils import AlphaTest, AverageMeter, ModelCheckpoint, Summary, accuracy, set_all_seed\n'), ((5489, 5534), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.tokenizer'], {}), '(args.tokenizer)\n', (5518, 5534), False, 'from transformers import AdamW, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((6532, 6754), 'data.BertEntityPairDataset', 
'BertEntityPairDataset', (['args.train_pkl', 'args.dict'], {'sample_num': 'args.sampleNum', 'max_length': 'args.maxLength', 'tokenizer': 'tokenizer', 'entity_1_begin_tokens': 'entity_1_begin_tokens', 'entity_2_begin_tokens': 'entity_2_begin_tokens'}), '(args.train_pkl, args.dict, sample_num=args.sampleNum,\n max_length=args.maxLength, tokenizer=tokenizer, entity_1_begin_tokens=\n entity_1_begin_tokens, entity_2_begin_tokens=entity_2_begin_tokens)\n', (6553, 6754), False, 'from data import BertEntityPairDataset, bert_collate_func\n'), ((6827, 7006), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.trainBatchSize', 'shuffle': '(True)', 'drop_last': '(True)', 'collate_fn': 'bert_collate_func', 'num_workers': 'args.nworkers', 'pin_memory': 'args.pinMemory'}), '(train_dataset, batch_size=args.trainBatchSize, shuffle=True,\n drop_last=True, collate_fn=bert_collate_func, num_workers=args.nworkers,\n pin_memory=args.pinMemory)\n', (6837, 7006), False, 'from torch.utils.data import DataLoader\n'), ((8801, 8921), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(args.warmup_rate * t_total)', 'num_training_steps': 't_total'}), '(optimizer, num_warmup_steps=args.\n warmup_rate * t_total, num_training_steps=t_total)\n', (8832, 8921), False, 'from transformers import AdamW, AutoTokenizer, get_linear_schedule_with_warmup\n'), ((2509, 2520), 'time.time', 'time.time', ([], {}), '()\n', (2518, 2520), False, 'import time\n'), ((3305, 3320), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3318, 3320), False, 'import torch\n'), ((5248, 5265), 'numpy.vstack', 'np.vstack', (['scores'], {}), '(scores)\n', (5257, 5265), True, 'import numpy as np\n'), ((7248, 7470), 'data.BertEntityPairDataset', 'BertEntityPairDataset', (['args.valid_pkl', 'args.dict'], {'sample_num': 'args.sampleNum', 'max_length': 'args.maxLength', 'tokenizer': 'tokenizer', 'entity_1_begin_tokens': 
'entity_1_begin_tokens', 'entity_2_begin_tokens': 'entity_2_begin_tokens'}), '(args.valid_pkl, args.dict, sample_num=args.sampleNum,\n max_length=args.maxLength, tokenizer=tokenizer, entity_1_begin_tokens=\n entity_1_begin_tokens, entity_2_begin_tokens=entity_2_begin_tokens)\n', (7269, 7470), False, 'from data import BertEntityPairDataset, bert_collate_func\n'), ((7574, 7737), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'args.testBatchSize', 'shuffle': '(False)', 'collate_fn': 'bert_collate_func', 'num_workers': 'args.nworkers', 'pin_memory': 'args.pinMemory'}), '(test_dataset, batch_size=args.testBatchSize, shuffle=False,\n collate_fn=bert_collate_func, num_workers=args.nworkers, pin_memory=\n args.pinMemory)\n', (7584, 7737), False, 'from torch.utils.data import DataLoader\n'), ((7959, 8180), 'data.BertEntityPairDataset', 'BertEntityPairDataset', (['args.pred_pkl', 'args.dict'], {'sample_num': 'args.sampleNum', 'max_length': 'args.maxLength', 'tokenizer': 'tokenizer', 'entity_1_begin_tokens': 'entity_1_begin_tokens', 'entity_2_begin_tokens': 'entity_2_begin_tokens'}), '(args.pred_pkl, args.dict, sample_num=args.sampleNum,\n max_length=args.maxLength, tokenizer=tokenizer, entity_1_begin_tokens=\n entity_1_begin_tokens, entity_2_begin_tokens=entity_2_begin_tokens)\n', (7980, 8180), False, 'from data import BertEntityPairDataset, bert_collate_func\n'), ((8284, 8447), 'torch.utils.data.DataLoader', 'DataLoader', (['pred_dataset'], {'batch_size': 'args.testBatchSize', 'shuffle': '(False)', 'collate_fn': 'bert_collate_func', 'num_workers': 'args.nworkers', 'pin_memory': 'args.pinMemory'}), '(pred_dataset, batch_size=args.testBatchSize, shuffle=False,\n collate_fn=bert_collate_func, num_workers=args.nworkers, pin_memory=\n args.pinMemory)\n', (8294, 8447), False, 'from torch.utils.data import DataLoader\n'), ((8995, 9027), 'os.path.exists', 'os.path.exists', (['args.output_path'], {}), '(args.output_path)\n', (9009, 9027), False, 
'import os\n'), ((9037, 9066), 'os.makedirs', 'os.makedirs', (['args.output_path'], {}), '(args.output_path)\n', (9048, 9066), False, 'import os\n'), ((8959, 8971), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (8969, 8971), True, 'import torch.nn as nn\n'), ((9516, 9540), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (9538, 9540), False, 'import torch\n'), ((10229, 10254), 'os.path.exists', 'os.path.exists', (['modelname'], {}), '(modelname)\n', (10243, 10254), False, 'import os\n'), ((10268, 10290), 'os.makedirs', 'os.makedirs', (['modelname'], {}), '(modelname)\n', (10279, 10290), False, 'import os\n'), ((1299, 1310), 'time.time', 'time.time', ([], {}), '()\n', (1308, 1310), False, 'import time\n'), ((2476, 2487), 'time.time', 'time.time', ([], {}), '()\n', (2485, 2487), False, 'import time\n'), ((1542, 1562), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels', '(4)'], {}), '(labels, 4)\n', (1551, 1562), True, 'import torch.nn.functional as F\n'), ((3932, 3952), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels', '(4)'], {}), '(labels, 4)\n', (3941, 3952), True, 'import torch.nn.functional as F\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.core import Dense,Activation,Dropout
from keras.layers import LSTM
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from keras.callbacks import EarlyStopping
import math
# In[2]:
# Load the UCI heart-disease dataset (13 clinical features + binary 'target').
df=pd.read_csv("../../../input/ronitf_heart-disease-uci/heart.csv",delimiter=',')
df.head(3)
# In[3]:
# Pairwise feature correlations (notebook cell: the value is displayed, not stored).
df.corr()
# In[4]:
fig, ax = plt.subplots(figsize=(15,15))
print()
# In[5]:
# Split the label off and drop it from the feature frame.
label=df['target']
df.shape
del df['target']
df.shape
# In[6]:
# Class balance of the target variable.
sns.countplot(label)
# In[7]:
# NOTE(review): sklearn's train_test_split returns
# (features_train, features_test, labels_train, labels_test), so with this
# unpacking the names are misleading: Y_train actually holds the TEST
# features and X_test holds the TRAIN labels. The rest of the script uses
# them consistently with that swapped meaning (fit(X_train, X_test),
# predict(Y_train) scored against Y_test), so behaviour is still correct.
X_train,Y_train,X_test,Y_test=train_test_split(df,label,test_size=0.3,random_state=0)
print(X_train.shape)
print(Y_train.shape)
print(X_test.shape)
print(Y_test.shape)
# In[8]:
# Split the held-out 30% again into a validation half (X_valid/Y_valid,
# used for tuning below) and a second held-out half (X_rftest/Y_rftest).
X_valid,X_rftest,Y_valid,Y_rftest=train_test_split(Y_train,Y_test,test_size=0.5,random_state=0)
# In[9]:
#Hyperparameter tuning
# Coarse grid search over random_state seeds (step 25), keeping the seed c1
# that maximises validation accuracy. NOTE(review): tuning random_state only
# reshuffles bootstrap sampling; it is not a conventional hyperparameter.
maxi=0
l=np.arange(1,5000,25)
for x in range(len(l)):
    model=RandomForestClassifier(random_state=l[x],verbose=1)
    model.fit(X_train,X_test)
    a=model.predict(X_valid)
    if(maxi<round(accuracy_score(a,Y_valid)*100,2)):
        maxi=round(accuracy_score(a,Y_valid)*100,2)
        c1=l[x]
print(c1)
# In[10]:
# Fine search in a +/-25 window around the best coarse seed.
maxi=0
for i in range(c1-25,c1+25):
    model=RandomForestClassifier(random_state=i,verbose=1)
    model.fit(X_train,X_test)
    a=model.predict(X_valid)
    if(maxi<round(accuracy_score(a,Y_valid)*100,2)):
        maxi=round(accuracy_score(a,Y_valid)*100,2)
        c1=i
# Final random forest with the tuned seed; weight1 (test accuracy %) is the
# voting weight used in the ensemble at the bottom of the script.
model=RandomForestClassifier(random_state=c1)
model.fit(X_train,X_test)
predict1=model.predict(Y_train)
weight1=round(accuracy_score(predict1,Y_test)*100,2)
print('Random forest accuracy score:',round(accuracy_score(predict1,Y_test)*100,2))
# c1 is reused here to hold the forest's accuracy (it no longer holds the seed).
c1=round(accuracy_score(predict1,Y_test)*100,2)
# In[11]:
# Linear SVM; accuracy computed by a manual element-wise comparison loop.
model=svm.SVC(kernel='linear',verbose=1,gamma='scale', decision_function_shape='ovo')
model.fit(X_train,X_test)
predict2=model.predict(Y_train)
c=0
for i in range(len(predict2)):
    if(predict2[i]==Y_test.iloc[i]):
        c+=1
c2=(c/len(predict2))*100
print('Linear Svm Accuracy Score is',c2)
weight2=c2
# In[12]:
# Gradient-boosted trees (XGBoost) with the same manual accuracy loop.
model = XGBClassifier(objective="binary:logistic")
model.fit(X_train, X_test)
predict3=model.predict(Y_train)
c=0
for i in range(len(predict3)):
    if(predict3[i]==Y_test.iloc[i]):
        c+=1
c3=(c/len(predict3))*100
print('XGBoost Accuracy Score is',c3)
weight3=c3
# In[13]:
# Reshape (samples, 13) -> (samples, 13, 1) so each of the 13 features is
# fed to the LSTM as a length-1 timestep vector.
X_train=np.expand_dims(X_train, axis=2)
Y_train=np.expand_dims(Y_train, axis=2)
es=EarlyStopping(patience=7)
model=Sequential()
model.add(LSTM(13,input_shape=(13,1)))
# NOTE(review): output_dim is the legacy Keras 1 keyword; modern Keras uses
# Dense(units=1) (or the positional form Dense(1)).
model.add(Dense(output_dim=1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='Adam',metrics=['accuracy'])
model.fit(X_train,X_test,epochs=100,batch_size=1,verbose=1,callbacks=[es])
predict4=model.predict(Y_train)
# evaluate() returns [loss, accuracy]; the accuracy (fraction, not %) is weight4.
c4=model.evaluate(Y_train,Y_test)
weight4=c4[1]
# In[14]:
print('SVM Accuracy Score:',c2)
print('XGBoost Accuracy Score:',c3)
print('LSTM accuracy Score:',c4[1]*100)
print('Random Forest Classifier:',c1)
# **Voting-based-Model**
# In[22]:
# Accuracy-weighted majority vote over the four models: each model adds its
# accuracy weight to the tally of the class it predicted.
# NOTE(review): c1/c2 are reused here as per-sample vote tallies, clobbering
# the accuracy scores printed above; also predict4 holds raw sigmoid floats,
# so (predict4[i]==0) is effectively always False and the LSTM always votes
# for class 1 -- confirm whether thresholding at 0.5 was intended.
l=[]
for i in range(len(Y_train)):
    c1,c2=0,0
    if(predict1[i]==0):
        c1+=weight1
    else:
        c2+=weight1
    if(predict2[i]==0):
        c1+=weight2
    else:
        c2+=weight2
    if(predict3[i]==0):
        c1+=weight3
    else:
        c2+=weight3
    if(predict4[i]==0):
        c1+=weight4
    else:
        c2+=weight4
    if(c1>c2):
        l.append(0)
    else:
        l.append(1)
# Score the ensemble's votes against the test labels.
c=0
for i in range(len(Y_train)):
    if(l[i]==Y_test.iloc[i]):
        c+=1
print('Accuracy of Voting Based Model',c/len(Y_train))
| [
"sklearn.ensemble.RandomForestClassifier",
"keras.layers.core.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"keras.layers.LSTM",
"keras.layers.core.Activation",
"sklearn.metrics.accuracy_score",
"numpy.expand_dims",
"keras.callbacks.EarlyStopping",
"numpy.arange",
"seabo... | [((688, 767), 'pandas.read_csv', 'pd.read_csv', (['"""../../../input/ronitf_heart-disease-uci/heart.csv"""'], {'delimiter': '""","""'}), "('../../../input/ronitf_heart-disease-uci/heart.csv', delimiter=',')\n", (699, 767), True, 'import pandas as pd\n'), ((824, 854), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (836, 854), True, 'import matplotlib.pyplot as plt\n'), ((942, 962), 'seaborn.countplot', 'sns.countplot', (['label'], {}), '(label)\n', (955, 962), True, 'import seaborn as sns\n'), ((1006, 1064), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df', 'label'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(df, label, test_size=0.3, random_state=0)\n', (1022, 1064), False, 'from sklearn.model_selection import train_test_split\n'), ((1191, 1255), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Y_train', 'Y_test'], {'test_size': '(0.5)', 'random_state': '(0)'}), '(Y_train, Y_test, test_size=0.5, random_state=0)\n', (1207, 1255), False, 'from sklearn.model_selection import train_test_split\n'), ((1298, 1320), 'numpy.arange', 'np.arange', (['(1)', '(5000)', '(25)'], {}), '(1, 5000, 25)\n', (1307, 1320), True, 'import numpy as np\n'), ((1893, 1932), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'c1'}), '(random_state=c1)\n', (1915, 1932), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2196, 2282), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'verbose': '(1)', 'gamma': '"""scale"""', 'decision_function_shape': '"""ovo"""'}), "(kernel='linear', verbose=1, gamma='scale', decision_function_shape=\n 'ovo')\n", (2203, 2282), False, 'from sklearn import svm\n'), ((2519, 2561), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'objective': '"""binary:logistic"""'}), "(objective='binary:logistic')\n", (2532, 2561), False, 'from xgboost import XGBClassifier\n'), ((2802, 2833), 
'numpy.expand_dims', 'np.expand_dims', (['X_train'], {'axis': '(2)'}), '(X_train, axis=2)\n', (2816, 2833), True, 'import numpy as np\n'), ((2843, 2874), 'numpy.expand_dims', 'np.expand_dims', (['Y_train'], {'axis': '(2)'}), '(Y_train, axis=2)\n', (2857, 2874), True, 'import numpy as np\n'), ((2878, 2903), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(7)'}), '(patience=7)\n', (2891, 2903), False, 'from keras.callbacks import EarlyStopping\n'), ((2910, 2922), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2920, 2922), False, 'from keras.models import Sequential\n'), ((1353, 1405), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'l[x]', 'verbose': '(1)'}), '(random_state=l[x], verbose=1)\n', (1375, 1405), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1661, 1710), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'i', 'verbose': '(1)'}), '(random_state=i, verbose=1)\n', (1683, 1710), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2933, 2962), 'keras.layers.LSTM', 'LSTM', (['(13)'], {'input_shape': '(13, 1)'}), '(13, input_shape=(13, 1))\n', (2937, 2962), False, 'from keras.layers import LSTM\n'), ((2972, 2991), 'keras.layers.core.Dense', 'Dense', ([], {'output_dim': '(1)'}), '(output_dim=1)\n', (2977, 2991), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((3003, 3024), 'keras.layers.core.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (3013, 3024), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2005, 2037), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['predict1', 'Y_test'], {}), '(predict1, Y_test)\n', (2019, 2037), False, 'from sklearn.metrics import accuracy_score\n'), ((2137, 2169), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['predict1', 'Y_test'], {}), '(predict1, Y_test)\n', (2151, 2169), False, 'from sklearn.metrics 
import accuracy_score\n'), ((2088, 2120), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['predict1', 'Y_test'], {}), '(predict1, Y_test)\n', (2102, 2120), False, 'from sklearn.metrics import accuracy_score\n'), ((1482, 1508), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['a', 'Y_valid'], {}), '(a, Y_valid)\n', (1496, 1508), False, 'from sklearn.metrics import accuracy_score\n'), ((1536, 1562), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['a', 'Y_valid'], {}), '(a, Y_valid)\n', (1550, 1562), False, 'from sklearn.metrics import accuracy_score\n'), ((1787, 1813), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['a', 'Y_valid'], {}), '(a, Y_valid)\n', (1801, 1813), False, 'from sklearn.metrics import accuracy_score\n'), ((1841, 1867), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['a', 'Y_valid'], {}), '(a, Y_valid)\n', (1855, 1867), False, 'from sklearn.metrics import accuracy_score\n')] |
"""
9. 19. 2020
by <NAME>
This file is used to generate 3D models from input 4-channel images
Run from anaconda console
"""
import torch
import torch.nn.parallel
import examples.recon.datasets as datasets
from examples.recon.utils import AverageMeter, img_cvt
import soft_renderer as sr
import soft_renderer.functional as srf
import examples.recon.models as models
import time
import os
import imageio
import numpy as np
import PIL
# Configuration constants (paths are machine-specific; edit before running).
BATCH_SIZE = 100
IMAGE_SIZE = 64
# Comma-separated ShapeNet synset ids covering the 13 training categories.
CLASS_IDS_ALL = (
    '02691156,02828884,02933112,02958343,03001627,03211117,03636649,' +
    '03691459,04090263,04256520,04379243,04401088,04530566')
PRINT_FREQ = 100
SAVE_FREQ = 100
MODEL_DIRECTORY = '/mnt/zhengwen/model_synthesis/SoftRas/data/results/models/checkpoint_0210000.pth.tar'
DATASET_DIRECTORY = '/mnt/zhengwen/model_synthesis/SoftRas/data/datasets'
SIGMA_VAL = 0.01
IMAGE_PATH = ''
# arguments
# Plain namespace standing in for an argparse result; the SoftRas model and
# dataset code read these as class attributes.
class Args:
    experiment_id = 'Sept_18_2020'
    model_directory = MODEL_DIRECTORY
    dataset_directory = DATASET_DIRECTORY
    class_ids = CLASS_IDS_ALL
    image_size = IMAGE_SIZE
    batch_size = BATCH_SIZE
    image_path = IMAGE_PATH
    sigma_val = SIGMA_VAL
    print_freq = PRINT_FREQ
    save_freq = SAVE_FREQ
args = Args()
# setup model & optimizer
# Build the reconstruction network around the template sphere mesh, move it
# to the GPU, and restore the trained checkpoint (strict=False tolerates
# keys that do not match the current model definition).
model = models.Model('/mnt/zhengwen/model_synthesis/SoftRas/data/obj/sphere/sphere_642.obj', args=args)
model = model.cuda()
state_dicts = torch.load(args.model_directory)
model.load_state_dict(state_dicts['model'], strict=False)
model.eval()
# Output directories: one for the meshes written below, plus a per-experiment
# subdirectory (created but not written to in this script).
directory_output = '/mnt/zhengwen/model_synthesis/photo_from_life/123'
os.makedirs(directory_output, exist_ok=True)
directory_mesh = os.path.join(directory_output, args.experiment_id)
os.makedirs(directory_mesh, exist_ok=True)
IMG_PATH = '/mnt/zhengwen/model_synthesis/photo_from_life/texture'
# Bookkeeping meters (declared for parity with the training loop; unused here).
end = time.time()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses1 = AverageMeter()
iou_all = []
# Load every image in IMG_PATH (sorted for a deterministic order).
# NOTE(review): the model expects 4-channel (RGBA) inputs per the module
# docstring -- confirm the files in IMG_PATH actually carry an alpha channel.
images = []
img_list = sorted(os.listdir(IMG_PATH))
for img_name in img_list:
    img = PIL.Image.open(os.path.join(IMG_PATH, img_name))
    img = np.asanyarray(img)
    images.append(img)
# Stack to (N, H, W, C), reorder to channels-first (N, C, H, W), and scale
# uint8 pixel values into [0, 1] floats for the network.
images = np.array(images)
images = images.transpose((0, 3, 1, 2))
images = np.ascontiguousarray(images)
images = torch.from_numpy(images.astype('float32') / 255.)
images = torch.autograd.Variable(images).cuda()
# Reconstruct a mesh per image and save each one as <image name>.obj.
vertices, faces = model.reconstruct(images)
for k in range(len(img_list)):
    print(k)
    mesh_path = os.path.join(directory_output, img_list[k][:-4] + ".obj")
    srf.save_obj(mesh_path, vertices[k], faces[k])
| [
"os.makedirs",
"examples.recon.models.Model",
"torch.autograd.Variable",
"torch.load",
"numpy.ascontiguousarray",
"numpy.asanyarray",
"time.time",
"numpy.array",
"soft_renderer.functional.save_obj",
"examples.recon.utils.AverageMeter",
"os.path.join",
"os.listdir"
] | [((1262, 1366), 'examples.recon.models.Model', 'models.Model', (['"""/mnt/zhengwen/model_synthesis/SoftRas/data/obj/sphere/sphere_642.obj"""'], {'args': 'args'}), "(\n '/mnt/zhengwen/model_synthesis/SoftRas/data/obj/sphere/sphere_642.obj',\n args=args)\n", (1274, 1366), True, 'import examples.recon.models as models\n'), ((1394, 1426), 'torch.load', 'torch.load', (['args.model_directory'], {}), '(args.model_directory)\n', (1404, 1426), False, 'import torch\n'), ((1570, 1614), 'os.makedirs', 'os.makedirs', (['directory_output'], {'exist_ok': '(True)'}), '(directory_output, exist_ok=True)\n', (1581, 1614), False, 'import os\n'), ((1632, 1682), 'os.path.join', 'os.path.join', (['directory_output', 'args.experiment_id'], {}), '(directory_output, args.experiment_id)\n', (1644, 1682), False, 'import os\n'), ((1683, 1725), 'os.makedirs', 'os.makedirs', (['directory_mesh'], {'exist_ok': '(True)'}), '(directory_mesh, exist_ok=True)\n', (1694, 1725), False, 'import os\n'), ((1802, 1813), 'time.time', 'time.time', ([], {}), '()\n', (1811, 1813), False, 'import time\n'), ((1828, 1842), 'examples.recon.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1840, 1842), False, 'from examples.recon.utils import AverageMeter, img_cvt\n'), ((1855, 1869), 'examples.recon.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1867, 1869), False, 'from examples.recon.utils import AverageMeter, img_cvt\n'), ((1879, 1893), 'examples.recon.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1891, 1893), False, 'from examples.recon.utils import AverageMeter, img_cvt\n'), ((1904, 1918), 'examples.recon.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1916, 1918), False, 'from examples.recon.utils import AverageMeter, img_cvt\n'), ((2135, 2151), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2143, 2151), True, 'import numpy as np\n'), ((2201, 2229), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['images'], {}), '(images)\n', (2221, 2229), True, 'import 
numpy as np\n'), ((1965, 1985), 'os.listdir', 'os.listdir', (['IMG_PATH'], {}), '(IMG_PATH)\n', (1975, 1985), False, 'import os\n'), ((2083, 2101), 'numpy.asanyarray', 'np.asanyarray', (['img'], {}), '(img)\n', (2096, 2101), True, 'import numpy as np\n'), ((2442, 2499), 'os.path.join', 'os.path.join', (['directory_output', "(img_list[k][:-4] + '.obj')"], {}), "(directory_output, img_list[k][:-4] + '.obj')\n", (2454, 2499), False, 'import os\n'), ((2504, 2550), 'soft_renderer.functional.save_obj', 'srf.save_obj', (['mesh_path', 'vertices[k]', 'faces[k]'], {}), '(mesh_path, vertices[k], faces[k])\n', (2516, 2550), True, 'import soft_renderer.functional as srf\n'), ((2039, 2071), 'os.path.join', 'os.path.join', (['IMG_PATH', 'img_name'], {}), '(IMG_PATH, img_name)\n', (2051, 2071), False, 'import os\n'), ((2298, 2329), 'torch.autograd.Variable', 'torch.autograd.Variable', (['images'], {}), '(images)\n', (2321, 2329), False, 'import torch\n')] |
"""
Generate colored noise.
Cloned from https://github.com/felixpatzelt/colorednoise on Jan 23 2019.
Author: <NAME>, 2014-2017.
"""
from numpy import concatenate, real, std, abs
from numpy.fft import ifft, fftfreq
from numpy.random import normal
def powerlaw_psd_gaussian(exponent, samples, fmin=0):
"""Gaussian (1/f)**beta noise.
Based on the algorithm in:
<NAME>. and <NAME>.:
On generating power law noise.
Astron. Astrophys. 300, 707-710 (1995)
Normalised to unit variance
Parameters:
-----------
exponent : float
The power-spectrum of the generated noise is proportional to
S(f) = (1 / f)**beta
flicker / pink noise: exponent beta = 1
brown noise: exponent beta = 2
Furthermore, the autocorrelation decays proportional to lag**-gamma
with gamma = 1 - beta for 0 < beta < 1.
There may be finite-size issues for beta close to one.
samples : int
number of samples to generate
fmin : float, optional
Low-frequency cutoff.
Default: 0 corresponds to original paper. It is not actually
zero, but 1/samples.
Returns
-------
out : array
The samples.
Examples:
---------
# generate 1/f noise == pink noise == flicker noise
>>> import colorednoise as cn
>>> y = cn.powerlaw_psd_gaussian(1, 5)
"""
# frequencies (we asume a sample rate of one)
f = fftfreq(samples)
# scaling factor for all frequencies
## though the fft for real signals is symmetric,
## the array with the results is not - take neg. half!
s_scale = abs(concatenate([f[f<0], [f[-1]]]))
## low frequency cutoff?!?
if fmin:
ix = sum(s_scale>fmin)
if ix < len(f):
s_scale[ix:] = s_scale[ix]
s_scale = s_scale**(-exponent/2.)
# scale random power + phase
sr = s_scale * normal(size=len(s_scale))
si = s_scale * normal(size=len(s_scale))
if not (samples % 2): si[0] = si[0].real
s = sr + 1J * si
# this is complicated... because for odd sample numbers,
## there is one less positive freq than for even sample numbers
s = concatenate([s[1-(samples % 2):][::-1], s[:-1].conj()])
# time series
y = ifft(s).real
return y / std(y)
| [
"numpy.std",
"numpy.fft.ifft",
"numpy.fft.fftfreq",
"numpy.concatenate"
] | [((1453, 1469), 'numpy.fft.fftfreq', 'fftfreq', (['samples'], {}), '(samples)\n', (1460, 1469), False, 'from numpy.fft import ifft, fftfreq\n'), ((1642, 1674), 'numpy.concatenate', 'concatenate', (['[f[f < 0], [f[-1]]]'], {}), '([f[f < 0], [f[-1]]])\n', (1653, 1674), False, 'from numpy import concatenate, real, std, abs\n'), ((2261, 2268), 'numpy.fft.ifft', 'ifft', (['s'], {}), '(s)\n', (2265, 2268), False, 'from numpy.fft import ifft, fftfreq\n'), ((2290, 2296), 'numpy.std', 'std', (['y'], {}), '(y)\n', (2293, 2296), False, 'from numpy import concatenate, real, std, abs\n')] |
#%% Importing libraries
import os
import glob
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import rc
from mpl_toolkits.mplot3d import Axes3D
from scipy.special import erfc as erfc
from scipy.optimize import curve_fit
#%% Setting a "LaTeX-like" plotting style
# LaTeX-flavoured plot styling (requires a working TeX installation).
plt.style.use('default')
rc('font', **{'family': 'DejaVu Sans', 'serif': ['Computer Modern']})
rc('text', usetex=True)
#%% For loop to reconstruct beam profile
# Initializing 3D frame
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection = '3d')
ax.view_init(20, 140)
ax.set_xlabel('$x$ ($\mu$m)')
ax.set_ylabel('$z$ (mm)')
ax.set_zticklabels([])
# Colormap + normaliser used to colour each z-slice by its peak value.
cmap = mpl.cm.get_cmap('plasma')
norm = mpl.colors.Normalize(vmin=0, vmax=10)
# Initializing data sets
# One knife-edge scan per file: columns are z [mm], x [um], P [W], dx, dP.
sets = glob.glob('Data*.txt')
sets.sort()
# Initializing for loop
for i in sets:
    Data = np.loadtxt(i, comments = '#')
    # Defining variables and functions
    z = Data[:, 0] # [mm]
    x = Data[:, 1] # [um]
    P = Data[:, 2] # [W]
    dx = Data[:, 3] # [um]
    dP = Data[:, 4] # [W]
    P0 = np.amin(P)
    Pmax = np.amax(P)
    # Knife-edge model: integrated Gaussian beam power is an erfc in x,
    # with edge position x0 and 1/e^2 radius w.
    def fit(x, x0, w):
        return P0+Pmax/2*erfc(np.sqrt(2)*(x-x0)/w) # [W]
    xfit = np.linspace(np.amin(x), np.amax(x), 300) # [um]
    # NOTE(review): the x0 initial guess is the half-range (max-min)/2, not
    # the midpoint (max+min)/2 -- fine when the scan starts near x = 0, but
    # confirm for scans with an offset origin.
    pi = [(np.amax(x)-np.amin(x))/2, 1800]
    # Fitting
    popt, pcov = curve_fit(fit, x, P, pi)
    # 1-sigma uncertainties from the diagonal of the covariance matrix.
    sigma = np.sqrt([pcov[0, 0], pcov[1, 1]])
    fit_values = fit(xfit, popt[0], popt[1])
    w = popt[1] # [um]
    dw = sigma[1] # [um]
    # Calculating derivative
    # Central finite difference of the fitted erfc recovers the Gaussian
    # intensity profile; the factor 1000 rescales to per-mm units.
    def prime(f, a, h):
        return (f(a+h)-f(a-h))/(2*h)
    def fun(x):
        return fit(x, popt[0], w)
    g = -prime(fun, x, 0.001)*1000
    # Plotting static 3D gaussian profile
    k = np.max(g)
    rgba = cmap(norm(k))
    ax.plot3D(x, z, g, color = rgba)
# Colorbar mapping the per-slice peak values onto the plasma colormap.
sm = plt.cm.ScalarMappable(cmap = cmap, norm = norm)
sm.set_array([])
cb = fig.colorbar(sm, shrink = 0.6, aspect = 13)
cb.ax.set_title('$\partial P$ (u.a.)')
plt.savefig('Profile.png', dpi = 300)
# Generating frames for gif of rotating beam profile
for angle in range(0, 360):
    ax.view_init(20, angle)
    plt.savefig('./frames/%d.png' % angle, dpi = 150)
| [
"matplotlib.rc",
"numpy.amin",
"matplotlib.colors.Normalize",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.amax",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.loadtxt",
"glob.glob",
"matplotlib.pyplot.sa... | [((314, 338), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (327, 338), True, 'from matplotlib import pyplot as plt\n'), ((339, 408), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'DejaVu Sans', 'serif': ['Computer Modern']})\n", (341, 408), False, 'from matplotlib import rc\n'), ((409, 432), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (411, 432), False, 'from matplotlib import rc\n'), ((507, 519), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (517, 519), True, 'from matplotlib import pyplot as plt\n'), ((679, 704), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (694, 704), True, 'import matplotlib as mpl\n'), ((712, 749), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(10)'}), '(vmin=0, vmax=10)\n', (732, 749), True, 'import matplotlib as mpl\n'), ((784, 806), 'glob.glob', 'glob.glob', (['"""Data*.txt"""'], {}), "('Data*.txt')\n", (793, 806), False, 'import glob\n'), ((1808, 1851), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (1829, 1851), True, 'from matplotlib import pyplot as plt\n'), ((1962, 1997), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Profile.png"""'], {'dpi': '(300)'}), "('Profile.png', dpi=300)\n", (1973, 1997), True, 'from matplotlib import pyplot as plt\n'), ((2239, 2266), 'glob.glob', 'glob.glob', (['"""./frames/*.png"""'], {}), "('./frames/*.png')\n", (2248, 2266), False, 'import glob\n'), ((871, 898), 'numpy.loadtxt', 'np.loadtxt', (['i'], {'comments': '"""#"""'}), "(i, comments='#')\n", (881, 898), True, 'import numpy as np\n'), ((1092, 1102), 'numpy.amin', 'np.amin', (['P'], {}), '(P)\n', (1099, 1102), True, 'import numpy as np\n'), ((1114, 1124), 'numpy.amax', 'np.amax', (['P'], {}), '(P)\n', (1121, 1124), True, 'import numpy 
as np\n'), ((1340, 1364), 'scipy.optimize.curve_fit', 'curve_fit', (['fit', 'x', 'P', 'pi'], {}), '(fit, x, P, pi)\n', (1349, 1364), False, 'from scipy.optimize import curve_fit\n'), ((1377, 1410), 'numpy.sqrt', 'np.sqrt', (['[pcov[0, 0], pcov[1, 1]]'], {}), '([pcov[0, 0], pcov[1, 1]])\n', (1384, 1410), True, 'import numpy as np\n'), ((1730, 1739), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (1736, 1739), True, 'import numpy as np\n'), ((2115, 2162), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./frames/%d.png' % angle)"], {'dpi': '(150)'}), "('./frames/%d.png' % angle, dpi=150)\n", (2126, 2162), True, 'from matplotlib import pyplot as plt\n'), ((1230, 1240), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (1237, 1240), True, 'import numpy as np\n'), ((1242, 1252), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (1249, 1252), True, 'import numpy as np\n'), ((1279, 1289), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (1286, 1289), True, 'import numpy as np\n'), ((1290, 1300), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (1297, 1300), True, 'import numpy as np\n'), ((1179, 1189), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1186, 1189), True, 'import numpy as np\n')] |
import numpy as np
from batch_rl_algorithms.batch_rl_algorithm import BatchRLAlgorithm
class DUIPI(BatchRLAlgorithm):
    # Algorithm implemented following 'Uncertainty Propagation for Efficient Exploration in Reinforcement Learning'
    # by <NAME> and <NAME>; a small modification has been added see the Master's thesis
    # 'Evaluation of Safe Policy Improvement with Soft Baseline Bootstrapping'
    NAME = 'DUIPI'
    def __init__(self, pi_b, gamma, nb_states, nb_actions, data, R, episodic, bayesian, xi, alpha_prior=0.1,
                 zero_unseen=True, max_nb_it=5000, checks=False, speed_up_dict=None):
        """
        :param pi_b: numpy matrix with shape (nb_states, nb_actions), such that pi_b(s,a) refers to the probability of
        choosing action a in state s by the behavior policy
        :param gamma: discount factor
        :param nb_states: number of states of the MDP
        :param nb_actions: number of actions available in each state
        :param data: the data collected by the behavior policy, which should be a list of [state, action, next_state,
        reward] sublists
        :param R: reward matrix as numpy array with shape (nb_states, nb_states), assuming that the reward is deterministic w.r.t. the
        previous and the next states
        :param episodic: boolean variable, indicating whether the MDP is episodic (True) or non-episodic (False)
        :param zero_unseen: boolean variable, indicating whether the estimated model should guess set all transition
        probabilities to zero for a state-action pair which has never been visited (True) or to 1/nb_states (False)
        :param max_nb_it: integer, indicating the maximal number of times the PE and PI step should be executed, if
        convergence is not reached
        :param checks: boolean variable indicating if different validity checks should be executed (True) or not
        (False); this should be set to True for development reasons, but it is time consuming for big experiments
        :param speed_up_dict: a dictionary containing pre-calculated quantities which can be reused by many different
        algorithms, this should only be used for big experiments; for DUIPI this should only contain
        the following:
            'count_state_action': numpy array with shape (nb_states, nb_actions) indicating the number of times a
            state-action pair has been visited
            'count_state_action_state': numpy array with shape (nb_states, nb_actions, nb_states) indicating the number of
            times a state-action-next-state triplet has been visited
        :param bayesian: boolean variable, indicating whether the estimation of the variance of the estimation of the
        transition probabilities should be done bayesian (True) using the Dirichlet distribution as a prior or
        frequentistic (False)
        :param xi: hyper-parameter of DUIPI, the higher xi is, the stronger is the influence of the variance
        :param alpha_prior: float variable necessary if bayesian=True, usually between 0 and 1
        """
        self.xi = xi
        self.alpha_prior = alpha_prior
        self.bayesian = bayesian
        # NOTE(review): the parent constructor presumably estimates the model from
        # `data` and fills in the visit counts used below — confirm in BatchRLAlgorithm.
        super().__init__(pi_b, gamma, nb_states, nb_actions, data, R, episodic, zero_unseen, max_nb_it, checks,
                         speed_up_dict)
        # Variance of the Q-function estimate, propagated through the Bellman updates.
        self.variance_q = np.zeros([self.nb_states, self.nb_actions])
        # Start from the uniform stochastic policy; _policy_improvement gradually
        # shifts probability mass towards the uncertainty-penalized greedy action.
        self.pi = 1 / self.nb_actions * np.ones([self.nb_states, self.nb_actions])
    def _initial_calculations(self):
        """
        Starts all the calculations which can be done before the actual training.
        """
        self._prepare_R_and_variance_R()
        self._prepare_P_and_variance_P()
        self._compute_mask()
    def _prepare_R_and_variance_R(self):
        """
        Estimates the reward matrix and its variance.
        """
        # The reward is deterministic w.r.t. (previous state, next state), so the
        # (s, a, s') reward tensor repeats R_state_state for every action and
        # its variance is identically zero.
        self.R_state_action_state = np.zeros((self.nb_states, self.nb_actions, self.nb_states))
        for action in range(self.nb_actions):
            self.R_state_action_state[:, action, :] = self.R_state_state.copy()
        self.variance_R = np.zeros([self.nb_states, self.nb_actions, self.nb_states])
    def _prepare_P_and_variance_P(self):
        """
        Estimates the transition model and its variance.
        """
        self.variance_P = np.zeros([self.nb_states, self.nb_actions, self.nb_states])
        if self.bayesian:
            # Dirichlet posterior with concentration alpha_prior: closed-form
            # mean (transition_model) and variance of each transition probability.
            alpha_d = (self.count_state_action_state + self.alpha_prior)
            alpha_d_0 = np.sum(alpha_d, 2)[:, :, np.newaxis]
            self.transition_model = alpha_d / alpha_d_0
            self.variance_P = alpha_d * (alpha_d_0 - alpha_d) / alpha_d_0 ** 2 / (alpha_d_0 + 1)
        else:
            self._build_model()
            # Frequentist variance p * (1 - p) / (n - 1); n <= 1 produces
            # nan/inf which is replaced just below.
            for state in range(self.nb_states):
                self.variance_P[:, :, state] = self.transition_model[:, :, state] * (
                        1 - self.transition_model[:, :, state]) / (
                                                       self.count_state_action - 1)
            self.variance_P = np.nan_to_num(self.variance_P, nan=1 / 4,
                                            posinf=1 / 4)  # maximal variance is (b - a)^2 / 4
            self.variance_P[
                self.count_state_action == 0] = 1 / 4  # Otherwise variance_P would be if a state-action pair hasn't been visited yet
        self._check_if_valid_transitions()
    def _compute_mask(self):
        """
        Compute the mask which indicates which state-pair has never been visited.
        """
        self.mask = self.count_state_action > 0
    def _policy_evaluation(self):
        """
        Evaluates the current policy self.pi and calculates its variance.
        :return:
        """
        # NOTE(review): reads self.q before reassigning it — relies on self.q being
        # initialized before the first call (presumably by the parent class); confirm.
        self.v = np.einsum('ij,ij->i', self.pi, self.q)
        self.variance_v = np.einsum('ij,ij->i', self.pi ** 2, self.variance_q)
        self.q = np.einsum('ijk,ijk->ij', self.transition_model, self.R_state_action_state + self.gamma * self.v)
        # Uncertainty propagation: contributions from the value variance, the
        # transition-model variance and the (zero) reward variance.
        self.variance_q = np.dot(self.gamma ** 2 * self.transition_model ** 2, self.variance_v) + \
                          np.einsum('ijk,ijk->ij', (self.R_state_action_state + self.gamma * self.v) ** 2,
                                    self.variance_P) + \
                          np.einsum('ijk,ijk->ij', self.transition_model ** 2, self.variance_R)
        self.variance_q = np.nan_to_num(self.variance_q, nan=np.inf, posinf=np.inf)
    def _policy_improvement(self):
        """
        Updates the current policy self.pi.
        """
        # Penalize each action value by xi standard deviations of its estimate.
        q_uncertainty_and_mask_corrected = self.q - self.xi * np.sqrt(self.variance_q)
        # The extra modification to avoid unobserved state-action pairs
        q_uncertainty_and_mask_corrected[~self.mask] = - np.inf
        best_action = np.argmax(q_uncertainty_and_mask_corrected, axis=1)
        for state in range(self.nb_states):
            # Move at most 1/nb_it probability mass to the best action and
            # rescale the remaining actions so the distribution stays normalized.
            d_s = np.minimum(1 / self.nb_it, 1 - self.pi[state, best_action[state]])
            self.pi[state, best_action[state]] += d_s
            for action in range(self.nb_actions):
                if action == best_action[state]:
                    continue
                elif self.pi[state, best_action[state]] == 1:
                    self.pi[state, action] = 0
                else:
                    self.pi[state, action] = self.pi[state, action] * (1 - self.pi[state, best_action[state]]) / (
                            1 - self.pi[state, best_action[state]] + d_s)
| [
"numpy.minimum",
"numpy.sum",
"numpy.nan_to_num",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.einsum",
"numpy.dot",
"numpy.sqrt"
] | [((3338, 3381), 'numpy.zeros', 'np.zeros', (['[self.nb_states, self.nb_actions]'], {}), '([self.nb_states, self.nb_actions])\n', (3346, 3381), True, 'import numpy as np\n'), ((3876, 3935), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions, self.nb_states)'], {}), '((self.nb_states, self.nb_actions, self.nb_states))\n', (3884, 3935), True, 'import numpy as np\n'), ((4088, 4147), 'numpy.zeros', 'np.zeros', (['[self.nb_states, self.nb_actions, self.nb_states]'], {}), '([self.nb_states, self.nb_actions, self.nb_states])\n', (4096, 4147), True, 'import numpy as np\n'), ((4294, 4353), 'numpy.zeros', 'np.zeros', (['[self.nb_states, self.nb_actions, self.nb_states]'], {}), '([self.nb_states, self.nb_actions, self.nb_states])\n', (4302, 4353), True, 'import numpy as np\n'), ((5723, 5761), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'self.pi', 'self.q'], {}), "('ij,ij->i', self.pi, self.q)\n", (5732, 5761), True, 'import numpy as np\n'), ((5788, 5840), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', '(self.pi ** 2)', 'self.variance_q'], {}), "('ij,ij->i', self.pi ** 2, self.variance_q)\n", (5797, 5840), True, 'import numpy as np\n'), ((5858, 5958), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->ij"""', 'self.transition_model', '(self.R_state_action_state + self.gamma * self.v)'], {}), "('ijk,ijk->ij', self.transition_model, self.R_state_action_state +\n self.gamma * self.v)\n", (5867, 5958), True, 'import numpy as np\n'), ((6341, 6398), 'numpy.nan_to_num', 'np.nan_to_num', (['self.variance_q'], {'nan': 'np.inf', 'posinf': 'np.inf'}), '(self.variance_q, nan=np.inf, posinf=np.inf)\n', (6354, 6398), True, 'import numpy as np\n'), ((6749, 6800), 'numpy.argmax', 'np.argmax', (['q_uncertainty_and_mask_corrected'], {'axis': '(1)'}), '(q_uncertainty_and_mask_corrected, axis=1)\n', (6758, 6800), True, 'import numpy as np\n'), ((3422, 3464), 'numpy.ones', 'np.ones', (['[self.nb_states, self.nb_actions]'], {}), '([self.nb_states, self.nb_actions])\n', (3429, 3464), 
True, 'import numpy as np\n'), ((5029, 5084), 'numpy.nan_to_num', 'np.nan_to_num', (['self.variance_P'], {'nan': '(1 / 4)', 'posinf': '(1 / 4)'}), '(self.variance_P, nan=1 / 4, posinf=1 / 4)\n', (5042, 5084), True, 'import numpy as np\n'), ((6245, 6314), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->ij"""', '(self.transition_model ** 2)', 'self.variance_R'], {}), "('ijk,ijk->ij', self.transition_model ** 2, self.variance_R)\n", (6254, 6314), True, 'import numpy as np\n'), ((6863, 6929), 'numpy.minimum', 'np.minimum', (['(1 / self.nb_it)', '(1 - self.pi[state, best_action[state]])'], {}), '(1 / self.nb_it, 1 - self.pi[state, best_action[state]])\n', (6873, 6929), True, 'import numpy as np\n'), ((4477, 4495), 'numpy.sum', 'np.sum', (['alpha_d', '(2)'], {}), '(alpha_d, 2)\n', (4483, 4495), True, 'import numpy as np\n'), ((5981, 6050), 'numpy.dot', 'np.dot', (['(self.gamma ** 2 * self.transition_model ** 2)', 'self.variance_v'], {}), '(self.gamma ** 2 * self.transition_model ** 2, self.variance_v)\n', (5987, 6050), True, 'import numpy as np\n'), ((6081, 6182), 'numpy.einsum', 'np.einsum', (['"""ijk,ijk->ij"""', '((self.R_state_action_state + self.gamma * self.v) ** 2)', 'self.variance_P'], {}), "('ijk,ijk->ij', (self.R_state_action_state + self.gamma * self.v) **\n 2, self.variance_P)\n", (6090, 6182), True, 'import numpy as np\n'), ((6565, 6589), 'numpy.sqrt', 'np.sqrt', (['self.variance_q'], {}), '(self.variance_q)\n', (6572, 6589), True, 'import numpy as np\n')] |
import argparse
import ast
import re
import threading
import time
import numpy as np
from pandas import read_table
from deep4cast.forecasters import Forecaster
from deep4cast.metrics import adjust_for_horizon, mape
import os
# Serializes appends to the shared `results` list across worker threads (see run_model).
mutex = threading.Lock()
def load_topology(args):
    """
    Loads the network(s) topology from a file if file is passed by user
    otherwise just returns the default topology (None).
    :param args: parsed command line arguments; only `topology_file` is read
    :return: list of topologies (each a list of (dict, dict) layer tuples) or None
    """
    if not args.topology_file:
        return None
    topologies = []
    with open(args.topology_file) as handle:
        lines = handle.readlines()
    for line in lines:
        # Each '(...)' group describes one layer as a pair of dict literals.
        layer_specs = re.findall('\(.+?\)', line)
        topology = []
        for spec in layer_specs:
            dicts = re.findall('{.*?}', spec)
            topology.append((ast.literal_eval(dicts[0]), ast.literal_eval(dicts[1])))
        topologies.append(topology)
    return topologies
def build_datasets(ts, lookback_period, test_fraction):
    """
    Build the train and test sets from a single time series.
    :param ts: the series; promoted to shape (n_steps, n_vars, n_series)
    :param lookback_period: number of trailing train steps prepended to the test set
    :param test_fraction: fraction of the series reserved for testing
    :return: (train set, test set)
    """
    # Promote 1-D / 2-D input to the (n_steps, n_vars, n_series) layout.
    while ts.ndim < 3:
        ts = np.expand_dims(ts, axis=-1)
    test_length = int(len(ts) * test_fraction)
    return ts[:-test_length], ts[-test_length - lookback_period:]
def run_model(args, data_file, test_fraction, lag, topologies, epochs, batch_size, separator, horizon, lr, optimizers,
              results):
    """
    Runs the forecaster on a data set (with given parameters) and computes the
    prediction accuracy. One (file, topology, train_err, test_err, train_time)
    tuple is appended to `results` per topology.
    :return: (train_err, test_err, train_time) of the last topology that was run
    """
    global mutex
    print("Running for data " + data_file + " ...")
    df = read_table(data_file, sep=separator)
    train_set, test_set = build_datasets(df.values, lag, test_fraction)
    # Defaults so the final return cannot raise UnboundLocalError when
    # `topologies` is empty (a latent bug in the original version).
    train_err = 'NA'
    test_err = 'NA'
    train_time = 0.0
    for i in range(0, len(topologies)):
        # str() because a topology loaded from a topology file is a list of
        # tuples, not a string, and plain '+' concatenation would raise TypeError.
        print("Train\t\t model:" + str(topologies[i]) + "\tdataset:" + data_file)
        start = time.time()
        try:
            forecaster = Forecaster(
                topologies[i],
                optimizer=optimizers[i],
                lag=lag,
                horizon=horizon,
                batch_size=batch_size,
                epochs=epochs,
                uncertainty=args.uncertainty,
                dropout_rate=args.dropout_rate,
                lr=lr,
                verbose=args.verbose
            )
            forecaster.fit(train_set, verbose=args.verbose)
            train_time = time.time() - start
            metric = adjust_for_horizon(mape)
            print("Predict\t\t model:" + str(topologies[i]) + "\tdataset:" + data_file)
            train_err = metric(forecaster.predict(train_set, n_samples=args.n_samples)['mean'],
                               train_set[lag:len(train_set)])
            test_err = metric(forecaster.predict(test_set, n_samples=args.n_samples)['mean'],
                              test_set[lag:len(test_set)])
        except Exception as e:
            print(e)
            train_time = time.time() - start
            train_err = 'NA'
            test_err = 'NA'
        file_name = os.path.basename(data_file)
        # 'with' guarantees the lock is released even if append raises
        # (the original acquire()/release() pair was not exception-safe).
        with mutex:
            results.append((file_name, topologies[i], train_err, test_err, train_time))
    return train_err, test_err, train_time
def run_single_threaded(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons, lrs,
                        optimizers):
    """Run every data set sequentially in the current thread and collect the results."""
    results = []
    for idx, data_file in enumerate(args.data_files):
        run_model(args, data_file, test_fractions[idx], lags[idx],
                  topologies, epochs[idx], batch_sizes[idx], separators[idx],
                  horizons[idx], lrs[idx], optimizers, results)
    return results
def run_multi_threaded(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons, lrs,
                       optimizers):
    """
    Run the forecaster for every data set, at most `args.threads` data sets in
    parallel, and collect the results.

    Bug fixes vs. the original version:
      * the inner start/join loops reused the outer loop variable `i`, which
        clobbered the data-file index and made the outer loop skip/repeat files;
      * when the thread list was full, the batch was flushed but the current
        data file was never queued, silently dropping every (threads+1)-th file.
    :return: list of (file_name, topology, train_err, test_err, train_time) tuples
    """
    threads = []
    results = []
    num_threads = args.threads
    for i in range(0, len(args.data_files)):
        if len(threads) >= num_threads:
            # Flush the current batch before queueing more work.
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            threads.clear()
        threads.append(threading.Thread(target=run_model, args=(
            args, args.data_files[i], test_fractions[i], lags[i], topologies, epochs[i], batch_sizes[i],
            separators[i],
            horizons[i], lrs[i], optimizers, results)))
    if len(threads) > 0:
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    return results
def fill_list(list, target_size):
    """
    Creates a new list of length `target_size` out of a given one: elements are
    copied while they last, then the final element is repeated.
    :param list: source list (parameter name kept for interface compatibility,
                 although it shadows the builtin)
    :param target_size: length of the returned list
    :return: the extended (or truncated) list
    """
    # Alias the parameter so the builtin `list` is not shadowed in the body.
    values = list
    last = len(values) - 1
    # min(i, last) reproduces the original loop exactly: values[i] while in
    # range, values[-1] afterwards (IndexError on an empty source, as before).
    return [values[min(i, last)] for i in range(target_size)]
def main(args):
    """
    Entry point: expand the per-dataset and per-model parameter lists, run the
    benchmarks (single- or multi-threaded) and print a summary table.
    :param args: parsed command line arguments
    """
    print("\n\nRunning the benchmarks ...")
    topologies = load_topology(args)
    if topologies is None:
        topologies = args.network_type
    # Per-dataset parameters are padded/truncated to one entry per data file.
    n_datasets = len(args.data_files)
    separators = fill_list(args.separator, n_datasets)
    lags = fill_list(args.lag, n_datasets)
    horizons = fill_list(args.horizon, n_datasets)
    test_fractions = fill_list(args.test_fraction, n_datasets)
    # Per-model parameters are padded/truncated to one entry per topology.
    n_models = len(topologies)
    epochs = fill_list(args.epochs, n_models)
    batch_sizes = fill_list(args.batch_size, n_models)
    optimizers = fill_list(args.optimizer, n_models)
    lrs = fill_list(args.learning_rate, n_models)
    runner = run_multi_threaded if args.multi_threaded == 1 else run_single_threaded
    results = runner(args, test_fractions, lags, topologies, epochs, batch_sizes, separators, horizons,
                     lrs, optimizers)
    separator_line = "#" * 100
    print(separator_line)
    print("Model\t\t\t\tTrain Metric\t\t\t\tTest Metric\t\t\t\tTrain Time\t\t\t\tDataset")
    print(separator_line)
    for entry in results:
        print(entry[1] + '\t\t\t' + str(entry[2]) + '\t\t\t' + str(entry[3]) + '\t\t\t' + str(
            entry[4]) + '\t\t\t' + str(entry[0]))
    print(separator_line)
def _get_parser():
"""
Collect all relevant command line arguments
:return:
"""
parser = argparse.ArgumentParser()
named_args = parser.add_argument_group('named arguments')
named_args.add_argument('-d', '--data-files',
help="List of data files",
required=True,
nargs="+")
named_args.add_argument('-nt', '--network_type',
help="Network type",
required=False,
default=['rnn'],
type=str,
nargs="+")
named_args.add_argument('-topology_file', '--topology-file',
help="File containing the networks topology (it overrides the --network_type parameter.",
required=False,
type=str)
named_args.add_argument('-lg', '--lag',
help="Lookback period",
required=True,
nargs="+",
type=int)
named_args.add_argument('-hr', '--horizon',
help="Forecasting horizon",
required=False,
default=[1],
nargs="+",
type=int)
named_args.add_argument('-o', '--optimizer',
help="Optimizer type",
required=False,
default=['sgd'],
nargs="+",
type=str)
named_args.add_argument('-sep', '--separator',
help="Location of data sets",
required=False,
default=[','],
nargs="+")
named_args.add_argument('-tf', '--test-fraction',
help="Test fraction at end of dataset",
required=False,
default=[0.2],
nargs="+",
type=float)
named_args.add_argument('-e', '--epochs',
help="Number of epochs to run",
required=False,
default=[100],
nargs="+",
type=int)
named_args.add_argument('-b', '--batch-size',
help="Location of validation data",
required=False,
default=[8],
nargs="+",
type=int)
named_args.add_argument('-lr', '--learning-rate',
help="Learning rate",
required=False,
default=[0.1],
nargs="+",
type=float)
named_args.add_argument('-u', '--uncertainty',
help="Toggle uncertainty",
required=False,
default=False,
type=bool)
named_args.add_argument('-dr', '--dropout_rate',
help="Dropout rate",
required=False,
default=0.1,
type=float)
named_args.add_argument('-s', '--n_samples',
help="Number of dropout samples",
required=False,
default=10,
type=int)
named_args.add_argument('-m', '--multi-threaded',
help="Multi-Threaded execution",
required=False,
default=0,
type=int)
named_args.add_argument('-threads', '--threads',
help="Number of threads to parallelize the computation",
required=False,
default=3,
type=int)
named_args.add_argument('-v', '--verbose',
help="Verbose",
required=False,
default=0,
type=int)
return parser
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and launch the benchmark run.
    args = _get_parser().parse_args()
    main(args)
| [
"threading.Thread",
"ast.literal_eval",
"argparse.ArgumentParser",
"os.path.basename",
"numpy.expand_dims",
"deep4cast.metrics.adjust_for_horizon",
"time.time",
"threading.Lock",
"deep4cast.forecasters.Forecaster",
"re.findall",
"pandas.read_table"
] | [((237, 253), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (251, 253), False, 'import threading\n'), ((1782, 1818), 'pandas.read_table', 'read_table', (['data_file'], {'sep': 'separator'}), '(data_file, sep=separator)\n', (1792, 1818), False, 'from pandas import read_table\n'), ((6759, 6784), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6782, 6784), False, 'import argparse\n'), ((1243, 1270), 'numpy.expand_dims', 'np.expand_dims', (['ts'], {'axis': '(-1)'}), '(ts, axis=-1)\n', (1257, 1270), True, 'import numpy as np\n'), ((2025, 2036), 'time.time', 'time.time', ([], {}), '()\n', (2034, 2036), False, 'import time\n'), ((3200, 3227), 'os.path.basename', 'os.path.basename', (['data_file'], {}), '(data_file)\n', (3216, 3227), False, 'import os\n'), ((624, 653), 're.findall', 're.findall', (['"""\\\\(.+?\\\\)"""', 'line'], {}), "('\\\\(.+?\\\\)', line)\n", (634, 653), False, 'import re\n'), ((2075, 2288), 'deep4cast.forecasters.Forecaster', 'Forecaster', (['topologies[i]'], {'optimizer': 'optimizers[i]', 'lag': 'lag', 'horizon': 'horizon', 'batch_size': 'batch_size', 'epochs': 'epochs', 'uncertainty': 'args.uncertainty', 'dropout_rate': 'args.dropout_rate', 'lr': 'lr', 'verbose': 'args.verbose'}), '(topologies[i], optimizer=optimizers[i], lag=lag, horizon=horizon,\n batch_size=batch_size, epochs=epochs, uncertainty=args.uncertainty,\n dropout_rate=args.dropout_rate, lr=lr, verbose=args.verbose)\n', (2085, 2288), False, 'from deep4cast.forecasters import Forecaster\n'), ((2582, 2606), 'deep4cast.metrics.adjust_for_horizon', 'adjust_for_horizon', (['mape'], {}), '(mape)\n', (2600, 2606), False, 'from deep4cast.metrics import adjust_for_horizon, mape\n'), ((714, 740), 're.findall', 're.findall', (['"""{.*?}"""', 'layer'], {}), "('{.*?}', layer)\n", (724, 740), False, 'import re\n'), ((765, 796), 'ast.literal_eval', 'ast.literal_eval', (['layer_info[0]'], {}), '(layer_info[0])\n', (781, 796), False, 'import ast\n'), ((821, 852), 
'ast.literal_eval', 'ast.literal_eval', (['layer_info[1]'], {}), '(layer_info[1])\n', (837, 852), False, 'import ast\n'), ((2541, 2552), 'time.time', 'time.time', ([], {}), '()\n', (2550, 2552), False, 'import time\n'), ((4141, 4340), 'threading.Thread', 'threading.Thread', ([], {'target': 'run_model', 'args': '(args, args.data_files[i], test_fractions[i], lags[i], topologies, epochs[i\n ], batch_sizes[i], separators[i], horizons[i], lrs[i], optimizers, results)'}), '(target=run_model, args=(args, args.data_files[i],\n test_fractions[i], lags[i], topologies, epochs[i], batch_sizes[i],\n separators[i], horizons[i], lrs[i], optimizers, results))\n', (4157, 4340), False, 'import threading\n'), ((3078, 3089), 'time.time', 'time.time', ([], {}), '()\n', (3087, 3089), False, 'import time\n')] |
#!/usr/bin/env python
"""
Misfit functions used by the 'default' preprocess class use to quantify
differences between data and synthetics.
All functions defined have four required positional arguments
:type syn: np.array
:param syn: synthetic data array
:type obs: np.array
:param obs: observed data array
:type nt: int
:param nt: number of time steps in the data array
:type dt: float
:param dt: time step in sec
"""
import numpy as np
from scipy.signal import hilbert as analytic
def waveform(syn, obs, nt, dt, *args, **kwargs):
    """
    Direct waveform differencing (L2 norm of the residual trace).
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    residual = syn - obs
    return np.sqrt(np.sum(residual * residual * dt))
def envelope(syn, obs, nt, dt, *args, **kwargs):
    """
    Waveform envelope difference from Yuan et al. 2015 Eq. 9
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # Envelopes are the magnitudes of the analytic (Hilbert-transformed) signals.
    syn_env = abs(analytic(syn))
    obs_env = abs(analytic(obs))
    residual = syn_env - obs_env
    return np.sqrt(np.sum(residual * residual * dt))
def instantaneous_phase(syn, obs, nt, dt, *args, **kwargs):
    """
    Instantaneous phase difference from Bozdag et al. 2011
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # np.angle(z) == np.arctan2(z.imag, z.real): the instantaneous phase
    # of the analytic signal.
    phi_syn = np.angle(analytic(syn))
    phi_obs = np.angle(analytic(obs))
    residual = phi_syn - phi_obs
    return np.sqrt(np.sum(residual * residual * dt))
def traveltime(syn, obs, nt, dt, *args, **kwargs):
    """
    Cross-correlation traveltime
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # Cross-correlation as convolution with the time-reversed synthetic;
    # the argmax position gives the lag in samples, scaled to seconds by dt.
    cc = abs(np.convolve(obs, syn[::-1]))
    shift = np.argmax(cc) - nt + 1
    return shift * dt
def traveltime_inexact(syn, obs, nt, dt, *args, **kwargs):
    """
    A faster cc traveltime function but possibly innacurate
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # Approximate the lag by comparing the positions of the absolute maxima.
    return (np.argmax(obs) - np.argmax(syn)) * dt
def amplitude(syn, obs, nt, dt, *args, **kwargs):
    """
    Cross-correlation amplitude difference: waveform difference after the two
    traces have been aligned at their cross-correlation lag.
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # Bug fixes: the original referenced an undefined `cc` (NameError), used a
    # float (time-scaled) offset as a slice index, and mis-sliced for negative
    # lags. Compute the cross-correlation here and keep the lag as an integer
    # sample offset for slicing.
    cc = abs(np.convolve(obs, np.flipud(syn)))
    ioff = int(np.argmax(cc)) - nt + 1
    if ioff == 0:
        wrsd = syn - obs
    elif ioff < 0:
        # syn lags obs by |ioff| samples: drop the first |ioff| synthetic
        # samples and the last |ioff| observed samples.
        wrsd = syn[-ioff:] - obs[:ioff]
    else:
        wrsd = syn[:-ioff] - obs[ioff:]
    return np.sqrt(np.sum(wrsd * wrsd * dt))
def envelope2(syn, obs, nt, dt, *args, **kwargs):
    """
    Envelope amplitude ratio from Yuan et al. 2015 Eq. B-1
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # The envelopes are computed, but the ratio misfit itself is not
    # implemented yet.
    syn_env = abs(analytic(syn))
    obs_env = abs(analytic(obs))
    raise NotImplementedError
def envelope3(syn, obs, nt, dt, eps=0., *args, **kwargs):
    """
    Envelope cross-correlation lag from Yuan et al. 2015, Eq. B-4
    :type syn: np.array
    :param syn: synthetic data array
    :type obs: np.array
    :param obs: observed data array
    :type nt: int
    :param nt: number of time steps in the data array
    :type dt: float
    :param dt: time step in sec
    """
    # Bug fix: the original called the undefined name `Traveltime` (NameError).
    # The cross-correlation lag of the two envelopes is computed inline,
    # equivalent to traveltime(env_syn, env_obs, nt, dt).
    env_syn = abs(analytic(syn))
    env_obs = abs(analytic(obs))
    cc = abs(np.convolve(env_obs, np.flipud(env_syn)))
    return (np.argmax(cc) - nt + 1) * dt
def instantaneous_phase2(syn, obs, nt, dt, eps=0., *args, **kwargs):
"""
Alterative instantaneous phase function
:type syn: np.array
:param syn: synthetic data array
:type obs: np.array
:param obs: observed data array
:type nt: int
:param nt: number of time steps in the data array
:type dt: float
:param dt: time step in sec
"""
env_syn = abs(analytic(syn))
env_obs = abs(analytic(obs))
env_syn1 = env_syn + eps * max(env_syn)
env_obs1 = env_obs + eps * max(env_obs)
diff = (syn / env_syn1) - (obs / env_obs1)
return np.sqrt(np.sum(diff * diff * dt))
def displacement(*args, **kwargs):
    # NOTE(review): this returns an Exception instance instead of raising it,
    # so callers silently receive the object — confirm this is intentional.
    return Exception("This function can only used for migration.")
def velocity(*args, **kwargs):
    # NOTE(review): this returns an Exception instance instead of raising it,
    # so callers silently receive the object — confirm this is intentional.
    return Exception("This function can only used for migration.")
def acceleration(*args, **kwargs):
    # NOTE(review): this returns an Exception instance instead of raising it,
    # so callers silently receive the object — confirm this is intentional.
    return Exception("This function can only used for migration.")
| [
"numpy.arctan2",
"numpy.sum",
"numpy.argmax",
"numpy.flipud",
"scipy.signal.hilbert"
] | [((1943, 1959), 'numpy.arctan2', 'np.arctan2', (['i', 'r'], {}), '(i, r)\n', (1953, 1959), True, 'import numpy as np\n'), ((2037, 2053), 'numpy.arctan2', 'np.arctan2', (['i', 'r'], {}), '(i, r)\n', (2047, 2053), True, 'import numpy as np\n'), ((2969, 2983), 'numpy.argmax', 'np.argmax', (['syn'], {}), '(syn)\n', (2978, 2983), True, 'import numpy as np\n'), ((2993, 3007), 'numpy.argmax', 'np.argmax', (['obs'], {}), '(obs)\n', (3002, 3007), True, 'import numpy as np\n'), ((905, 929), 'numpy.sum', 'np.sum', (['(wrsd * wrsd * dt)'], {}), '(wrsd * wrsd * dt)\n', (911, 929), True, 'import numpy as np\n'), ((1323, 1336), 'scipy.signal.hilbert', 'analytic', (['syn'], {}), '(syn)\n', (1331, 1336), True, 'from scipy.signal import hilbert as analytic\n'), ((1356, 1369), 'scipy.signal.hilbert', 'analytic', (['obs'], {}), '(obs)\n', (1364, 1369), True, 'from scipy.signal import hilbert as analytic\n'), ((1452, 1482), 'numpy.sum', 'np.sum', (['(env_rsd * env_rsd * dt)'], {}), '(env_rsd * env_rsd * dt)\n', (1458, 1482), True, 'import numpy as np\n'), ((1883, 1896), 'scipy.signal.hilbert', 'analytic', (['syn'], {}), '(syn)\n', (1891, 1896), True, 'from scipy.signal import hilbert as analytic\n'), ((1914, 1927), 'scipy.signal.hilbert', 'analytic', (['syn'], {}), '(syn)\n', (1922, 1927), True, 'from scipy.signal import hilbert as analytic\n'), ((1977, 1990), 'scipy.signal.hilbert', 'analytic', (['obs'], {}), '(obs)\n', (1985, 1990), True, 'from scipy.signal import hilbert as analytic\n'), ((2008, 2021), 'scipy.signal.hilbert', 'analytic', (['obs'], {}), '(obs)\n', (2016, 2021), True, 'from scipy.signal import hilbert as analytic\n'), ((2107, 2137), 'numpy.sum', 'np.sum', (['(phi_rsd * phi_rsd * dt)'], {}), '(phi_rsd * phi_rsd * dt)\n', (2113, 2137), True, 'import numpy as np\n'), ((3562, 3586), 'numpy.sum', 'np.sum', (['(wrsd * wrsd * dt)'], {}), '(wrsd * wrsd * dt)\n', (3568, 3586), True, 'import numpy as np\n'), ((3979, 3992), 'scipy.signal.hilbert', 'analytic', (['syn'], {}), 
'(syn)\n', (3987, 3992), True, 'from scipy.signal import hilbert as analytic\n'), ((4012, 4025), 'scipy.signal.hilbert', 'analytic', (['obs'], {}), '(obs)\n', (4020, 4025), True, 'from scipy.signal import hilbert as analytic\n'), ((4464, 4477), 'scipy.signal.hilbert', 'analytic', (['syn'], {}), '(syn)\n', (4472, 4477), True, 'from scipy.signal import hilbert as analytic\n'), ((4497, 4510), 'scipy.signal.hilbert', 'analytic', (['obs'], {}), '(obs)\n', (4505, 4510), True, 'from scipy.signal import hilbert as analytic\n'), ((4956, 4969), 'scipy.signal.hilbert', 'analytic', (['syn'], {}), '(syn)\n', (4964, 4969), True, 'from scipy.signal import hilbert as analytic\n'), ((4989, 5002), 'scipy.signal.hilbert', 'analytic', (['obs'], {}), '(obs)\n', (4997, 5002), True, 'from scipy.signal import hilbert as analytic\n'), ((5161, 5185), 'numpy.sum', 'np.sum', (['(diff * diff * dt)'], {}), '(diff * diff * dt)\n', (5167, 5185), True, 'import numpy as np\n'), ((2518, 2532), 'numpy.flipud', 'np.flipud', (['syn'], {}), '(syn)\n', (2527, 2532), True, 'import numpy as np\n'), ((2548, 2561), 'numpy.argmax', 'np.argmax', (['cc'], {}), '(cc)\n', (2557, 2561), True, 'import numpy as np\n'), ((3404, 3417), 'numpy.argmax', 'np.argmax', (['cc'], {}), '(cc)\n', (3413, 3417), True, 'import numpy as np\n')] |
# This helper code is adapted from:
# https://github.com/scikit-learn/scikit-learn/pull/16061
# TODO: remove this code from the MOOC when the feature is
# made available in a stable version of scikit-learn itself.
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_matplotlib_support
from sklearn.utils import _safe_indexing
def _check_boundary_response_method(estimator, response_method):
"""Return prediction method from the `response_method` for decision boundary.
Parameters
----------
estimator : object
Estimator to check.
response_method : {'auto', 'predict_proba', 'decision_function', 'predict'}
Specifies whether to use :term:`predict_proba`,
:term:`decision_function`, :term:`predict` as the target response.
If set to 'auto', the response method is tried in the following order:
:term:`predict_proba`, :term:`decision_function`, :term:`predict`.
Returns
-------
prediction_method: callable
Prediction method of estimator.
"""
possible_response_methods = (
"predict_proba",
"decision_function",
"auto",
"predict",
)
if response_method not in possible_response_methods:
raise ValueError(
f"response_method must be one of {', '.join(possible_response_methods)}"
)
error_msg = "response method {} is not defined in {}"
if response_method != "auto":
if not hasattr(estimator, response_method):
raise ValueError(
error_msg.format(response_method, estimator.__class__.__name__)
)
return getattr(estimator, response_method)
elif hasattr(estimator, "decision_function"):
return getattr(estimator, "decision_function")
elif hasattr(estimator, "predict_proba"):
return getattr(estimator, "predict_proba")
elif hasattr(estimator, "predict"):
return getattr(estimator, "predict")
raise ValueError(
error_msg.format(
"decision_function, predict_proba, or predict", estimator.__class__.__name__
)
)
class DecisionBoundaryDisplay:
    """Decisions boundary visualization.
    It is recommended to use
    :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
    to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
    attributes.
    Read more in the :ref:`User Guide <visualizations>`.
    .. versionadded:: 1.0
    Parameters
    ----------
    xx0 : ndarray of shape (grid_resolution, grid_resolution)
        First output of :func:`meshgrid <numpy.meshgrid>`.
    xx1 : ndarray of shape (grid_resolution, grid_resolution)
        Second output of :func:`meshgrid <numpy.meshgrid>`.
    response : ndarray of shape (grid_resolution, grid_resolution)
        Values of the response function.
    xlabel : str, default=""
        Default label to place on x axis.
    ylabel : str, default=""
        Default label to place on y axis.
    Attributes
    ----------
    surface_ : matplotlib `QuadContourSet` or `QuadMesh`
        If `plot_method` is 'contour' or 'contourf', `surface_` is a
        :class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
        `plot_method` is 'pcolormesh', `surface_` is a
        :class:`QuadMesh <matplotlib.collections.QuadMesh>`.
    ax_ : matplotlib Axes
        Axes with the decision boundary.
    figure_ : matplotlib Figure
        Figure containing the decision boundary.
    """

    def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None):
        self.xx0 = xx0
        self.xx1 = xx1
        self.response = response
        self.xlabel = xlabel
        self.ylabel = ylabel

    def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
        """Plot visualization.
        Parameters
        ----------
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        xlabel : str, default=None
            Overwrite the x-axis label.
        ylabel : str, default=None
            Overwrite the y-axis label.
        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`.
        Returns
        -------
        display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
        """
        check_matplotlib_support("DecisionBoundaryDisplay.plot")
        import matplotlib.pyplot as plt  # noqa
        if plot_method not in ("contourf", "contour", "pcolormesh"):
            raise ValueError(
                "plot_method must be 'contourf', 'contour', or 'pcolormesh'"
            )
        if ax is None:
            _, ax = plt.subplots()
        plot_func = getattr(ax, plot_method)
        self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)
        # Only touch axis labels that were explicitly passed or not yet set,
        # so a pre-labelled user-supplied Axes is left alone.
        if xlabel is not None or not ax.get_xlabel():
            xlabel = self.xlabel if xlabel is None else xlabel
            ax.set_xlabel(xlabel)
        if ylabel is not None or not ax.get_ylabel():
            ylabel = self.ylabel if ylabel is None else ylabel
            ax.set_ylabel(ylabel)
        self.ax_ = ax
        self.figure_ = ax.figure
        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot decision boundary given an estimator.
        Read more in the :ref:`User Guide <visualizations>`.
        .. versionadded:: 1.0
        Parameters
        ----------
        estimator : object
            Trained estimator used to plot the decision boundary.
        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data that should be only 2-dimensional.
        grid_resolution : int, default=100
            Number of grid points to use for plotting decision boundary.
            Higher values will make the plot look nicer but be slower to
            render.
        eps : float, default=1.0
            Extends the minimum and maximum values of X for evaluating the
            response function.
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
        response_method : {'auto', 'predict_proba', 'decision_function', \
                'predict'}, default='auto'
            Specifies whether to use :term:`predict_proba`,
            :term:`decision_function`, :term:`predict` as the target response.
            If set to 'auto', the response method is tried in the following order:
            :term:`predict_proba`, :term:`decision_function`, :term:`predict`.
        xlabel : str, default=None
            The label used for the x-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.
        ylabel : str, default=None
            The label used for the y-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.
        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        **kwargs : dict
            Additional keyword arguments to be passed to the
            `plot_method`.
        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.
        See Also
        --------
        DecisionBoundaryDisplay : Decision boundary visualization.
        ConfusionMatrixDisplay.from_estimator : Plot the confusion matrix
            given an estimator, the data, and the label.
        ConfusionMatrixDisplay.from_predictions : Plot the confusion matrix
            given the true and predicted labels.
        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.inspection import DecisionBoundaryDisplay
        >>> iris = load_iris()
        >>> X = iris.data[:, :2]
        >>> classifier = LogisticRegression().fit(X, iris.target)
        >>> disp = DecisionBoundaryDisplay.from_estimator(
        ...     classifier, X, response_method="predict",
        ...     xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        ...     alpha=0.5,
        ... )
        >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )
        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )
        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )
        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)
        # Pad the evaluation grid by `eps` beyond the observed data range.
        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps
        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )
        X_for_pred = np.c_[xx0.ravel(), xx1.ravel()]
        if isinstance(X, pd.DataFrame):
            # Preserve column names so estimators fitted on dataframes do
            # not complain about missing feature names.
            X_for_pred = pd.DataFrame(X_for_pred, columns=X.columns)
        pred_func = _check_boundary_response_method(estimator, response_method)
        response = pred_func(X_for_pred)
        if response_method == "predict":
            # Encode class predictions (possibly strings) to integers so
            # they can be used as contour levels.
            label_encoder = LabelEncoder()
            label_encoder.classes_ = estimator.classes_
            response = label_encoder.transform(response)
        if response.ndim != 1:
            if response.shape[1] != 2:
                raise ValueError(
                    "Multiclass classifiers are only supported when "
                    "response_method='predict'"
                )
            # Binary case: keep only the positive-class column.
            response = response[:, 1]
        # Fall back to dataframe column names (or empty strings) for labels.
        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""
        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""
        display = DecisionBoundaryDisplay(
            xx0=xx0,
            xx1=xx1,
            response=response.reshape(xx0.shape),
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
| [
"pandas.DataFrame",
"sklearn.preprocessing.LabelEncoder",
"numpy.linspace",
"sklearn.utils.check_matplotlib_support",
"sklearn.utils._safe_indexing",
"matplotlib.pyplot.subplots"
] | [((4826, 4882), 'sklearn.utils.check_matplotlib_support', 'check_matplotlib_support', (['"""DecisionBoundaryDisplay.plot"""'], {}), "('DecisionBoundaryDisplay.plot')\n", (4850, 4882), False, 'from sklearn.utils import check_matplotlib_support\n'), ((9510, 9568), 'sklearn.utils.check_matplotlib_support', 'check_matplotlib_support', (['f"""{cls.__name__}.from_estimator"""'], {}), "(f'{cls.__name__}.from_estimator')\n", (9534, 9568), False, 'from sklearn.utils import check_matplotlib_support\n'), ((5166, 5180), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5178, 5180), True, 'import matplotlib.pyplot as plt\n'), ((10272, 10300), 'sklearn.utils._safe_indexing', '_safe_indexing', (['X', '(0)'], {'axis': '(1)'}), '(X, 0, axis=1)\n', (10286, 10300), False, 'from sklearn.utils import _safe_indexing\n'), ((10302, 10330), 'sklearn.utils._safe_indexing', '_safe_indexing', (['X', '(1)'], {'axis': '(1)'}), '(X, 1, axis=1)\n', (10316, 10330), False, 'from sklearn.utils import _safe_indexing\n'), ((10489, 10533), 'numpy.linspace', 'np.linspace', (['x0_min', 'x0_max', 'grid_resolution'], {}), '(x0_min, x0_max, grid_resolution)\n', (10500, 10533), True, 'import numpy as np\n'), ((10547, 10591), 'numpy.linspace', 'np.linspace', (['x1_min', 'x1_max', 'grid_resolution'], {}), '(x1_min, x1_max, grid_resolution)\n', (10558, 10591), True, 'import numpy as np\n'), ((10721, 10764), 'pandas.DataFrame', 'pd.DataFrame', (['X_for_pred'], {'columns': 'X.columns'}), '(X_for_pred, columns=X.columns)\n', (10733, 10764), True, 'import pandas as pd\n'), ((10957, 10971), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (10969, 10971), False, 'from sklearn.preprocessing import LabelEncoder\n')] |
""" Tests for example data. """
import os
import pytest
import numpy as np
import rasterio as rio
import geopandas as gpd
from earthpy.io import path_to_example
def test_invalid_datasets_raise_errors():
    """A dataset name that does not exist must raise a KeyError."""
    with pytest.raises(KeyError):
        path_to_example("Non-existent dataset")
def test_missing_datasets_raise_errors():
    """An empty dataset name must raise a KeyError as well."""
    with pytest.raises(KeyError):
        path_to_example("")
def test_valid_datasets_get_returned():
    """A valid dataset key resolves to a file that exists on disk."""
    assert os.path.isfile(path_to_example("epsg.json"))
def test_rgb():
    """Sanity-check shape and CRS of the bundled RMNP RGB raster."""
    with rio.open(path_to_example("rmnp-rgb.tif")) as src:
        rgb, rgb_crs = src.read(), src.crs
    assert rgb.shape == (3, 373, 485)
    # NOTE(review): compares str(crs) against a CRS object — kept as-is.
    assert str(rgb_crs) == rio.crs.CRS.from_epsg(4326)
def test_rgb_single_channels():
    """The single-band R, G, B rasters stack up to the combined RGB raster."""
    bands = []
    for fname in ["red.tif", "green.tif", "blue.tif"]:
        with rio.open(path_to_example(fname)) as src:
            bands.append(src.read())
            assert str(src.crs) == rio.crs.CRS.from_epsg(4326)
    with rio.open(path_to_example("rmnp-rgb.tif")) as src:
        assert np.array_equal(src.read(), np.concatenate(bands))
def test_colorado_counties():
    """County polygons load with the expected shape and CRS."""
    gdf = gpd.read_file(path_to_example("colorado-counties.geojson"))
    assert gdf.shape == (64, 13)
    assert gdf.crs == {"init": "epsg:4326"}
def test_colorado_glaciers():
    """Glacier point locations load with the expected shape and CRS."""
    gdf = gpd.read_file(path_to_example("colorado-glaciers.geojson"))
    assert gdf.shape == (134, 2)
    assert gdf.crs == {"init": "epsg:4326"}
def test_continental_divide_trail():
    """The Continental Divide Trail loads as a single feature in WGS84."""
    trail = gpd.read_file(path_to_example("continental-div-trail.geojson"))
    assert trail.shape == (1, 2)
    assert trail.crs == {"init": "epsg:4326"}
| [
"rasterio.open",
"numpy.concatenate",
"pytest.raises",
"earthpy.io.path_to_example",
"os.path.isfile",
"rasterio.crs.CRS.from_epsg"
] | [((652, 680), 'earthpy.io.path_to_example', 'path_to_example', (['"""epsg.json"""'], {}), "('epsg.json')\n", (667, 680), False, 'from earthpy.io import path_to_example\n'), ((692, 717), 'os.path.isfile', 'os.path.isfile', (['epsg_path'], {}), '(epsg_path)\n', (706, 717), False, 'import os\n'), ((281, 304), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (294, 304), False, 'import pytest\n'), ((314, 353), 'earthpy.io.path_to_example', 'path_to_example', (['"""Non-existent dataset"""'], {}), "('Non-existent dataset')\n", (329, 353), False, 'from earthpy.io import path_to_example\n'), ((472, 495), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (485, 495), False, 'import pytest\n'), ((505, 524), 'earthpy.io.path_to_example', 'path_to_example', (['""""""'], {}), "('')\n", (520, 524), False, 'from earthpy.io import path_to_example\n'), ((980, 1007), 'rasterio.crs.CRS.from_epsg', 'rio.crs.CRS.from_epsg', (['(4326)'], {}), '(4326)\n', (1001, 1007), True, 'import rasterio as rio\n'), ((1127, 1145), 'earthpy.io.path_to_example', 'path_to_example', (['f'], {}), '(f)\n', (1142, 1145), False, 'from earthpy.io import path_to_example\n'), ((1617, 1661), 'earthpy.io.path_to_example', 'path_to_example', (['"""colorado-counties.geojson"""'], {}), "('colorado-counties.geojson')\n", (1632, 1661), False, 'from earthpy.io import path_to_example\n'), ((1872, 1916), 'earthpy.io.path_to_example', 'path_to_example', (['"""colorado-glaciers.geojson"""'], {}), "('colorado-glaciers.geojson')\n", (1887, 1916), False, 'from earthpy.io import path_to_example\n'), ((2135, 2183), 'earthpy.io.path_to_example', 'path_to_example', (['"""continental-div-trail.geojson"""'], {}), "('continental-div-trail.geojson')\n", (2150, 2183), False, 'from earthpy.io import path_to_example\n'), ((823, 854), 'earthpy.io.path_to_example', 'path_to_example', (['"""rmnp-rgb.tif"""'], {}), "('rmnp-rgb.tif')\n", (838, 854), False, 'from earthpy.io import 
path_to_example\n'), ((1250, 1261), 'rasterio.open', 'rio.open', (['f'], {}), '(f)\n', (1258, 1261), True, 'import rasterio as rio\n'), ((1393, 1424), 'earthpy.io.path_to_example', 'path_to_example', (['"""rmnp-rgb.tif"""'], {}), "('rmnp-rgb.tif')\n", (1408, 1424), False, 'from earthpy.io import path_to_example\n'), ((1476, 1501), 'numpy.concatenate', 'np.concatenate', (['rgb_parts'], {}), '(rgb_parts)\n', (1490, 1501), True, 'import numpy as np\n'), ((1346, 1373), 'rasterio.crs.CRS.from_epsg', 'rio.crs.CRS.from_epsg', (['(4326)'], {}), '(4326)\n', (1367, 1373), True, 'import rasterio as rio\n')] |
"""
Piecewise-Linear Transformation Function
These functions, as the name suggests, are not entirely linear in the nature.
However, they are linear between certain x-intervals. One of the most commonly
used piecewise-linear transformation functions is contrast stretching.
"""
import cv2
import numpy as np
# function to map each intensity level to input intensity level
def pixel_val(pix, r1, s1, r2, s2):
    """Map one input intensity to the piecewise-linear contrast-stretching
    curve defined by the control points (r1, s1) and (r2, s2).

    Parameters
    ----------
    pix : number
        Input intensity (expected in [0, 255]).
    r1, s1 : number
        First control point: input level r1 maps to output level s1.
    r2, s2 : number
        Second control point: input level r2 maps to output level s2.

    Returns
    -------
    float
        The stretched intensity.

    Bug fix: the first condition previously tested ``pix <= r2`` instead of
    ``pix <= r1``, which made the middle segment unreachable — every value
    up to r2 was mapped with the first segment's slope.
    """
    if 0 <= pix <= r1:
        # First segment: line from (0, 0) to (r1, s1).
        return (s1 / r1) * pix
    elif r1 < pix <= r2:
        # Middle segment: line from (r1, s1) to (r2, s2).
        return ((s2 - s1) / (r2 - r1)) * (pix - r1) + s1
    else:
        # Last segment: line from (r2, s2) to (255, 255).
        return ((255 - s2) / (255 - r2)) * (pix - r2) + s2
# open the image
# NOTE(review): cv2.imread returns None when the file is missing; this
# script assumes ../images/sample.jpg exists — verify before running.
img = cv2.imread("../images/sample.jpg")
# define parameters
# (r1, s1) and (r2, s2) are the two control points of the stretching curve.
r1 = 70
s1 = 0
r2 = 140
s2 = 255
# vectorize the function to apply it to each in the numpy array
pixel_val_vec = np.vectorize(pixel_val)
# apply contrast stretching
contrast_stretched = pixel_val_vec(img, r1, s1, r2, s2)
# show the result and block until a key is pressed
cv2.imshow("image", contrast_stretched)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"numpy.vectorize",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"cv2.imshow"
] | [((675, 709), 'cv2.imread', 'cv2.imread', (['"""../images/sample.jpg"""'], {}), "('../images/sample.jpg')\n", (685, 709), False, 'import cv2\n'), ((845, 868), 'numpy.vectorize', 'np.vectorize', (['pixel_val'], {}), '(pixel_val)\n', (857, 868), True, 'import numpy as np\n'), ((955, 994), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'contrast_stretched'], {}), "('image', contrast_stretched)\n", (965, 994), False, 'import cv2\n'), ((996, 1010), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1007, 1010), False, 'import cv2\n'), ((1011, 1034), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1032, 1034), False, 'import cv2\n')] |
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from scipy.stats import multivariate_normal
class GaussianNaiveBayes(BaseEstimator):
    """Gaussian Naive-Bayes classifier.

    Models each feature as normally distributed and conditionally
    independent given the class label.
    """

    def __init__(self):
        """Instantiate an (unfitted) Gaussian Naive Bayes classifier.

        Attributes
        ----------
        self.classes_ : np.ndarray of shape (n_classes,)
            The different label classes. Set in `GaussianNaiveBayes.fit`
        self.mu_ : np.ndarray of shape (n_classes, n_features)
            The estimated feature means per class. Set in `GaussianNaiveBayes.fit`
        self.vars_ : np.ndarray of shape (n_classes, n_features)
            The estimated feature variances per class. Set in `GaussianNaiveBayes.fit`
        self.pi_ : np.ndarray of shape (n_classes,)
            The estimated class probabilities. Set in `GaussianNaiveBayes.fit`
        """
        super().__init__()
        self.classes_, self.mu_, self.vars_, self.pi_ = None, None, None, None

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """Fit the model: estimate class priors and per-class feature
        means and variances.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for
        y : ndarray of shape (n_samples, )
            Responses of input data to fit to
        """
        self.classes_, counts = np.unique(y, return_counts=True)
        n_classes = self.classes_.shape[0]
        n_features = X.shape[1]
        # Class priors are the empirical label frequencies.
        self.pi_ = counts / counts.sum()
        self.mu_ = np.zeros((n_classes, n_features))
        self.vars_ = np.zeros((n_classes, n_features))
        labels = np.asarray(y)
        for k, label in enumerate(self.classes_):
            members = X[labels == label]
            # Maximum-likelihood (biased) estimates: normalized by the
            # class sample count, matching division by counts.
            self.mu_[k] = members.mean(axis=0)
            self.vars_[k] = members.var(axis=0)

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """Predict responses for given samples using the fitted estimator.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        return self.classes_[self.likelihood(X).argmax(axis=1)]

    def likelihood(self, X: np.ndarray) -> np.ndarray:
        """Calculate the likelihood of the given data under the fitted model.

        Parameters
        ----------
        X : np.ndarray of shape (n_samples, n_features)
            Input data to calculate its likelihood over the different classes.

        Returns
        -------
        likelihoods : np.ndarray of shape (n_samples, n_classes)
            The likelihood for each sample under each of the classes
        """
        if not self.fitted_:
            raise ValueError(
                "Estimator must first be fitted before calling `likelihood` function")
        result = np.zeros((X.shape[0], len(self.classes_)))
        for k in range(len(self.classes_)):
            # Diagonal covariance encodes the naive independence assumption;
            # weight the Gaussian density by the class prior.
            result[:, k] = self.pi_[k] * multivariate_normal.pdf(
                X, mean=self.mu_[k], cov=np.diag(self.vars_[k]))
        return result

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """Evaluate performance under the misclassification loss function.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples
        y : ndarray of shape (n_samples, )
            True labels of test samples

        Returns
        -------
        loss : float
            Performance under misclassification loss
        """
        from ...metrics import misclassification_error
        return misclassification_error(y, self._predict(X))
| [
"numpy.diag",
"scipy.stats.multivariate_normal.pdf",
"numpy.zeros",
"numpy.unique"
] | [((1454, 1486), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (1463, 1486), True, 'import numpy as np\n'), ((1629, 1672), 'numpy.zeros', 'np.zeros', (['(num_of_classes, num_of_features)'], {}), '((num_of_classes, num_of_features))\n', (1637, 1672), True, 'import numpy as np\n'), ((2067, 2110), 'numpy.zeros', 'np.zeros', (['(num_of_classes, num_of_features)'], {}), '((num_of_classes, num_of_features))\n', (2075, 2110), True, 'import numpy as np\n'), ((3623, 3649), 'numpy.diag', 'np.diag', (['self.vars_[index]'], {}), '(self.vars_[index])\n', (3630, 3649), True, 'import numpy as np\n'), ((3685, 3731), 'scipy.stats.multivariate_normal.pdf', 'multivariate_normal.pdf', (['X'], {'mean': 'mean', 'cov': 'cov'}), '(X, mean=mean, cov=cov)\n', (3708, 3731), False, 'from scipy.stats import multivariate_normal\n')] |
import numpy as np
import torch.nn.functional as F
from librosa.filters import mel
from librosa.util import pad_center
from scipy.signal import get_window
from torch.autograd import Variable
from .constants import *
class STFT(torch.nn.Module):
    """adapted from <NAME>'s https://github.com/pseeth/pytorch-stft"""
    def __init__(self, filter_length, hop_length, win_length=None, window='hann'):
        super(STFT, self).__init__()
        if win_length is None:
            win_length = filter_length
        self.filter_length = filter_length
        self.hop_length = hop_length
        self.win_length = win_length
        self.window = window
        self.forward_transform = None
        # The DFT of the identity matrix yields the full Fourier basis.
        fourier_basis = np.fft.fft(np.eye(self.filter_length))
        # Keep only the non-redundant half of the spectrum.
        cutoff = int((self.filter_length / 2 + 1))
        # Stack real and imaginary rows so one conv pass computes both parts.
        fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
                                   np.imag(fourier_basis[:cutoff, :])])
        forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
        if window is not None:
            assert(filter_length >= win_length)
            # get window and zero center pad it to filter_length
            fft_window = get_window(window, win_length, fftbins=True)
            fft_window = pad_center(fft_window, filter_length)
            fft_window = torch.from_numpy(fft_window).float()
            # window the bases
            forward_basis *= fft_window
        # Buffer (not Parameter): moves with the module but is not trained.
        self.register_buffer('forward_basis', forward_basis.float())
    def forward(self, input_data):
        # Expected input: (batch, samples) waveform tensor.
        num_batches = input_data.size(0)
        num_samples = input_data.size(1)
        # similar to librosa, reflect-pad the input
        input_data = input_data.view(num_batches, 1, num_samples)
        input_data = F.pad(
            input_data.unsqueeze(1),
            (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
            mode='reflect')
        input_data = input_data.squeeze(1)
        # A single strided conv against the (windowed) Fourier basis
        # computes all STFT frames at once.
        forward_transform = F.conv1d(
            input_data,
            Variable(self.forward_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)
        cutoff = int((self.filter_length / 2) + 1)
        # The first `cutoff` output channels are real parts, the rest imaginary.
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]
        magnitude = torch.sqrt(real_part**2 + imag_part**2)
        phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
        return magnitude, phase
class MelSpectrogram(torch.nn.Module):
    """Waveform-to-log-mel-spectrogram front end built on the custom STFT."""

    def __init__(self, n_mels, sample_rate, filter_length, hop_length,
                 win_length=None, mel_fmin=0.0, mel_fmax=None):
        super(MelSpectrogram, self).__init__()
        self.stft = STFT(filter_length, hop_length, win_length)
        # librosa mel filterbank (HTK variant), kept as a buffer so it
        # moves with the module across devices without being trained.
        basis = mel(sample_rate, filter_length, n_mels, mel_fmin, mel_fmax, htk=True)
        self.register_buffer('mel_basis', torch.from_numpy(basis).float())

    def forward(self, y):
        """Computes mel-spectrograms from a batch of waves
        PARAMS
        ------
        y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
        RETURNS
        -------
        torch.FloatTensor with the log-clamped mel projection of the
        STFT magnitudes.
        """
        assert(torch.min(y.data) >= -1)
        assert(torch.max(y.data) <= 1)
        magnitudes, _phases = self.stft(y)
        projected = torch.matmul(self.mel_basis, magnitudes.data)
        # Clamp before the log to avoid log(0).
        return torch.log(torch.clamp(projected, min=1e-5))
# the default melspectrogram converter across the project
# (all-caps settings below come from the `.constants` star import)
melspectrogram = MelSpectrogram(N_MELS, SAMPLE_RATE, WINDOW_LENGTH, HOP_LENGTH, mel_fmin=MEL_FMIN, mel_fmax=MEL_FMAX)
melspectrogram.to(DEFAULT_DEVICE)
| [
"librosa.util.pad_center",
"scipy.signal.get_window",
"torch.autograd.Variable",
"librosa.filters.mel",
"numpy.imag",
"numpy.real",
"numpy.eye"
] | [((2785, 2854), 'librosa.filters.mel', 'mel', (['sample_rate', 'filter_length', 'n_mels', 'mel_fmin', 'mel_fmax'], {'htk': '(True)'}), '(sample_rate, filter_length, n_mels, mel_fmin, mel_fmax, htk=True)\n', (2788, 2854), False, 'from librosa.filters import mel\n'), ((729, 755), 'numpy.eye', 'np.eye', (['self.filter_length'], {}), '(self.filter_length)\n', (735, 755), True, 'import numpy as np\n'), ((1192, 1236), 'scipy.signal.get_window', 'get_window', (['window', 'win_length'], {'fftbins': '(True)'}), '(window, win_length, fftbins=True)\n', (1202, 1236), False, 'from scipy.signal import get_window\n'), ((1262, 1299), 'librosa.util.pad_center', 'pad_center', (['fft_window', 'filter_length'], {}), '(fft_window, filter_length)\n', (1272, 1299), False, 'from librosa.util import pad_center\n'), ((2030, 2079), 'torch.autograd.Variable', 'Variable', (['self.forward_basis'], {'requires_grad': '(False)'}), '(self.forward_basis, requires_grad=False)\n', (2038, 2079), False, 'from torch.autograd import Variable\n'), ((844, 878), 'numpy.real', 'np.real', (['fourier_basis[:cutoff, :]'], {}), '(fourier_basis[:cutoff, :])\n', (851, 878), True, 'import numpy as np\n'), ((915, 949), 'numpy.imag', 'np.imag', (['fourier_basis[:cutoff, :]'], {}), '(fourier_basis[:cutoff, :])\n', (922, 949), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bert submodules."""
# pylint: disable=missing-docstring, arguments-differ
import math
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.ops.functional as F
from mindspore import nn
from mindspore.common.initializer import TruncatedNormal
from mindspore.common.tensor import Tensor
from mindspore.model_zoo.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator
from mindspore.ops import operations as P
class BertAttentionQueryKeyMul(nn.Cell):
    """Projects the from/to tensors into multi-head query and key spaces
    and returns the projections plus their raw dot-product scores.
    """
    def __init__(self,
                 batch_size,
                 from_tensor_width,
                 to_tensor_width,
                 from_seq_length,
                 to_seq_length,
                 num_attention_heads=1,
                 size_per_head=512,
                 query_act=None,
                 key_act=None,
                 initializer_range=0.02):
        super(BertAttentionQueryKeyMul, self).__init__()
        self.from_tensor_width = from_tensor_width
        self.to_tensor_width = to_tensor_width
        self.units = num_attention_heads * size_per_head
        self.weight = TruncatedNormal(initializer_range)
        # Reshape/transpose helpers and the target layouts they produce.
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.trans_shape = (0, 2, 1, 3)
        self.shp_from_2d = (-1, self.from_tensor_width)
        self.shp_to_2d = (-1, self.to_tensor_width)
        self.shp_from = (batch_size, from_seq_length, num_attention_heads, size_per_head)
        self.shp_to = (
            batch_size, to_seq_length, num_attention_heads, size_per_head)
        # Dense projections for queries and keys.
        self.query_layer = nn.Dense(self.from_tensor_width,
                                  self.units,
                                  activation=query_act,
                                  weight_init=self.weight)
        self.key_layer = nn.Dense(self.to_tensor_width,
                                self.units,
                                activation=key_act,
                                weight_init=self.weight)
        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.cast = P.Cast()
    def construct(self, from_tensor, to_tensor):
        # Flatten to 2-D so the dense layers see (rows, width).
        from_2d = self.reshape(from_tensor, self.shp_from_2d)
        to_2d = self.reshape(to_tensor, self.shp_to_2d)
        from_2d = self.cast(from_2d, mstype.float32)
        to_2d = self.cast(to_2d, mstype.float32)
        query_out = self.query_layer(from_2d)
        key_out = self.key_layer(to_2d)
        # Back to (batch, heads, seq, head_dim).
        query_layer = self.transpose(
            self.reshape(query_out, self.shp_from), self.trans_shape)
        key_layer = self.transpose(
            self.reshape(key_out, self.shp_to), self.trans_shape)
        # Raw (unscaled) attention scores: Q @ K^T.
        attention_scores = self.matmul_trans_b(query_layer, key_layer)
        return query_layer, key_layer, attention_scores
class BertAttentionRelativePositionKeys(nn.Cell):
    """Optionally adds relative-position key scores to the raw attention
    scores, then scales them by 1/sqrt(size_per_head).
    """
    def __init__(self,
                 batch_size,
                 from_seq_length,
                 to_seq_length,
                 num_attention_heads=1,
                 size_per_head=512,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 use_relative_positions=False,
                 # NOTE(review): `dtype` is accepted but never used here.
                 dtype=mstype.float32,
                 compute_type=mstype.float32):
        super(BertAttentionRelativePositionKeys, self).__init__()
        self.batch_size = batch_size
        self.from_seq_length = from_seq_length
        self.to_seq_length = to_seq_length
        self.use_relative_positions = use_relative_positions
        self.size_per_head = size_per_head
        self.num_attention_heads = num_attention_heads
        self.trans_shape_position = (1, 2, 0, 3)
        self.trans_shape_relative = (2, 0, 1, 3)
        # Standard scaled-dot-product factor.
        self.scores_mul = 1.0 / math.sqrt(float(self.size_per_head))
        self.reshape = P.Reshape()
        self.multiply = P.Mul()
        self.transpose = P.Transpose()
        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.batch_num = batch_size * num_attention_heads
        self.cast = P.Cast()
        self.cast_compute_type = SaturateCast(dst_type=compute_type)
        self._generate_relative_positions_embeddings = \
            RelaPosEmbeddingsGenerator(length=self.to_seq_length,
                                       depth=self.size_per_head,
                                       max_relative_position=16,
                                       initializer_range=initializer_range,
                                       use_one_hot_embeddings=use_one_hot_embeddings)
    def construct(self, input_tensor, query_layer):
        # use_relative_position, supplementary logic
        relations_keys_embeddings = self._generate_relative_positions_embeddings()
        if self.use_relative_positions:
            # 'relations_keys' = [F|T, F|T, H]
            relations_keys = self.cast_compute_type(relations_keys_embeddings)
            # query_layer_t is [F, B, N, H]
            query_layer_t = self.transpose(query_layer, self.trans_shape_relative)
            # query_layer_r is [F, B * N, H]
            query_layer_r = self.reshape(query_layer_t,
                                         (self.from_seq_length,
                                          self.batch_num,
                                          self.size_per_head))
            # key_position_scores is [F, B * N, F|T]
            query_layer_r = self.cast(query_layer_r, mstype.float32)
            key_position_scores = self.matmul_trans_b(query_layer_r,
                                                      relations_keys)
            # key_position_scores_r is [F, B, N, F|T]
            key_position_scores_r = self.reshape(key_position_scores,
                                                 (self.from_seq_length,
                                                  self.batch_size,
                                                  self.num_attention_heads,
                                                  self.from_seq_length))
            # key_position_scores_r_t is [B, N, F, F|T]
            key_position_scores_r_t = self.transpose(key_position_scores_r,
                                                     self.trans_shape_position)
            input_tensor = self.cast(input_tensor, mstype.float32)
            input_tensor = input_tensor + key_position_scores_r_t
        # Scaling is applied whether or not relative positions are used.
        attention_scores = self.multiply(input_tensor, self.scores_mul)
        return relations_keys_embeddings, attention_scores
class BertAttentionMask(nn.Cell):
    """Applies an (optional) additive attention mask to raw attention scores.

    Masked-out positions receive a large negative bias (-1000) so a
    subsequent softmax assigns them near-zero probability.
    """
    def __init__(self,
                 has_attention_mask=False,
                 dtype=mstype.float32):
        super(BertAttentionMask, self).__init__()
        self.has_attention_mask = has_attention_mask
        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
        self.multiply = P.Mul()
        # Bug fix: `cast` is used unconditionally in construct(), so it must
        # exist even when has_attention_mask is False (previously it was
        # only created inside the branch below, causing an AttributeError).
        self.cast = P.Cast()
        if self.has_attention_mask:
            self.expand_dims = P.ExpandDims()
            self.sub = P.Sub()
            self.add = P.TensorAdd()
            self.get_dtype = P.DType()
    def construct(self, input_tensor, attention_mask):
        attention_scores = input_tensor
        attention_scores = self.cast(attention_scores, mstype.float32)
        if self.has_attention_mask:
            # Broadcast the mask over the head dimension.
            attention_mask = self.expand_dims(attention_mask, 1)
            # (1 - mask) selects the masked-out positions ...
            multiply_out = self.sub(self.cast(F.tuple_to_array((1.0,)), mstype.float32),
                                    self.cast(attention_mask, self.get_dtype(attention_scores)))
            # ... which receive the -1000 additive penalty.
            adder = self.multiply(multiply_out, self.multiply_data)
            attention_scores = self.add(adder, attention_scores)
        return attention_scores
class BertAttentionMaskBackward(nn.Cell):
    """Variant of BertAttentionMask that holds a constant all-ones mask of a
    fixed shape instead of taking the mask as an input.
    """
    def __init__(self,
                 attention_mask_shape,
                 has_attention_mask=False,
                 dtype=mstype.float32):
        super(BertAttentionMaskBackward, self).__init__()
        self.has_attention_mask = has_attention_mask
        self.multiply_data = Tensor([-1000.0,], dtype=dtype)
        self.multiply = P.Mul()
        # Constant mask: all positions enabled.
        self.attention_mask = Tensor(np.ones(shape=attention_mask_shape).astype(np.float32))
        # Bug fix: `cast` is used unconditionally in construct(), so it must
        # exist even when has_attention_mask is False (previously it was
        # only created inside the branch below, causing an AttributeError).
        self.cast = P.Cast()
        if self.has_attention_mask:
            self.expand_dims = P.ExpandDims()
            self.sub = P.Sub()
            self.add = P.TensorAdd()
            self.get_dtype = P.DType()
    def construct(self, input_tensor):
        attention_scores = input_tensor
        attention_scores = self.cast(attention_scores, mstype.float32)
        if self.has_attention_mask:
            # Broadcast the stored mask over the head dimension.
            attention_mask = self.expand_dims(self.attention_mask, 1)
            multiply_out = self.sub(self.cast(F.tuple_to_array((1.0,)), mstype.float32),
                                    self.cast(attention_mask, self.get_dtype(attention_scores)))
            adder = self.multiply(multiply_out, self.multiply_data)
            attention_scores = self.add(adder, attention_scores)
        return attention_scores
class BertAttentionSoftmax(nn.Cell):
    """Normalizes attention scores with softmax and applies them to the
    value projection, producing the per-head context tensor.
    """
    def __init__(self,
                 batch_size,
                 to_tensor_width,
                 from_seq_length,
                 to_seq_length,
                 num_attention_heads=1,
                 size_per_head=512,
                 value_act=None,
                 attention_probs_dropout_prob=0.0,
                 initializer_range=0.02):
        super(BertAttentionSoftmax, self).__init__()
        self.to_tensor_width = to_tensor_width
        self.value_act = value_act
        self.reshape = P.Reshape()
        self.shp_to_2d = (-1, self.to_tensor_width)
        self.shp_from = (batch_size, from_seq_length, num_attention_heads, size_per_head)
        self.shp_to = (
            batch_size, to_seq_length, num_attention_heads, size_per_head)
        self.trans_shape = (0, 2, 1, 3)
        # NOTE(review): a rank-2 permutation — construct() appears to expect
        # a 2-D `to_tensor` here; confirm against the caller.
        self.trans_shape_start = (0, 1)
        self.matmul = P.BatchMatMul()
        self.units = num_attention_heads * size_per_head
        self.weight = TruncatedNormal(initializer_range)
        self.softmax = nn.Softmax()
        # NOTE(review): dropout is created but never applied in construct().
        self.dropout = nn.Dropout(1 - attention_probs_dropout_prob)
        self.transpose = P.Transpose()
        # Dense projection for values.
        self.value_layer = nn.Dense(self.to_tensor_width,
                                  self.units,
                                  activation=value_act,
                                  weight_init=self.weight)
        self.cast = P.Cast()
    def construct(self, to_tensor, attention_scores):
        to_tensor = self.transpose(to_tensor, self.trans_shape_start)
        # Flatten to 2-D for the dense value projection.
        to_tensor_2d = self.reshape(to_tensor, self.shp_to_2d)
        to_tensor_2d = self.cast(to_tensor_2d, mstype.float32)
        value_out = self.value_layer(to_tensor_2d)
        # Normalize scores into attention probabilities.
        attention_probs = self.softmax(attention_scores)
        attention_probs = self.cast(attention_probs, mstype.float32)
        # Back to (batch, heads, seq, head_dim) for the weighted sum.
        value_layer = self.reshape(value_out, self.shp_to)
        value_layer = self.transpose(value_layer, self.trans_shape)
        context_layer = self.matmul(attention_probs, value_layer)
        return value_layer, context_layer
class BertAttentionRelativePositionValues(nn.Cell):
    """Merge relative-position value embeddings into an attention context.

    When ``use_relative_positions`` is True, ``construct`` adds per-position
    value scores (built from a RelaPosEmbeddingsGenerator) to
    ``input_tensor`` before flattening it to ``shp_return``.

    Fix vs. the previous revision: ``self.multiply`` was assigned
    ``P.Mul()`` twice (once early in ``__init__`` and again after
    ``self.fill``); the redundant second assignment is removed -- both
    created an identical operator, so behavior is unchanged.
    """
    def __init__(self,
                 batch_size,
                 from_seq_length,
                 to_seq_length,
                 num_attention_heads=1,
                 size_per_head=512,
                 use_one_hot_embeddings=False,
                 initializer_range=0.02,
                 do_return_2d_tensor=False,
                 use_relative_positions=False,
                 dtype=mstype.float32,
                 compute_type=mstype.float32):
        super(BertAttentionRelativePositionValues, self).__init__()
        self.batch_size = batch_size
        self.from_seq_length = from_seq_length
        self.to_seq_length = to_seq_length
        self.use_relative_positions = use_relative_positions
        self.size_per_head = size_per_head
        self.num_attention_heads = num_attention_heads
        # Axis permutations used by construct:
        #   trans_shape_position: [F, B, N, H] -> [B, N, F, H]
        #   trans_shape_relative: [B, N, F, T] -> [F, B, N, T]
        self.trans_shape_position = (1, 2, 0, 3)
        self.trans_shape_relative = (2, 0, 1, 3)
        # NOTE(review): scores_mul and the 'dtype' parameter are kept for
        # interface compatibility but are not read inside construct.
        self.scores_mul = 1.0 / math.sqrt(float(self.size_per_head))
        self.trans_shape = (0, 2, 1, 3)
        self.reshape = P.Reshape()
        self.multiply = P.Mul()
        self.transpose = P.Transpose()
        self.batch_num = batch_size * num_attention_heads
        self.matmul = P.BatchMatMul()
        self.do_return_2d_tensor = do_return_2d_tensor
        if self.do_return_2d_tensor:
            self.shp_return = (batch_size * from_seq_length, num_attention_heads * size_per_head)
        else:
            self.shp_return = (batch_size, from_seq_length, num_attention_heads * size_per_head)
        self.cast_compute_type = SaturateCast(dst_type=compute_type)
        self._generate_relative_positions_embeddings = \
            RelaPosEmbeddingsGenerator(length=self.to_seq_length,
                                       depth=self.size_per_head,
                                       max_relative_position=16,
                                       initializer_range=initializer_range,
                                       use_one_hot_embeddings=use_one_hot_embeddings)
        self.fill = P.Fill()
        self.type = P.DType()
        self.cast = P.Cast()
    def construct(self, input_tensor, attention_probs):
        """Return (relations_values_embedding, context_layer)."""
        # use_relative_position, supplementary logic
        relations_values_embedding = self._generate_relative_positions_embeddings() # (128, 128, 64)
        if self.use_relative_positions:
            # 'relations_values' = [F|T, F|T, H]
            relations_values = self.cast_compute_type(relations_values_embedding)
            # attention_probs_t is [F, B, N, T]
            attention_probs_t = self.transpose(attention_probs, self.trans_shape_relative)
            # attention_probs_r is [F, B * N, T]
            attention_probs_r = self.reshape(
                attention_probs_t,
                (self.from_seq_length,
                 self.batch_num,
                 self.to_seq_length)) # (128,768,128)
            # value_position_scores is [F, B * N, H]
            value_position_scores = self.matmul(attention_probs_r,
                                                relations_values)
            # value_position_scores_r is [F, B, N, H]
            value_position_scores_r = self.reshape(value_position_scores,
                                                   (self.from_seq_length,
                                                    self.batch_size,
                                                    self.num_attention_heads,
                                                    self.size_per_head))
            # value_position_scores_r_t is [B, N, F, H]
            value_position_scores_r_t = self.transpose(value_position_scores_r,
                                                       self.trans_shape_position)
            input_tensor = input_tensor + value_position_scores_r_t
        context_layer = self.transpose(input_tensor, self.trans_shape)
        context_layer = self.reshape(context_layer, self.shp_return)
        # ge reshape should not return, need an operator here
        ones = self.cast(self.fill((1, 1), 1), self.type(context_layer))
        context_layer = self.multiply(context_layer, ones)
        return relations_values_embedding, context_layer
class BertDense(nn.Cell):
    """Feed-forward expansion: project hidden_size -> intermediate_size."""
    def __init__(self,
                 hidden_size=768,
                 intermediate_size=3072,
                 initializer_range=0.02):
        super(BertDense, self).__init__()
        # Plain linear layer (activation=None: no non-linearity applied
        # here) with truncated-normal weight initialisation.
        self.intermediate = nn.Dense(in_channels=hidden_size,
                                     out_channels=intermediate_size,
                                     activation=None,
                                     weight_init=TruncatedNormal(initializer_range))
        self.cast = P.Cast()
    def construct(self, attention_output):
        """Cast the input to float32 and project it to the intermediate
        width."""
        hidden = self.cast(attention_output, mstype.float32)
        return self.intermediate(hidden)
| [
"mindspore.ops.operations.Cast",
"mindspore.ops.operations.DType",
"numpy.ones",
"mindspore.ops.operations.Fill",
"mindspore.model_zoo.Bert_NEZHA.bert_model.SaturateCast",
"mindspore.ops.operations.Transpose",
"mindspore.ops.operations.Mul",
"mindspore.ops.operations.TensorAdd",
"mindspore.common.in... | [((1765, 1799), 'mindspore.common.initializer.TruncatedNormal', 'TruncatedNormal', (['initializer_range'], {}), '(initializer_range)\n', (1780, 1799), False, 'from mindspore.common.initializer import TruncatedNormal\n'), ((1866, 1879), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (1877, 1879), True, 'from mindspore.ops import operations as P\n'), ((1903, 1914), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (1912, 1914), True, 'from mindspore.ops import operations as P\n'), ((2050, 2145), 'mindspore.nn.Dense', 'nn.Dense', (['self.from_tensor_width', 'self.units'], {'activation': 'query_act', 'weight_init': 'self.weight'}), '(self.from_tensor_width, self.units, activation=query_act,\n weight_init=self.weight)\n', (2058, 2145), False, 'from mindspore import nn\n'), ((2275, 2367), 'mindspore.nn.Dense', 'nn.Dense', (['self.to_tensor_width', 'self.units'], {'activation': 'key_act', 'weight_init': 'self.weight'}), '(self.to_tensor_width, self.units, activation=key_act, weight_init=\n self.weight)\n', (2283, 2367), False, 'from mindspore import nn\n'), ((2686, 2717), 'mindspore.ops.operations.BatchMatMul', 'P.BatchMatMul', ([], {'transpose_b': '(True)'}), '(transpose_b=True)\n', (2699, 2717), True, 'from mindspore.ops import operations as P\n'), ((2738, 2746), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (2744, 2746), True, 'from mindspore.ops import operations as P\n'), ((4548, 4559), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (4557, 4559), True, 'from mindspore.ops import operations as P\n'), ((4584, 4591), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (4589, 4591), True, 'from mindspore.ops import operations as P\n'), ((4617, 4630), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (4628, 4630), True, 'from mindspore.ops import operations as P\n'), ((4661, 4692), 'mindspore.ops.operations.BatchMatMul', 'P.BatchMatMul', ([], 
{'transpose_b': '(True)'}), '(transpose_b=True)\n', (4674, 4692), True, 'from mindspore.ops import operations as P\n'), ((4771, 4779), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (4777, 4779), True, 'from mindspore.ops import operations as P\n'), ((4814, 4849), 'mindspore.model_zoo.Bert_NEZHA.bert_model.SaturateCast', 'SaturateCast', ([], {'dst_type': 'compute_type'}), '(dst_type=compute_type)\n', (4826, 4849), False, 'from mindspore.model_zoo.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator\n'), ((4919, 5118), 'mindspore.model_zoo.Bert_NEZHA.bert_model.RelaPosEmbeddingsGenerator', 'RelaPosEmbeddingsGenerator', ([], {'length': 'self.to_seq_length', 'depth': 'self.size_per_head', 'max_relative_position': '(16)', 'initializer_range': 'initializer_range', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), '(length=self.to_seq_length, depth=self.\n size_per_head, max_relative_position=16, initializer_range=\n initializer_range, use_one_hot_embeddings=use_one_hot_embeddings)\n', (4945, 5118), False, 'from mindspore.model_zoo.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator\n'), ((7460, 7490), 'mindspore.common.tensor.Tensor', 'Tensor', (['[-1000.0]'], {'dtype': 'dtype'}), '([-1000.0], dtype=dtype)\n', (7466, 7490), False, 'from mindspore.common.tensor import Tensor\n'), ((7516, 7523), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (7521, 7523), True, 'from mindspore.ops import operations as P\n'), ((8697, 8727), 'mindspore.common.tensor.Tensor', 'Tensor', (['[-1000.0]'], {'dtype': 'dtype'}), '([-1000.0], dtype=dtype)\n', (8703, 8727), False, 'from mindspore.common.tensor import Tensor\n'), ((8753, 8760), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (8758, 8760), True, 'from mindspore.ops import operations as P\n'), ((10237, 10248), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (10246, 10248), True, 'from mindspore.ops import operations as P\n'), ((10594, 10609), 
'mindspore.ops.operations.BatchMatMul', 'P.BatchMatMul', ([], {}), '()\n', (10607, 10609), True, 'from mindspore.ops import operations as P\n'), ((10690, 10724), 'mindspore.common.initializer.TruncatedNormal', 'TruncatedNormal', (['initializer_range'], {}), '(initializer_range)\n', (10705, 10724), False, 'from mindspore.common.initializer import TruncatedNormal\n'), ((10749, 10761), 'mindspore.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (10759, 10761), False, 'from mindspore import nn\n'), ((10785, 10829), 'mindspore.nn.Dropout', 'nn.Dropout', (['(1 - attention_probs_dropout_prob)'], {}), '(1 - attention_probs_dropout_prob)\n', (10795, 10829), False, 'from mindspore import nn\n'), ((10855, 10868), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (10866, 10868), True, 'from mindspore.ops import operations as P\n'), ((10897, 10990), 'mindspore.nn.Dense', 'nn.Dense', (['self.to_tensor_width', 'self.units'], {'activation': 'value_act', 'weight_init': 'self.weight'}), '(self.to_tensor_width, self.units, activation=value_act,\n weight_init=self.weight)\n', (10905, 10990), False, 'from mindspore import nn\n'), ((11115, 11123), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (11121, 11123), True, 'from mindspore.ops import operations as P\n'), ((12891, 12902), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (12900, 12902), True, 'from mindspore.ops import operations as P\n'), ((12927, 12934), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (12932, 12934), True, 'from mindspore.ops import operations as P\n'), ((12960, 12973), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (12971, 12973), True, 'from mindspore.ops import operations as P\n'), ((13054, 13069), 'mindspore.ops.operations.BatchMatMul', 'P.BatchMatMul', ([], {}), '()\n', (13067, 13069), True, 'from mindspore.ops import operations as P\n'), ((13405, 13440), 'mindspore.model_zoo.Bert_NEZHA.bert_model.SaturateCast', 
'SaturateCast', ([], {'dst_type': 'compute_type'}), '(dst_type=compute_type)\n', (13417, 13440), False, 'from mindspore.model_zoo.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator\n'), ((13510, 13709), 'mindspore.model_zoo.Bert_NEZHA.bert_model.RelaPosEmbeddingsGenerator', 'RelaPosEmbeddingsGenerator', ([], {'length': 'self.to_seq_length', 'depth': 'self.size_per_head', 'max_relative_position': '(16)', 'initializer_range': 'initializer_range', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), '(length=self.to_seq_length, depth=self.\n size_per_head, max_relative_position=16, initializer_range=\n initializer_range, use_one_hot_embeddings=use_one_hot_embeddings)\n', (13536, 13709), False, 'from mindspore.model_zoo.Bert_NEZHA.bert_model import SaturateCast, RelaPosEmbeddingsGenerator\n'), ((13876, 13884), 'mindspore.ops.operations.Fill', 'P.Fill', ([], {}), '()\n', (13882, 13884), True, 'from mindspore.ops import operations as P\n'), ((13909, 13916), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (13914, 13916), True, 'from mindspore.ops import operations as P\n'), ((13937, 13946), 'mindspore.ops.operations.DType', 'P.DType', ([], {}), '()\n', (13944, 13946), True, 'from mindspore.ops import operations as P\n'), ((13967, 13975), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (13973, 13975), True, 'from mindspore.ops import operations as P\n'), ((16621, 16629), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (16627, 16629), True, 'from mindspore.ops import operations as P\n'), ((7592, 7606), 'mindspore.ops.operations.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (7604, 7606), True, 'from mindspore.ops import operations as P\n'), ((7630, 7637), 'mindspore.ops.operations.Sub', 'P.Sub', ([], {}), '()\n', (7635, 7637), True, 'from mindspore.ops import operations as P\n'), ((7661, 7674), 'mindspore.ops.operations.TensorAdd', 'P.TensorAdd', ([], {}), '()\n', (7672, 7674), True, 'from mindspore.ops import 
operations as P\n'), ((7699, 7707), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (7705, 7707), True, 'from mindspore.ops import operations as P\n'), ((7737, 7746), 'mindspore.ops.operations.DType', 'P.DType', ([], {}), '()\n', (7744, 7746), True, 'from mindspore.ops import operations as P\n'), ((8921, 8935), 'mindspore.ops.operations.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (8933, 8935), True, 'from mindspore.ops import operations as P\n'), ((8959, 8966), 'mindspore.ops.operations.Sub', 'P.Sub', ([], {}), '()\n', (8964, 8966), True, 'from mindspore.ops import operations as P\n'), ((8990, 9003), 'mindspore.ops.operations.TensorAdd', 'P.TensorAdd', ([], {}), '()\n', (9001, 9003), True, 'from mindspore.ops import operations as P\n'), ((9028, 9036), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (9034, 9036), True, 'from mindspore.ops import operations as P\n'), ((9066, 9075), 'mindspore.ops.operations.DType', 'P.DType', ([], {}), '()\n', (9073, 9075), True, 'from mindspore.ops import operations as P\n'), ((16485, 16519), 'mindspore.common.initializer.TruncatedNormal', 'TruncatedNormal', (['initializer_range'], {}), '(initializer_range)\n', (16500, 16519), False, 'from mindspore.common.initializer import TruncatedNormal\n'), ((8061, 8085), 'mindspore.ops.functional.tuple_to_array', 'F.tuple_to_array', (['(1.0,)'], {}), '((1.0,))\n', (8077, 8085), True, 'import mindspore.ops.functional as F\n'), ((8798, 8833), 'numpy.ones', 'np.ones', ([], {'shape': 'attention_mask_shape'}), '(shape=attention_mask_shape)\n', (8805, 8833), True, 'import numpy as np\n'), ((9379, 9403), 'mindspore.ops.functional.tuple_to_array', 'F.tuple_to_array', (['(1.0,)'], {}), '((1.0,))\n', (9395, 9403), True, 'import mindspore.ops.functional as F\n')] |
# -*- coding: utf-8 -*-
"""Collection of functions for the manipulation of time series."""
from __future__ import absolute_import, division, print_function
import sys
import mando
import numpy as np
import pandas as pd
import typic
from mando.rst_text_formatter import RSTHelpFormatter
from .. import tsutils
def _dtw(ts_a, ts_b, d=lambda x, y: abs(x - y), window=10000):
    """Return the DTW similarity distance timeseries numpy arrays.

    Parameters
    ----------
    ts_a, ts_b : array of shape [n_samples, n_timepoints]
        Two arrays containing n_samples of timeseries data
        whose DTW distance between each sample of A and B
        will be compared
    d : DistanceMetric object (default = abs(x-y))
        the distance measure used for A_i - B_j in the
        DTW dynamic programming function

    Returns
    -------
    DTW distance between A and B
    """
    seq_a, seq_b = np.array(ts_a), np.array(ts_b)
    rows, cols = len(seq_a), len(seq_b)
    # Cost matrix pre-filled with a huge sentinel so cells outside the
    # window band can never win the min() below.
    cost = sys.maxsize * np.ones((rows, cols))
    # Boundary conditions: exactly one warping path reaches each cell of
    # row 0 / column 0, so those are plain running sums.
    cost[0, 0] = d(seq_a[0], seq_b[0])
    for r in range(1, rows):
        cost[r, 0] = cost[r - 1, 0] + d(seq_a[r], seq_b[0])
    for c in range(1, cols):
        cost[0, c] = cost[0, c - 1] + d(seq_a[0], seq_b[c])
    # Fill the interior, restricted to |r - c| < window.
    for r in range(1, rows):
        for c in range(max(1, r - window), min(cols, r + window)):
            best = min(cost[r - 1, c - 1], cost[r, c - 1], cost[r - 1, c])
            cost[r, c] = best + d(seq_a[r], seq_b[c])
    # Return DTW distance given window
    return cost[-1, -1]
@mando.command("dtw", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.docstrings)
def dtw_cli(
    input_ts="-",
    columns=None,
    start_date=None,
    end_date=None,
    round_index=None,
    dropna="no",
    skiprows=None,
    index_type="datetime",
    names=None,
    clean=False,
    window=10000,
    source_units=None,
    target_units=None,
    tablefmt="csv",
):
    """Dynamic Time Warping.
    Parameters
    ----------
    window : int
        [optional, default is 10000]
        Window length.
    {input_ts}
    {columns}
    {start_date}
    {end_date}
    {round_index}
    {dropna}
    {skiprows}
    {index_type}
    {source_units}
    {target_units}
    {names}
    {clean}
    {tablefmt}
    """
    # CLI shim: every option is forwarded verbatim to the library function
    # ``dtw`` and the resulting table is printed via tsutils.printiso.
    # NOTE(review): the {placeholders} in the docstring above are
    # presumably substituted by the @tsutils.doc decorator and become part
    # of the CLI help text -- confirm before editing the docstring.
    tsutils.printiso(
        dtw(
            input_ts=input_ts,
            columns=columns,
            start_date=start_date,
            end_date=end_date,
            round_index=round_index,
            dropna=dropna,
            skiprows=skiprows,
            index_type=index_type,
            names=names,
            clean=clean,
            window=window,
            source_units=source_units,
            target_units=target_units,
        ),
        tablefmt=tablefmt,
    )
@typic.al
def dtw(
    input_ts="-",
    columns=None,
    start_date=None,
    end_date=None,
    round_index=None,
    dropna="no",
    skiprows=None,
    index_type="datetime",
    names=None,
    clean=False,
    window: int = 10000,
    source_units=None,
    target_units=None,
):
    """Dynamic Time Warping."""
    # Normalise / select the input timeseries with the shared helper.
    tsd = tsutils.common_kwds(
        input_ts,
        skiprows=skiprows,
        names=names,
        index_type=index_type,
        start_date=start_date,
        end_date=end_date,
        pick=columns,
        round_index=round_index,
        dropna=dropna,
        source_units=source_units,
        target_units=target_units,
        clean=clean,
    )
    # DTW score for every unordered pair of distinct columns; each pair is
    # computed exactly once.
    scores = {}
    for left in tsd.columns:
        for right in tsd.columns:
            seen = (left, right) in scores or (right, left) in scores
            if left != right and not seen:
                scores[(left, right)] = _dtw(tsd[left], tsd[right], window=window)
    result = pd.DataFrame(list(scores.items()))
    result.columns = ["Variables"] + [
        str(col) + "DTW_score" for col in result.columns[1:]
    ]
    return result
dtw.__doc__ = dtw_cli.__doc__
| [
"mando.command",
"numpy.array",
"numpy.ones"
] | [((1657, 1728), 'mando.command', 'mando.command', (['"""dtw"""'], {'formatter_class': 'RSTHelpFormatter', 'doctype': '"""numpy"""'}), "('dtw', formatter_class=RSTHelpFormatter, doctype='numpy')\n", (1670, 1728), False, 'import mando\n'), ((964, 978), 'numpy.array', 'np.array', (['ts_a'], {}), '(ts_a)\n', (972, 978), True, 'import numpy as np\n'), ((980, 994), 'numpy.array', 'np.array', (['ts_b'], {}), '(ts_b)\n', (988, 994), True, 'import numpy as np\n'), ((1052, 1067), 'numpy.ones', 'np.ones', (['(M, N)'], {}), '((M, N))\n', (1059, 1067), True, 'import numpy as np\n')] |
import warnings
warnings.filterwarnings("ignore")
import os
import sys
sys.path.append(os.pardir)
import time
import numpy as np
from cifar_data import load_data
from evaluate.evaluate import evaluate_batch
from utils.optimizers import *
from utils.activations import *
from train import data_generator
from model.Base import NetworkBase
# from .cifar_data import load_data
# from .mnist_1 import load_data
class Network(NetworkBase):
    """Fully connected network on column vectors (activations of shape (n, 1)).

    Weights are (fan_out, fan_in) matrices with He scaling
    (``randn * sqrt(2 / fan_in)``), which suits ReLU activations.
    """
    def __init__(self, sizes=None, activation="relu", dropout_rate=0.0):
        """
        :param sizes: list of layer widths (default ``[100, 100]``)
        :param activation: "sigmoid" or "relu"
        :param dropout_rate: probability of dropping a hidden unit
        """
        # Fix: the old signature used the mutable default ``sizes=[100, 100]``
        # (shared across calls); the same default is preserved via a
        # None sentinel, so callers are unaffected.
        if sizes is None:
            sizes = [100, 100]
        self.sizes = sizes
        self.num_layers = len(sizes)
        self.weights = [np.random.randn(back_layer, forward_layer) * np.sqrt(2.0 / forward_layer)
                        for forward_layer, back_layer in zip(sizes[:-1], sizes[1:])]
        self.biases = [np.random.randn(back_layer, 1) for back_layer in sizes[1:]]
        self.dropout_rate = dropout_rate
        # TODO activation_functions = {'sigmoid': sigmoid, 'relu': relu} tanh
        if activation.lower() == "sigmoid":
            self.activation = sigmoid
            self.activation_derivative = sigmoid_derivative
        elif activation.lower() == "relu":
            self.activation = relu
            self.activation_derivative = relu_derivative
    def predict(self, a):
        """Forward pass for inference; returns the pre-softmax output.

        Hidden activations are scaled by (1 - dropout_rate), i.e. dropout
        weight scaling at test time.
        """
        for w, b in zip(self.weights[:-1], self.biases[:-1]):
            a = self.activation(np.dot(w, a) + b)
            a *= (1.0 - self.dropout_rate) ######### test dropout
        a = np.dot(self.weights[-1], a) + self.biases[-1]
        return a
    def backprop(self, x, y):
        """Return (gradient_w, gradient_b) for one example via backprop.

        The final layer's error is computed from softmax(output) vs ``y``.
        """
        gradient_w = [np.zeros(w.shape) for w in self.weights]
        gradient_b = [np.zeros(b.shape) for b in self.biases]
        # forward pass #
        a = x
        a_hold = [x]
        z_hold = []
        for w, b in zip(self.weights[:-1], self.biases[:-1]):
            z = np.dot(w, a) + b
            # NOTE(review): the dropout mask zeroes z in the forward pass,
            # but the backward pass below relies only on
            # activation_derivative(z_hold) -- confirm this matches the
            # intended dropout gradient.
            self.mask = np.random.rand(*z.shape) > self.dropout_rate
            z *= self.mask
            # z /= (1 - self.dropout_rate)
            a = self.activation(z)
            z_hold.append(z)
            a_hold.append(a)
        final_layer = np.dot(self.weights[-1], a) + self.biases[-1]
        z_hold.append(final_layer)
        a_hold.append(softmax(final_layer))
        # backward pass#
        delta = softmax_derivative(a_hold[-1], y)
        gradient_w[-1] = np.dot(delta, a_hold[-2].T)
        gradient_b[-1] = delta
        for l in range(2, self.num_layers):
            delta = np.dot(self.weights[-l + 1].T, delta) * self.activation_derivative(z_hold[-l])
            gradient_w[-l] = np.dot(delta, a_hold[-l - 1].T)
            gradient_b[-l] = delta
        return gradient_w, gradient_b
class Network_mini_batch(NetworkBase):
    """Row-vector variant: samples are stacked along axis 0, so forward and
    backward passes operate on whole mini-batches at once.
    """
    def __init__(self, sizes=[100, 100], activation="relu"):
        """
        :param sizes: list of layer widths
        :param activation: "sigmoid" or "relu" (resolved by the base class)
        """
        super().__init__(sizes, activation)
        # Re-initialise parameters in row-vector orientation:
        # (fan_in, fan_out) weights with He scaling, 1-D biases that
        # broadcast across the batch dimension.
        self.weights = [np.random.randn(forward_layer, back_layer) * np.sqrt(2.0 / forward_layer) \
                        for forward_layer, back_layer in zip(sizes[:-1], sizes[1:])]
        self.biases = [np.random.randn(layer) for layer in sizes[1:]]
    def predict(self, a):
        """Forward pass; returns the pre-softmax output of the last layer."""
        for w, b in zip(self.weights[:-1], self.biases[:-1]):
            a = self.activation(np.dot(a, w) + b)
        return np.dot(a, self.weights[-1]) + self.biases[-1]
    def backprop(self, x, y):
        """Return (gradient_w, gradient_b) for a whole mini-batch.

        Bias gradients are summed over the batch axis.
        """
        grad_w = [np.zeros(w.shape) for w in self.weights]
        grad_b = [np.zeros(b.shape) for b in self.biases]
        # Forward pass: keep pre-activations and activations per layer.
        act = x
        act_hold = [x]
        pre_hold = []
        for w, b in zip(self.weights[:-1], self.biases[:-1]):
            pre = np.dot(act, w) + b  # batch z = a * w + b
            act = self.activation(pre)
            pre_hold.append(pre)
            act_hold.append(act)
        logits = np.dot(act, self.weights[-1]) + self.biases[-1]
        pre_hold.append(logits)
        act_hold.append(softmax_batch(logits))
        # Backward pass from the softmax output layer.
        delta = softmax_derivative(act_hold[-1], y)
        grad_w[-1] = np.dot(act_hold[-2].T, delta)
        grad_b[-1] = np.sum(delta, axis=0)
        for layer in range(2, self.num_layers):
            delta = np.dot(delta, self.weights[-layer + 1].T) * self.activation_derivative(pre_hold[-layer])
            grad_w[-layer] = np.dot(act_hold[-layer - 1].T, delta)
            grad_b[-layer] = np.sum(delta, axis=0)
        return grad_w, grad_b
def train_DNN_minibatch(X_train, y_train, num_epochs, optimizer, batch_size, network,
                        X_test=None, y_test=None, batch_mode="normal"):  # balance
    """Train ``network`` with mini-batch updates for ``num_epochs`` epochs.

    batch_mode "normal" reshuffles the full training set each epoch and
    walks it in contiguous slices; "balance" draws class-balanced batches
    from data_generator.  After every epoch the train (and, when test data
    is given, validation) loss/accuracy are printed.
    """
    for epoch in range(num_epochs):
        epoch_start = time.time()
        if batch_mode == "normal":
            perm = np.random.choice(len(X_train), len(X_train), replace=False)
            X_train, y_train = X_train[perm], y_train[perm]
            for offset in range(0, len(X_train), batch_size):
                xb = X_train[offset: offset + batch_size]
                yb = y_train[offset: offset + batch_size]
                grad_w, grad_b = network.backprop(xb, yb)
                optimizer.update(network.weights, network.biases, grad_w, grad_b)
        elif batch_mode == "balance":
            sampler = data_generator.datainit(X_train, y_train, batch_size)
            # One-hot labels -> class indices when needed.
            labels = y_train.argmax(axis=1) if np.ndim(y_train) > 1 else y_train
            classes, class_counts = np.unique(labels, return_counts=True)
            n_batches = class_counts[0] // (batch_size // len(classes)) + 1
            for _ in range(n_batches):
                xb, yb = sampler.generate_batch()
                grad_w, grad_b = network.backprop(xb, yb)
                optimizer.update(network.weights, network.biases, grad_w, grad_b)
        if X_test is not None:
            train_loss, train_accuracy = evaluate_batch(X_train, y_train, network)
            test_loss, test_accuracy = evaluate_batch(X_test, y_test, network)
            print("Epoch {}, training loss: {:.4f}, training accuracy: {:.4f}, \n"
                  "\t validation loss: {:.4f}, validation accuracy: {:.4f}, "
                  "epoch time: {:.2f}s ".format(
                epoch + 1,
                train_loss,
                train_accuracy,
                test_loss,
                test_accuracy,
                time.time() - epoch_start))
        else:
            train_loss, train_accuracy = evaluate_batch(X_train, y_train, network)
            print("Epoch {0}, training loss: {1}, training accuracy: {2}, "
                  "epoch time: {3}s".format(
                epoch + 1,
                train_loss,
                train_accuracy,
                time.time() - epoch_start))
if __name__ == "__main__":
np.random.seed(42)
# print("------------------", "Momentum", "----------------------")
# (X_train, y_train), (X_test, y_test) = load_data(normalize=False, standard=True) # standardscale
# dnn = Network_mini_batch(sizes=[3072, 50, 10], activation="relu")
# optimizer = Momentum(lr=1e-3, momentum=0.9, batch_size=32)
# train_DNN_minibatch(X_train, y_train, 100, optimizer, 32, dnn, X_test, y_test)
# print()
print("------------------", "NesterovMomentum", "----------------------")
(X_train, y_train), (X_test, y_test) = load_data(normalize=False, standard=True) # standardscale
dnn = Network_mini_batch(sizes=[3072, 50, 10], activation="relu")
optimizer = NesterovMomentum(lr=1e-3, momentum=0.9, batch_size=128)
train_DNN_minibatch(X_train, y_train, 100, optimizer, 128, dnn, X_test, y_test)
print()
# print("------------------", "Sgd", "----------------------")
# (X_train, y_train), (X_test, y_test) = load_data(normalize=False, standard=True) # standardscale
# dnn = Network_mini_batch(sizes=[3072, 500, 200, 10], activation="relu")
# optimizer = Momentum(lr=0.001, momentum=0.9, batch_size=32)
# optimizer = Adam(lr=0.001, batch_size=32)
# optimizer = Sgd(lr=7e-4, batch_size=32)
# train_DNN_minibatch(X_train, y_train, 100, optimizer, 32, dnn, X_test, y_test)
# print()
print("------------------", "Momentum", "----------------------")
(X_train, y_train), (X_test, y_test) = load_data(normalize=False, standard=True) # standardscale
dnn = Network_mini_batch(sizes=[3072, 500, 200, 10], activation="relu")
optimizer = Momentum(lr=7e-4, momentum=0.9, batch_size=32)
train_DNN_minibatch(X_train, y_train, 100, optimizer, 32, dnn, X_test, y_test)
print()
print("------------------", "Adam", "----------------------")
(X_train, y_train), (X_test, y_test) = load_data(normalize=False, standard=True) # standardscale
dnn = Network_mini_batch(sizes=[3072, 500, 200, 10], activation="relu")
optimizer = Adam(lr=7e-4, batch_size=32)
train_DNN_minibatch(X_train, y_train, 100, optimizer, 32, dnn, X_test, y_test)
print() | [
"sys.path.append",
"numpy.random.seed",
"numpy.sum",
"warnings.filterwarnings",
"numpy.random.randn",
"train.data_generator.datainit",
"cifar_data.load_data",
"numpy.zeros",
"numpy.ndim",
"time.time",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt",
"numpy.unique",
"evaluate.evaluate.eval... | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((71, 97), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (86, 97), False, 'import sys\n'), ((6897, 6915), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6911, 6915), True, 'import numpy as np\n'), ((7444, 7485), 'cifar_data.load_data', 'load_data', ([], {'normalize': '(False)', 'standard': '(True)'}), '(normalize=False, standard=True)\n', (7453, 7485), False, 'from cifar_data import load_data\n'), ((8358, 8399), 'cifar_data.load_data', 'load_data', ([], {'normalize': '(False)', 'standard': '(True)'}), '(normalize=False, standard=True)\n', (8367, 8399), False, 'from cifar_data import load_data\n'), ((8761, 8802), 'cifar_data.load_data', 'load_data', ([], {'normalize': '(False)', 'standard': '(True)'}), '(normalize=False, standard=True)\n', (8770, 8802), False, 'from cifar_data import load_data\n'), ((2451, 2478), 'numpy.dot', 'np.dot', (['delta', 'a_hold[-2].T'], {}), '(delta, a_hold[-2].T)\n', (2457, 2478), True, 'import numpy as np\n'), ((4131, 4158), 'numpy.dot', 'np.dot', (['a_hold[-2].T', 'delta'], {}), '(a_hold[-2].T, delta)\n', (4137, 4158), True, 'import numpy as np\n'), ((4184, 4205), 'numpy.sum', 'np.sum', (['delta'], {'axis': '(0)'}), '(delta, axis=0)\n', (4190, 4205), True, 'import numpy as np\n'), ((4723, 4734), 'time.time', 'time.time', ([], {}), '()\n', (4732, 4734), False, 'import time\n'), ((895, 925), 'numpy.random.randn', 'np.random.randn', (['back_layer', '(1)'], {}), '(back_layer, 1)\n', (910, 925), True, 'import numpy as np\n'), ((1571, 1598), 'numpy.dot', 'np.dot', (['self.weights[-1]', 'a'], {}), '(self.weights[-1], a)\n', (1577, 1598), True, 'import numpy as np\n'), ((1687, 1704), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (1695, 1704), True, 'import numpy as np\n'), ((1750, 1767), 'numpy.zeros', 
'np.zeros', (['b.shape'], {}), '(b.shape)\n', (1758, 1767), True, 'import numpy as np\n'), ((2225, 2252), 'numpy.dot', 'np.dot', (['self.weights[-1]', 'a'], {}), '(self.weights[-1], a)\n', (2231, 2252), True, 'import numpy as np\n'), ((2683, 2714), 'numpy.dot', 'np.dot', (['delta', 'a_hold[-l - 1].T'], {}), '(delta, a_hold[-l - 1].T)\n', (2689, 2714), True, 'import numpy as np\n'), ((3217, 3239), 'numpy.random.randn', 'np.random.randn', (['layer'], {}), '(layer)\n', (3232, 3239), True, 'import numpy as np\n'), ((3415, 3442), 'numpy.dot', 'np.dot', (['a', 'self.weights[-1]'], {}), '(a, self.weights[-1])\n', (3421, 3442), True, 'import numpy as np\n'), ((3531, 3548), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (3539, 3548), True, 'import numpy as np\n'), ((3594, 3611), 'numpy.zeros', 'np.zeros', (['b.shape'], {}), '(b.shape)\n', (3602, 3611), True, 'import numpy as np\n'), ((3924, 3951), 'numpy.dot', 'np.dot', (['a', 'self.weights[-1]'], {}), '(a, self.weights[-1])\n', (3930, 3951), True, 'import numpy as np\n'), ((4379, 4410), 'numpy.dot', 'np.dot', (['a_hold[-l - 1].T', 'delta'], {}), '(a_hold[-l - 1].T, delta)\n', (4385, 4410), True, 'import numpy as np\n'), ((4440, 4461), 'numpy.sum', 'np.sum', (['delta'], {'axis': '(0)'}), '(delta, axis=0)\n', (4446, 4461), True, 'import numpy as np\n'), ((5974, 6015), 'evaluate.evaluate.evaluate_batch', 'evaluate_batch', (['X_train', 'y_train', 'network'], {}), '(X_train, y_train, network)\n', (5988, 6015), False, 'from evaluate.evaluate import evaluate_batch\n'), ((6055, 6094), 'evaluate.evaluate.evaluate_batch', 'evaluate_batch', (['X_test', 'y_test', 'network'], {}), '(X_test, y_test, network)\n', (6069, 6094), False, 'from evaluate.evaluate import evaluate_batch\n'), ((6563, 6604), 'evaluate.evaluate.evaluate_batch', 'evaluate_batch', (['X_train', 'y_train', 'network'], {}), '(X_train, y_train, network)\n', (6577, 6604), False, 'from evaluate.evaluate import evaluate_batch\n'), ((713, 755), 
'numpy.random.randn', 'np.random.randn', (['back_layer', 'forward_layer'], {}), '(back_layer, forward_layer)\n', (728, 755), True, 'import numpy as np\n'), ((758, 786), 'numpy.sqrt', 'np.sqrt', (['(2.0 / forward_layer)'], {}), '(2.0 / forward_layer)\n', (765, 786), True, 'import numpy as np\n'), ((1949, 1961), 'numpy.dot', 'np.dot', (['w', 'a'], {}), '(w, a)\n', (1955, 1961), True, 'import numpy as np\n'), ((1991, 2015), 'numpy.random.rand', 'np.random.rand', (['*z.shape'], {}), '(*z.shape)\n', (2005, 2015), True, 'import numpy as np\n'), ((2575, 2612), 'numpy.dot', 'np.dot', (['self.weights[-l + 1].T', 'delta'], {}), '(self.weights[-l + 1].T, delta)\n', (2581, 2612), True, 'import numpy as np\n'), ((3033, 3075), 'numpy.random.randn', 'np.random.randn', (['forward_layer', 'back_layer'], {}), '(forward_layer, back_layer)\n', (3048, 3075), True, 'import numpy as np\n'), ((3078, 3106), 'numpy.sqrt', 'np.sqrt', (['(2.0 / forward_layer)'], {}), '(2.0 / forward_layer)\n', (3085, 3106), True, 'import numpy as np\n'), ((3768, 3780), 'numpy.dot', 'np.dot', (['a', 'w'], {}), '(a, w)\n', (3774, 3780), True, 'import numpy as np\n'), ((4271, 4308), 'numpy.dot', 'np.dot', (['delta', 'self.weights[-l + 1].T'], {}), '(delta, self.weights[-l + 1].T)\n', (4277, 4308), True, 'import numpy as np\n'), ((5314, 5367), 'train.data_generator.datainit', 'data_generator.datainit', (['X_train', 'y_train', 'batch_size'], {}), '(X_train, y_train, batch_size)\n', (5337, 5367), False, 'from train import data_generator\n'), ((5540, 5577), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (5549, 5577), True, 'import numpy as np\n'), ((1474, 1486), 'numpy.dot', 'np.dot', (['w', 'a'], {}), '(w, a)\n', (1480, 1486), True, 'import numpy as np\n'), ((3385, 3397), 'numpy.dot', 'np.dot', (['a', 'w'], {}), '(a, w)\n', (3391, 3397), True, 'import numpy as np\n'), ((5383, 5399), 'numpy.ndim', 'np.ndim', (['y_train'], {}), '(y_train)\n', (5390, 5399), 
True, 'import numpy as np\n'), ((6486, 6497), 'time.time', 'time.time', ([], {}), '()\n', (6495, 6497), False, 'import time\n'), ((6841, 6852), 'time.time', 'time.time', ([], {}), '()\n', (6850, 6852), False, 'import time\n')] |
#!/usr/bin/python
#
## @file
#
# Class for rendering multiple images in taken at different magnifications.
# This is used by the steve software and others for image display.
#
# Hazen 07/13
#
import pickle
import numpy
import os
from PyQt5 import QtCore, QtGui, QtWidgets
## MultifieldView
#
# Handles user interaction with the microscope images.
#
# The units here are all in pixels. Subclasses are
# responsible for keeping track (or not) of object
# locations in microns.
#
class MultifieldView(QtWidgets.QGraphicsView):

    # Emitted with the new view scale whenever the user zooms with the mouse wheel.
    scaleChange = QtCore.pyqtSignal(float)

    ## __init__
    #
    # @param parameters A parameters object (not used directly here; kept for sub-classes).
    # @param parent (Optional) The PyQt parent of this object.
    #
    def __init__(self, parameters, parent=None):
        QtWidgets.QGraphicsView.__init__(self, parent)

        # class variables
        self.bg_brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
        self.currentz = 0.0
        self.directory = ""
        self.image_items = []
        # Extra space (in pixels) kept around the outermost images so the
        # user can always scroll a little past them.
        self.margin = 8000.0
        self.scene_rect = [-self.margin, -self.margin, self.margin, self.margin]
        self.view_scale = 1.0
        self.zoom_in = 1.2
        self.zoom_out = 1.0 / self.zoom_in

        self.setMinimumSize(QtCore.QSize(200, 200))

        # background brush
        self.setBackgroundBrush(self.bg_brush)

        # scene initialization
        self.scene = QtWidgets.QGraphicsScene()
        self.setScene(self.scene)
        self.updateSceneRect(0, 0, True)
        self.setMouseTracking(True)
        self.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)

    ## addViewImageItem
    #
    # Adds a ViewImageItem to the QGraphicsScene.
    #
    # We don't use the image objects x and y fields for positioning the image as these give the location
    # of the center of the image and the QGraphicsScene uses the upper left corner of the image.
    #
    # @param image A capture.Image item.
    # @param x_pix The x location of the left edge of the image.
    # @param y_pix The y location of the top edge of the image.
    # @param x_offset_pix The current x offset of this objective relative to the reference objective.
    # @param y_offset_pix The current y offset of this objective relative to the reference objective.
    # @param magnification The magnification of this objective.
    # @param objective The name of the objective (a string).
    # @param z_pos The z value to use for this image, this determines which images are in front of other images in the event of overlap.
    #
    def addViewImageItem(self, image, x_pix, y_pix, x_offset_pix, y_offset_pix, magnification, objective, z_pos):
        # BUG FIX: viewImageItem() takes the objective name *before* the
        # magnification; these two arguments used to be passed swapped, so the
        # magnification was silently stored as the objective name (and vice
        # versa), breaking the string comparisons in changeImageMagnifications()
        # and changeImageX/YOffsets().
        a_image_item = viewImageItem(x_pix, y_pix, x_offset_pix, y_offset_pix, objective, magnification, z_pos)
        a_image_item.initializeWithImageObject(image)

        # add the item, keep it visible and grow the scene rectangle if necessary
        self.image_items.append(a_image_item)
        self.scene.addItem(a_image_item)
        self.centerOn(x_pix, y_pix)
        self.updateSceneRect(x_pix, y_pix)

    ## changeContrast
    #
    # Change the contrast of all image items.
    #
    # @param contrast_range The new minimum and maximum contrast values (which will control what is set to 0 and to 255).
    #
    def changeContrast(self, contrast_range):
        for item in self.image_items:
            item.pixmap_min = contrast_range[0]
            item.pixmap_max = contrast_range[1]
            item.createPixmap()

    ## changeImageMagnifications
    #
    # Update the magnifications of the images taken with the specified objective.
    #
    # @param objective The objective (a string).
    # @param new_magnification The new magnification to use when rendering images taken with this objective.
    #
    def changeImageMagnifications(self, objective, new_magnification):
        for item in self.image_items:
            if (item.getObjective() == objective):
                item.setMagnification(new_magnification)

    ## changeImageXOffsets
    #
    # Update the x offset (relative to the reference objective) of all the images taken with the specified objective.
    #
    # @param objective The objective (a string).
    # @param x_offset_pix The new x offset in pixels.
    #
    def changeImageXOffsets(self, objective, x_offset_pix):
        for item in self.image_items:
            if (item.getObjective() == objective):
                item.setXOffset(x_offset_pix)

    ## changeImageYOffsets
    #
    # Update the y offset (relative to the reference objective) of all the images taken with the specified objective.
    #
    # @param objective The objective (a string).
    # @param y_offset_pix The new y offset in pixels.
    #
    def changeImageYOffsets(self, objective, y_offset_pix):
        for item in self.image_items:
            if (item.getObjective() == objective):
                item.setYOffset(y_offset_pix)

    ## clearMosaic
    #
    # Removes all the viewImageItems from the QGraphicsScene.
    #
    def clearMosaic(self):
        for image_item in self.image_items:
            self.scene.removeItem(image_item)
        self.currentz = 0.0
        self.image_items = []

    ## getContrast
    #
    # @return The minimum and maximum pixmap values over all image items,
    #         or [None, None] if there are no images.
    #
    def getContrast(self):
        if len(self.image_items) >= 1:
            min_value = min(item.pixmap_min for item in self.image_items)
            max_value = max(item.pixmap_max for item in self.image_items)
            return [min_value, max_value]
        else:
            return [None, None]

    ## getImageItems
    #
    # @return An array containing all of the viewImageItems in the scene.
    #
    def getImageItems(self):
        return self.image_items

    ## handleRemoveLastItem
    #
    # Removes the last viewImageItem that was added to the scene.
    #
    # @param boolean Dummy parameter.
    #
    def handleRemoveLastItem(self, boolean):
        if (len(self.image_items) > 0):
            item = self.image_items.pop()
            self.scene.removeItem(item)

    ## keyPressEvent
    #
    # @param event A PyQt key press event object.
    #
    def keyPressEvent(self, event):
        # this allows keyboard scrolling to work
        QtWidgets.QGraphicsView.keyPressEvent(self, event)

    ## loadFromMosaicFileData
    #
    # This is called when we are loading a previously saved mosaic.
    #
    # @param data A data element from the mosaic file.
    # @param directory The directory in which the mosaic file is located.
    #
    # @return True/False if the data element described a viewImageItem.
    #
    def loadFromMosaicFileData(self, data, directory):
        if (data[0] == "image"):
            with open(os.path.join(directory, data[1]), "rb") as fp:
                image_dict = pickle.load(fp)
            # Placeholder arguments; setState() immediately overwrites them
            # with the pickled values.
            a_image_item = viewImageItem(0, 0, 0, 0, "na", 1.0, 0.0)
            a_image_item.setState(image_dict)
            self.image_items.append(a_image_item)
            self.scene.addItem(a_image_item)
            self.centerOn(a_image_item.x_pix, a_image_item.y_pix)
            self.updateSceneRect(a_image_item.x_pix, a_image_item.y_pix)
            # Keep currentz above every loaded item so new images stack on top.
            if (self.currentz < a_image_item.zvalue):
                self.currentz = a_image_item.zvalue + 0.01
            return True
        else:
            return False

    ## mousePressEvent
    #
    # If the left mouse button was pressed, center the scene on the location where the button
    # was pressed.
    #
    # @param event A PyQt mouse press event.
    #
    def mousePressEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            self.centerOn(self.mapToScene(event.pos()))

    ## saveToMosaicFile
    #
    # Saves all the viewImageItems in the scene into the mosaic file. This adds a line
    # to the mosaic file for each viewImageItem containing the file name where the
    # viewImageItem was stored. Each viewImageItem is pickled and saved in it's own
    # separate file.
    #
    # @param fileptr The mosaic file pointer.
    # @param filename The name of the mosaic file.
    #
    def saveToMosaicFile(self, fileptr, filename):
        # FIX: the progress maximum used to be len(self.items()), i.e. every
        # QGraphicsItem in the scene, while the loop below only iterates
        # self.image_items; size the dialog from the list we actually save.
        progress_bar = QtWidgets.QProgressDialog("Saving Files...",
                                                  "Abort Save",
                                                  0,
                                                  len(self.image_items),
                                                  self)
        progress_bar.setWindowModality(QtCore.Qt.WindowModal)

        basename = os.path.splitext(os.path.basename(filename))[0]
        dirname = os.path.dirname(filename) + "/"
        for i, item in enumerate(self.image_items):
            progress_bar.setValue(i)
            if progress_bar.wasCanceled(): break

            name = basename + "_" + str(i+1)
            fileptr.write("image," + name + ".stv\r\n")
            with open(dirname + name + ".stv", "wb") as fp:
                pickle.dump(item.getState(), fp)
        progress_bar.close()

    ## setScale
    #
    # Sets the current scale of the view.
    #
    # @param scale The new scale factor.
    #
    def setScale(self, scale):
        self.view_scale = scale
        transform = QtGui.QTransform()
        transform.scale(scale, scale)
        self.setTransform(transform)

    ## updateSceneRect
    #
    # This updates the rectangle describing the overall size of the QGraphicsScene.
    #
    # @param x_pix A new location in pixels that needs to be visible in the scene.
    # @param y_pix A new location in pixels that needs to be visible in the scene.
    # @param update (Optional) True/False to force an update of the scene rectangle regardless of x_pix, y_pix.
    #
    def updateSceneRect(self, x_pix, y_pix, update = False):
        needs_update = update

        # grow the scene rectangle whenever the point comes within self.margin of an edge
        if (x_pix < (self.scene_rect[0] + self.margin)):
            self.scene_rect[0] = x_pix - self.margin
            needs_update = True
        elif (x_pix > (self.scene_rect[2] - self.margin)):
            self.scene_rect[2] = x_pix + self.margin
            needs_update = True
        if (y_pix < (self.scene_rect[1] + self.margin)):
            self.scene_rect[1] = y_pix - self.margin
            needs_update = True
        elif (y_pix > (self.scene_rect[3] - self.margin)):
            self.scene_rect[3] = y_pix + self.margin
            needs_update = True

        if needs_update:
            w = self.scene_rect[2] - self.scene_rect[0]
            h = self.scene_rect[3] - self.scene_rect[1]
            self.scene.setSceneRect(self.scene_rect[0],
                                    self.scene_rect[1],
                                    w,
                                    h)

    ## wheelEvent
    #
    # Handles mouse wheel events, changes the scale at which the scene is rendered
    # to emulate zooming in / out.
    #
    # @param event A PyQt mouse wheel event.
    #
    def wheelEvent(self, event):
        if not event.angleDelta().isNull():
            if (event.angleDelta().y() > 0):
                self.view_scale = self.view_scale * self.zoom_in
                self.setScale(self.view_scale)
            else:
                self.view_scale = self.view_scale * self.zoom_out
                self.setScale(self.view_scale)
            self.scaleChange.emit(self.view_scale)
            event.accept()
## viewImageItem
#
# Image handling class.
#
# The real position is the stage position in um where
# the picture was taken.
#
class viewImageItem(QtWidgets.QGraphicsItem):

    ## __init__
    #
    # @param x_pix X location of the image in pixels.
    # @param y_pix Y location of the image in pixels.
    # @param x_offset_pix The offset for this objective in x relative to the reference objective in pixels.
    # @param y_offset_pix The offset for this objective in y relative to the reference objective in pixels.
    # @param objective_name The name of the objective, a string.
    # @param magnification The magnification of the objective.
    # @param zvalue The z position of this image (stacking order in the scene).
    #
    def __init__(self, x_pix, y_pix, x_offset_pix, y_offset_pix, objective_name, magnification, zvalue):
        QtWidgets.QGraphicsItem.__init__(self, None)
        self.data = False               # numpy image data, set by initializeWithImageObject() / setState().
        self.height = 0                 # image height in pixels.
        self.magnification = magnification
        self.objective_name = str(objective_name)
        self.parameters_file = ""
        self.pixmap = False             # QtGui.QPixmap, created by createPixmap().
        self.pixmap_min = 0             # contrast minimum (maps to grey level 0).
        self.pixmap_max = 0             # contrast maximum (maps to grey level 255).
        self.version = "0.0"
        self.width = 0                  # image width in pixels.
        self.x_offset_pix = x_offset_pix
        self.y_offset_pix = y_offset_pix
        self.x_pix = x_pix
        self.y_pix = y_pix
        self.x_um = 0                   # stage position (um) where the picture was taken.
        self.y_um = 0
        self.zvalue = zvalue

    ## boundingRect
    #
    # @return QtCore.QRectF containing the size of the image.
    #
    def boundingRect(self):
        return QtCore.QRectF(0, 0, self.pixmap.width(), self.pixmap.height())

    ## createPixmap
    #
    # Converts the numpy image from HAL to a QtGui.QPixmap, applying the
    # current [pixmap_min, pixmap_max] contrast window.
    #
    # NOTE(review): if pixmap_max == pixmap_min (the constructor defaults)
    # the rescale below divides by zero; callers appear to always set the
    # contrast first via initializeWithImageObject() / changeContrast().
    #
    def createPixmap(self):
        # This just undoes the transpose that we applied when the image was loaded. It might
        # make more sense not to transpose the image in the first place, but this is the standard
        # for the storm-analysis project so we maintain that here.
        frame = numpy.transpose(self.data.copy())

        # Rescale & convert to 8bit
        frame = numpy.ascontiguousarray(frame, dtype = numpy.float32)
        frame = 255.0 * (frame - float(self.pixmap_min))/float(self.pixmap_max - self.pixmap_min)
        frame[(frame > 255.0)] = 255.0
        frame[(frame < 0.0)] = 0.0
        frame = frame.astype(numpy.uint8)

        # Create the pixmap.
        w, h = frame.shape
        image = QtGui.QImage(frame.data, h, w, QtGui.QImage.Format_Indexed8)
        # Keep a reference to the numpy array on the QImage so the underlying
        # buffer is not garbage collected while the QImage is alive (QImage
        # wraps the buffer, it does not copy it).
        image.ndarray = frame

        # Greyscale color table: index i renders as grey level i.
        for i in range(256):
            image.setColor(i, QtGui.QColor(i,i,i).rgb())

        self.pixmap = QtGui.QPixmap.fromImage(image)

    ## getMagnification
    #
    # @return The magnification of the image.
    #
    def getMagnification(self):
        return self.magnification

    ## getObjective
    #
    # @return The objective the image was taken with.
    #
    def getObjective(self):
        return self.objective_name

    ## getParameters
    #
    # This is not used. self.parameters is also not defined, so calling this
    # would raise AttributeError.
    #
    # @return self.parameters.
    #
    def getParameters(self):
        return self.parameters

    ## getPixmap
    #
    # @return The image as a QtGui.QPixmap.
    #
    def getPixmap(self):
        return self.pixmap

    ## getPositionUm
    #
    # @return [x (um), y (um)]
    #
    def getPositionUm(self):
        return [self.x_um, self.y_um]

    ## getState
    #
    # This is used to pickle objects of this class.
    #
    # @return The dictionary for this object, with the (unpicklable) 'pixmap' element removed.
    #
    def getState(self):
        odict = self.__dict__.copy()
        del odict['pixmap']
        return odict

    ## initializeWithImageObject
    #
    # Set member variables from a capture.Image object, then build the
    # pixmap and position it in the scene.
    #
    # @param image A capture.Image object.
    #
    def initializeWithImageObject(self, image):
        self.data = image.data
        self.height = image.height
        self.parameters_file = image.parameters_file
        self.pixmap_min = image.image_min
        self.pixmap_max = image.image_max
        self.width = image.width
        self.x_um = image.x_um
        self.y_um = image.y_um
        self.createPixmap()
        self.setPixmapGeometry()

    ## initializeWithLegacyMosaicFormat
    #
    # This is a place-holder, it currently does nothing.
    #
    # @param legacy_text The text that specifies some of the image properties.
    #
    def initializeWithLegacyMosaicFormat(self, legacy_text):
        pass

    ## paint
    #
    # Called by PyQt to render the image.
    #
    # @param painter A QPainter object.
    # @param option A QStyleOptionGraphicsItem object.
    # @param widget A QWidget object.
    #
    def paint(self, painter, option, widget):
        painter.drawPixmap(0, 0, self.pixmap)

    ## setPixmapGeometry
    #
    # Sets the position, scale and z value of the image. The item is scaled
    # by 1/magnification so higher-magnification images cover less scene area.
    #
    def setPixmapGeometry(self):
        self.setPos(self.x_pix + self.x_offset_pix, self.y_pix + self.y_offset_pix)
        self.setTransform(QtGui.QTransform().scale(1.0/self.magnification, 1.0/self.magnification))
        self.setZValue(self.zvalue)

    ## setMagnification
    #
    # FIXME: This also needs to change the x,y coordinates so the image expands/
    #        contracts from its center, not the upper left hand corner.
    #
    # @param magnification The new magnification to use for this image.
    #
    def setMagnification(self, magnification):
        self.magnification = magnification
        self.setTransform(QtGui.QTransform().scale(1.0/self.magnification, 1.0/self.magnification))

    ## setRealPosition
    #
    # This is not used..
    #
    # @param rx The real position in x.
    # @param ry The real position in y.
    #
    def setRealPosition(self, rx, ry):
        self.real_x = rx
        self.real_y = ry

    ## setState
    #
    # This is used to unpickle objects of this class; the pixmap (dropped by
    # getState()) is rebuilt from the restored image data.
    #
    # @param image_dict A dictionary that defines the object members.
    #
    def setState(self, image_dict):
        self.__dict__.update(image_dict)
        self.createPixmap()
        self.setPixmapGeometry()

    ## setXOffset
    #
    # @param x_offset The new x_offset to use for positioning this image.
    #
    def setXOffset(self, x_offset):
        self.x_offset_pix = x_offset
        self.setPos(self.x_pix + self.x_offset_pix, self.y_pix + self.y_offset_pix)

    ## setYOffset
    #
    # @param y_offset The new y_offset to use for positioning this image.
    #
    def setYOffset(self, y_offset):
        self.y_offset_pix = y_offset
        self.setPos(self.x_pix + self.x_offset_pix, self.y_pix + self.y_offset_pix)
#
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| [
"PyQt5.QtCore.pyqtSignal",
"PyQt5.QtGui.QPixmap.fromImage",
"os.path.join",
"PyQt5.QtWidgets.QGraphicsView.keyPressEvent",
"PyQt5.QtGui.QColor",
"os.path.basename",
"PyQt5.QtCore.QSize",
"os.path.dirname",
"PyQt5.QtWidgets.QGraphicsItem.__init__",
"PyQt5.QtGui.QImage",
"pickle.load",
"PyQt5.Qt... | [((546, 570), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['float'], {}), '(float)\n', (563, 570), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((766, 812), 'PyQt5.QtWidgets.QGraphicsView.__init__', 'QtWidgets.QGraphicsView.__init__', (['self', 'parent'], {}), '(self, parent)\n', (798, 812), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1383, 1409), 'PyQt5.QtWidgets.QGraphicsScene', 'QtWidgets.QGraphicsScene', ([], {}), '()\n', (1407, 1409), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6352, 6402), 'PyQt5.QtWidgets.QGraphicsView.keyPressEvent', 'QtWidgets.QGraphicsView.keyPressEvent', (['self', 'event'], {}), '(self, event)\n', (6389, 6402), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9325, 9343), 'PyQt5.QtGui.QTransform', 'QtGui.QTransform', ([], {}), '()\n', (9341, 9343), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((12401, 12445), 'PyQt5.QtWidgets.QGraphicsItem.__init__', 'QtWidgets.QGraphicsItem.__init__', (['self', 'None'], {}), '(self, None)\n', (12433, 12445), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((13661, 13712), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['frame'], {'dtype': 'numpy.float32'}), '(frame, dtype=numpy.float32)\n', (13684, 13712), False, 'import numpy\n'), ((14001, 14061), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['frame.data', 'h', 'w', 'QtGui.QImage.Format_Indexed8'], {}), '(frame.data, h, w, QtGui.QImage.Format_Indexed8)\n', (14013, 14061), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14200, 14230), 'PyQt5.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['image'], {}), '(image)\n', (14223, 14230), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((877, 904), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (889, 904), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1231, 1253), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(200)', '(200)'], {}), '(200, 
200)\n', (1243, 1253), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((8746, 8771), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (8761, 8771), False, 'import os\n'), ((6913, 6928), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (6924, 6928), False, 'import pickle\n'), ((8697, 8723), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (8713, 8723), False, 'import os\n'), ((6837, 6869), 'os.path.join', 'os.path.join', (['directory', 'data[1]'], {}), '(directory, data[1])\n', (6849, 6869), False, 'import os\n'), ((16620, 16638), 'PyQt5.QtGui.QTransform', 'QtGui.QTransform', ([], {}), '()\n', (16636, 16638), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17107, 17125), 'PyQt5.QtGui.QTransform', 'QtGui.QTransform', ([], {}), '()\n', (17123, 17125), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14151, 14172), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['i', 'i', 'i'], {}), '(i, i, i)\n', (14163, 14172), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
#!/usr/bin/env python3
# ============================ Information =============================
# Project: Texas A&M AutoDrive Challenge - Year 2
# Language: Python 3.5.2
# ROS Package: camera_detection
# Repository: https://github.tamu.edu/kipvasq9/camera_detection
# File Name: camera_detection.py
# Version: 1.0.0
# Description: Provide access to Neural Networks through ROS.
# Date: September 24, 2018
# Author: <NAME> and <NAME>
# Contact: <EMAIL>
# ============================== Imports ===============================
import cv2
import signal
import sys
import numpy as np
import matplotlib.pyplot as plt
import os
import time
import PIL
from PIL import ImageFile
from PIL import Image as PILImage
from std_msgs.msg import Float32MultiArray
# Let PIL decode images whose files are truncated instead of raising an
# exception -- presumably to tolerate partially written camera frames
# (NOTE(review): confirm this is still needed with the current pipeline).
ImageFile.LOAD_TRUNCATED_IMAGES = True
# neural network
import tensorflow as tf
import keras
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle
# ros
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
# define tensorflow session
def get_session():
    """Return a TensorFlow session whose GPU memory allocation grows on
    demand rather than claiming all available GPU memory up front."""
    session_config = tf.ConfigProto()
    session_config.gpu_options.allow_growth = True
    return tf.Session(config=session_config)
# use this environment flag to change which GPU to use
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())

# adjust this to point to your downloaded/trained model
# models can be downloaded here: https://github.com/fizyr/keras-retinanet/releases
# (FIX: this path used to be assigned twice with the identical value; the
# redundant duplicate assignment has been removed.)
model_path = "/home/autodrive/Desktop/catkin_ws/src/keras-retinanet/snapshots/juan.h5"

# Load the RetinaNet model once at import time. _make_predict_function()
# pre-builds the prediction graph so later predict() calls (e.g. from a ROS
# callback thread) do not trigger lazy graph construction.
model = models.load_model(model_path, backbone_name='resnet50')
model._make_predict_function()
# Camera Detection ROS Node Class
class CameraDetectionROSNode:
    """ROS node that runs RetinaNet on camera frames and publishes detected
    traffic lights, traffic signs and car obstacles.

    NOTE: the constructor blocks -- it ends by entering the processImages()
    loop, which runs until the node is shut down.
    """

    def __init__(self):
        self.image = None   # latest camera frame (rgb8 numpy array), set by updateImage()
        self.active = True  # processing loop flag, cleared by the SIGINT handler

        self.obstacle_publisher = rospy.Publisher('/obstacle', ObstacleArray, queue_size = 1)

        # CTRL+C signal handler
        signal.signal(signal.SIGINT, self.signalInterruptHandler)

        # initialize ros node
        rospy.init_node('camera_detection_node', anonymous=True)

        # set camera image topic subscriber
        self.sub_image = rospy.Subscriber('/front_camera/image_raw', Image, self.updateImage, queue_size=1)

        # ros to cv mat converter
        self.bridge = CvBridge()

        # set publishers
        self.detect_pub = rospy.Publisher('/detected', Image, queue_size = 1)
        self.bbox_pub = rospy.Publisher('/bbox', Float32MultiArray, queue_size = 1)
        self.traff_light_pub = rospy.Publisher('/traffic_light',TrafficLightArray, queue_size = 1)
        self.traff_sign_pub = rospy.Publisher('/traffic_sign',TrafficSignArray, queue_size = 1)

        # load label to names mapping for visualization purposes (COCO classes)
        self.labels_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat'
                ,9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat'
                ,16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack'
                ,25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball'
                ,33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle'
                ,40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich'
                ,49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch'
                ,58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote'
                ,66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book'
                ,74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}

        # Camera projection matrix and its pseudo-inverse, used to back-project
        # pixel columns into bearing angles for obstacle localization.
        self.p2 = np.array([[553.144531, 0.000000, 383.442257, 0.000000], [0.000000, 552.951477, 348.178354, 0.000000], [0.000000, 0.000000, 1.000000, 0]])
        self.p2_inv = np.linalg.pinv(self.p2)

        # begin processing images (blocks until shutdown)
        self.processImages()

    def signalInterruptHandler(self, signum, frame):
        """SIGINT handler: shut down the ROS node and stop the processing loop."""
        rospy.signal_shutdown("CTRL+C Signal Caught.")
        self.active = False
        sys.exit()

    def updateImage(self, image_msg):
        """Image topic callback: convert the ROS message and store the frame."""
        self.image = self.bridge.imgmsg_to_cv2(image_msg, "rgb8")

    def processImages(self):
        """Run detection on the most recent frame until shutdown."""
        # Wait for the first frame; sleep briefly instead of busy-spinning
        # at 100% CPU (the previous `pass` loop pegged a core).
        while self.image is None:
            time.sleep(0.01)
        while self.active:
            self.retinanet(self.image)

    def getDistance(self, box, width, pWidth, dist, calibration):
        """Estimate the distance to a detected object from its bounding box
        using a pinhole-camera similar-triangles model.

        box         -- bounding box [x1, y1, x2, y2] in pixels.
        width       -- real-world object width (same unit as dist; appears to be inches).
        pWidth      -- reference pixel width of the object at distance `dist`.
        dist        -- reference distance used to calibrate the focal length.
        calibration -- empirical fudge factor applied to the focal length.

        Returns the estimated distance; the `/12` and `*.3048` steps suggest
        inches -> feet -> meters conversion (TODO confirm units with callers).
        """
        focalPoint = (pWidth * dist) / width
        focalPoint *= calibration
        # (An unused `pixW = min(box width, box height)` was removed here.)
        distance = (width * focalPoint) / abs(box[2] - box[0])
        distance = distance / 12
        return (distance - 5) * .3048

    def detect_color(self, image, box):
        """Classify a traffic light crop as green (returns 5) or red (returns 1)
        by counting pixels inside hand-tuned RGB ranges.

        Vectorized with numpy over exactly the same [:height, :width] region
        the original per-pixel Python loop scanned, so the counts (and hence
        the returned class) are identical, just orders of magnitude faster.
        """
        crop_img = image[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
        pixels = np.array(crop_img)
        width = int(box[2] - box[0])
        height = int(box[3] - box[1])
        region = pixels[:height, :width]
        r = region[:, :, 0]
        g = region[:, :, 1]
        b = region[:, :, 2]
        green_mask = (r > 3) & (r < 98) & (g > 185) & (g < 255) & (b > 27) & (b < 119)
        red_mask = (r > 185) & (r < 255) & (g > 3) & (g < 58) & (b > 9) & (b < 65)
        green_score = int(np.count_nonzero(green_mask))
        red_score = int(np.count_nonzero(red_mask))
        if green_score > red_score:
            return 5
        else:
            return 1

    def retinanet(self, image):
        """Run RetinaNet on one frame and publish detections.

        Publishes TrafficLightArray, TrafficSignArray and ObstacleArray
        messages, plus the annotated frame on /detected.
        """
        # Crop off the bottom of the frame before detection.
        image = image[0:1500, :]
        # Keep the RGB crop for traffic-light color analysis (the detection
        # pipeline below converts `image` to BGR).
        rgb_frame = image

        # convert cv mat to np array through PIL (BGR order for the network)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        image = PIL.Image.fromarray(image)
        image = np.asarray(image.convert('RGB'))[:, :, ::-1].copy()

        # copy to draw annotations on
        draw = image.copy()

        # preprocess image for network
        image = preprocess_image(image)
        image, scale = resize_image(image)

        boxes, scores, labels = model.predict(np.expand_dims(image, axis=0))

        # correct for image scale
        boxes /= scale

        traffic_light_array = TrafficLightArray()
        traffic_sign_array = TrafficSignArray()
        obstacle_array = ObstacleArray()
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so everything after the first low score is low too
            if score < 0.5:
                break

            color = label_color(label)
            b = box.astype(int)
            draw_box(draw, b, color=color)
            caption = "{} {:.3f}".format(self.labels_to_names[label], score)
            draw_caption(draw, b, caption)

            if label == 9:  # traffic light
                # Calibration reference: ~15.25 in wide light seen 84.96 px
                # wide at 250 in.
                traff_light_msg = TrafficLight()
                what_color = self.detect_color(rgb_frame, box)
                distance = self.getDistance(box, 15.25, 84.96498, 250, 1.25)
                traff_light_msg.type = what_color
                traff_light_msg.x = distance
                traff_light_msg.y = 0
                traffic_light_array.lights.append(traff_light_msg)

            if label == 11:  # stop sign
                traff_sign_msg = TrafficSign()
                distance = self.getDistance(box, 30, 240.78, 160, 1.15)
                traff_sign_msg.type = 1
                traff_sign_msg.x = distance
                traff_sign_msg.y = 0
                traffic_sign_array.signs.append(traff_sign_msg)

            if label == 2:  # car -> obstacle message
                # Rescale pixel coordinates into the calibrated camera frame.
                u_min, u_max = box[0] / 2.56, box[2] / 2.56
                v_max = box[3] / 2.56
                width = u_max - u_min
                if box[2] > 300 and width > 10 and box[0] < 1800:
                    print(box)
                    array_msg = Float32MultiArray()
                    array_msg.data = list(box)
                    distance = self.getDistance(box, 64.4, 196, 466, 1.13)
                    print("car distance", distance)
                    # Back-project the box's bottom corners into bearing rays.
                    max_hom = np.array([u_max, v_max, 1])
                    min_hom = np.array([u_min, v_max, 1])
                    max_p = np.dot(self.p2_inv, max_hom)[:3]
                    min_p = np.dot(self.p2_inv, min_hom)[:3]
                    # Flip rays that point behind the camera.
                    if max_p[2] < 0:
                        max_p[0], max_p[2] = -max_p[0], -max_p[2]
                    if min_p[2] < 0:
                        min_p[0], min_p[2] = -min_p[0], -min_p[2]
                    max_frustum_angle = (np.arctan2(max_p[0], max_p[2])) * -1
                    print("max angle", max_frustum_angle)
                    min_frustum_angle = (np.arctan2(min_p[0], min_p[2])) * -1
                    print("min angle", min_frustum_angle)
                    right = (distance - 1) * np.tan(max_frustum_angle)
                    left = (distance - 1) * np.tan(min_frustum_angle)
                    obs = Obstacle()
                    obs.x1 = distance
                    obs.x2 = distance + 1.85  # assumed car depth (m) -- TODO confirm
                    obs.y1 = right
                    obs.y2 = left
                    obstacle_array.obstacles.append(obs)
                    print("left:", left, "right:", right, "distance", distance)

        self.obstacle_publisher.publish(obstacle_array)
        self.traff_light_pub.publish(traffic_light_array)
        self.traff_sign_pub.publish(traffic_sign_array)

        # FIX: a matplotlib figure used to be created here on every frame and
        # never shown or closed, leaking one figure per processed frame; the
        # plt.figure()/plt.axis() calls have been removed.
        try:
            self.detect_pub.publish(self.bridge.cv2_to_imgmsg(draw, "bgr8"))
        except CvBridgeError:
            pass
if __name__ == "__main__":
    # Instantiate the detection node; its constructor blocks inside the
    # frame-processing loop until the node is shut down.
    detection_node = CameraDetectionROSNode()
| [
"rospy.Subscriber",
"numpy.arctan2",
"keras_retinanet.utils.visualization.draw_caption",
"tensorflow.ConfigProto",
"matplotlib.pyplot.figure",
"path_planner.msg.ObstacleArray",
"numpy.linalg.pinv",
"path_planner.msg.Obstacle",
"keras_retinanet.utils.image.resize_image",
"keras_retinanet.utils.visu... | [((2037, 2092), 'keras_retinanet.models.load_model', 'models.load_model', (['model_path'], {'backbone_name': '"""resnet50"""'}), "(model_path, backbone_name='resnet50')\n", (2054, 2092), False, 'from keras_retinanet import models\n'), ((1409, 1425), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1423, 1425), True, 'import tensorflow as tf\n'), ((1480, 1505), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1490, 1505), True, 'import tensorflow as tf\n'), ((2376, 2433), 'rospy.Publisher', 'rospy.Publisher', (['"""/obstacle"""', 'ObstacleArray'], {'queue_size': '(1)'}), "('/obstacle', ObstacleArray, queue_size=1)\n", (2391, 2433), False, 'import rospy\n'), ((2476, 2533), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.signalInterruptHandler'], {}), '(signal.SIGINT, self.signalInterruptHandler)\n', (2489, 2533), False, 'import signal\n'), ((2573, 2629), 'rospy.init_node', 'rospy.init_node', (['"""camera_detection_node"""'], {'anonymous': '(True)'}), "('camera_detection_node', anonymous=True)\n", (2588, 2629), False, 'import rospy\n'), ((2700, 2786), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/front_camera/image_raw"""', 'Image', 'self.updateImage'], {'queue_size': '(1)'}), "('/front_camera/image_raw', Image, self.updateImage,\n queue_size=1)\n", (2716, 2786), False, 'import rospy\n'), ((2840, 2850), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (2848, 2850), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((2903, 2952), 'rospy.Publisher', 'rospy.Publisher', (['"""/detected"""', 'Image'], {'queue_size': '(1)'}), "('/detected', Image, queue_size=1)\n", (2918, 2952), False, 'import rospy\n'), ((2979, 3036), 'rospy.Publisher', 'rospy.Publisher', (['"""/bbox"""', 'Float32MultiArray'], {'queue_size': '(1)'}), "('/bbox', Float32MultiArray, queue_size=1)\n", (2994, 3036), False, 'import rospy\n'), ((3070, 3136), 'rospy.Publisher', 'rospy.Publisher', 
(['"""/traffic_light"""', 'TrafficLightArray'], {'queue_size': '(1)'}), "('/traffic_light', TrafficLightArray, queue_size=1)\n", (3085, 3136), False, 'import rospy\n'), ((3168, 3232), 'rospy.Publisher', 'rospy.Publisher', (['"""/traffic_sign"""', 'TrafficSignArray'], {'queue_size': '(1)'}), "('/traffic_sign', TrafficSignArray, queue_size=1)\n", (3183, 3232), False, 'import rospy\n'), ((4836, 4942), 'numpy.array', 'np.array', (['[[553.144531, 0.0, 383.442257, 0.0], [0.0, 552.951477, 348.178354, 0.0], [\n 0.0, 0.0, 1.0, 0]]'], {}), '([[553.144531, 0.0, 383.442257, 0.0], [0.0, 552.951477, 348.178354,\n 0.0], [0.0, 0.0, 1.0, 0]])\n', (4844, 4942), True, 'import numpy as np\n'), ((4996, 5019), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.p2'], {}), '(self.p2)\n', (5010, 5019), True, 'import numpy as np\n'), ((5283, 5329), 'rospy.signal_shutdown', 'rospy.signal_shutdown', (['"""CTRL+C Signal Caught."""'], {}), "('CTRL+C Signal Caught.')\n", (5304, 5329), False, 'import rospy\n'), ((5367, 5377), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5375, 5377), False, 'import sys\n'), ((6572, 6590), 'numpy.array', 'np.array', (['crop_img'], {}), '(crop_img)\n', (6580, 6590), True, 'import numpy as np\n'), ((7462, 7500), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (7474, 7500), False, 'import cv2\n'), ((7517, 7543), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['image'], {}), '(image)\n', (7536, 7543), False, 'import PIL\n'), ((7777, 7800), 'keras_retinanet.utils.image.preprocess_image', 'preprocess_image', (['image'], {}), '(image)\n', (7793, 7800), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((7824, 7843), 'keras_retinanet.utils.image.resize_image', 'resize_image', (['image'], {}), '(image)\n', (7836, 7843), False, 'from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\n'), ((7885, 7896), 'time.time', 'time.time', ([], {}), '()\n', 
(7894, 7896), False, 'import time\n'), ((8243, 8262), 'path_planner.msg.TrafficLightArray', 'TrafficLightArray', ([], {}), '()\n', (8260, 8262), False, 'from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle\n'), ((8292, 8310), 'path_planner.msg.TrafficSignArray', 'TrafficSignArray', ([], {}), '()\n', (8308, 8310), False, 'from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle\n'), ((8336, 8351), 'path_planner.msg.ObstacleArray', 'ObstacleArray', ([], {}), '()\n', (8349, 8351), False, 'from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle\n'), ((11984, 12012), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (11994, 12012), True, 'import matplotlib.pyplot as plt\n'), ((12021, 12036), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12029, 12036), True, 'import matplotlib.pyplot as plt\n'), ((8035, 8064), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (8049, 8064), True, 'import numpy as np\n'), ((8541, 8559), 'keras_retinanet.utils.colors.label_color', 'label_color', (['label'], {}), '(label)\n', (8552, 8559), False, 'from keras_retinanet.utils.colors import label_color\n'), ((8605, 8635), 'keras_retinanet.utils.visualization.draw_box', 'draw_box', (['draw', 'b'], {'color': 'color'}), '(draw, b, color=color)\n', (8613, 8635), False, 'from keras_retinanet.utils.visualization import draw_box, draw_caption\n'), ((8726, 8756), 'keras_retinanet.utils.visualization.draw_caption', 'draw_caption', (['draw', 'b', 'caption'], {}), '(draw, b, caption)\n', (8738, 8756), False, 'from keras_retinanet.utils.visualization import draw_box, draw_caption\n'), ((8782, 8801), 'std_msgs.msg.Float32MultiArray', 'Float32MultiArray', ([], {}), '()\n', (8799, 8801), False, 'from std_msgs.msg 
import Float32MultiArray\n'), ((8931, 8945), 'path_planner.msg.TrafficLight', 'TrafficLight', ([], {}), '()\n', (8943, 8945), False, 'from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle\n'), ((9547, 9560), 'path_planner.msg.TrafficSign', 'TrafficSign', ([], {}), '()\n', (9558, 9560), False, 'from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle\n'), ((10563, 10590), 'numpy.array', 'np.array', (['[u_max, v_max, 1]'], {}), '([u_max, v_max, 1])\n', (10571, 10590), True, 'import numpy as np\n'), ((10621, 10648), 'numpy.array', 'np.array', (['[u_min, v_max, 1]'], {}), '([u_min, v_max, 1])\n', (10629, 10648), True, 'import numpy as np\n'), ((11414, 11424), 'path_planner.msg.Obstacle', 'Obstacle', ([], {}), '()\n', (11422, 11424), False, 'from path_planner.msg import TrafficLightArray, TrafficLight, TrafficSignArray, TrafficSign, ObstacleArray, Obstacle\n'), ((10677, 10705), 'numpy.dot', 'np.dot', (['self.p2_inv', 'max_hom'], {}), '(self.p2_inv, max_hom)\n', (10683, 10705), True, 'import numpy as np\n'), ((10738, 10766), 'numpy.dot', 'np.dot', (['self.p2_inv', 'min_hom'], {}), '(self.p2_inv, min_hom)\n', (10744, 10766), True, 'import numpy as np\n'), ((11019, 11049), 'numpy.arctan2', 'np.arctan2', (['max_p[0]', 'max_p[2]'], {}), '(max_p[0], max_p[2])\n', (11029, 11049), True, 'import numpy as np\n'), ((11155, 11185), 'numpy.arctan2', 'np.arctan2', (['min_p[0]', 'min_p[2]'], {}), '(min_p[0], min_p[2])\n', (11165, 11185), True, 'import numpy as np\n'), ((11293, 11318), 'numpy.tan', 'np.tan', (['max_frustum_angle'], {}), '(max_frustum_angle)\n', (11299, 11318), True, 'import numpy as np\n'), ((11361, 11386), 'numpy.tan', 'np.tan', (['min_frustum_angle'], {}), '(min_frustum_angle)\n', (11367, 11386), True, 'import numpy as np\n')] |
import os
import json
import random
import argparse
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init as weight_init
from torch.utils.data import Dataset, DataLoader
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def set_seed(seed):
    """Seed every RNG in use (stdlib, NumPy, PyTorch) for reproducible runs.

    Args:
        seed: integer seed applied to all generators.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # GPU generators keep separate state; seed them only when CUDA exists.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
class OutlierDataset(Dataset):
    """Wraps a 2-D feature matrix so a DataLoader yields (row_index, row_tensor)."""

    def __init__(self, X):
        # Cast once up front so each row converts cleanly to float32 later.
        self.X = X.astype('float')

    def __getitem__(self, index):
        row = torch.tensor(self.X[index, :], dtype=torch.float32)
        # The index is returned alongside the row so callers can map
        # per-sample results back to the original matrix positions.
        return index, row

    def __len__(self):
        return len(self.X)
class Model(nn.Module):
    """Fully-connected autoencoder: input -> 64 -> 32 -> 64 -> input."""

    def __init__(self, input_size, dropout=0.5):
        super(Model, self).__init__()
        # When dropout > 0 the attribute holds an nn.Dropout module
        # (truthy); otherwise it keeps the falsy number and is skipped
        # in encoder().
        self.dropout = nn.Dropout(dropout) if dropout > 0 else dropout
        self.encode_w1 = nn.Linear(input_size, 64)
        self.encode_w2 = nn.Linear(64, 32)
        self.decode_w1 = nn.Linear(32, 64)
        self.decode_w2 = nn.Linear(64, input_size)

    def encoder(self, x):
        """Map the input down to the 32-dim bottleneck code."""
        hidden = torch.relu(self.encode_w1(x))
        code = torch.relu(self.encode_w2(hidden))
        return self.dropout(code) if self.dropout else code

    def decoder(self, x):
        """Reconstruct the input from the bottleneck code."""
        hidden = torch.relu(self.decode_w1(x))
        return self.decode_w2(hidden)

    def forward(self, x):
        return self.decoder(self.encoder(x))
class Detector(object):
    """Autoencoder-based outlier detector.

    Fits a small autoencoder (``Model``) on the data and derives an
    outlier threshold from the distribution of per-sample reconstruction
    errors of the last training epoch (mean + 2*std).
    """

    def __init__(self,
                 lr=3e-3,
                 weight_decay=1e-5,
                 batch_size=128,
                 epochs=10):
        self.lr = lr
        self.weight_decay = weight_decay
        self.batch_size = batch_size
        self.epochs = epochs
        # Default threshold; overwritten after every training epoch.
        self.threshold = 0.5

    def cal_recon_err(self, preds, targets):
        """Per-sample reconstruction error: MSE averaged over features."""
        return F.mse_loss(preds, targets, reduction='none').mean(axis=-1)

    def cal_loss(self, preds, targets):
        """Scalar loss: mean reconstruction error over the batch."""
        return self.cal_recon_err(preds, targets).mean()

    def run_batch(self, batch, train):
        """Run one batch; return (summed loss, batch size, per-sample errors).

        BUGFIX: the original return statement referenced ``train_err``
        unconditionally, but it was only assigned on the ``train=True``
        path — calling with ``train=False`` raised NameError.  The
        per-sample errors are now computed on both paths.
        """
        idx, x = batch
        inputs = x.to(DEVICE)
        outputs = self.model(inputs)
        recon_err = self.cal_recon_err(outputs, inputs)
        loss = recon_err.mean()
        if train:
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        loss = loss.item()
        bsz = inputs.size(0)
        return loss * bsz, bsz, recon_err.detach().cpu().tolist()

    def train(self, epoch=None):
        """Run one training epoch and refresh the outlier threshold."""
        self.model.train()
        total_loss = 0
        total_cnt = 0
        train_errs = []
        for batch_idx, batch in enumerate(self.train_iter):
            loss, bsz, train_err = self.run_batch(batch, train=True)
            total_loss += loss
            total_cnt += bsz
            train_errs += train_err
        status = {'total_loss': total_loss / total_cnt}
        # Anything reconstructing worse than mean + 2*std is an outlier.
        mean = np.mean(train_errs)
        std = np.std(train_errs)
        self.threshold = mean + 2 * std
        return status

    def get_model(self, input_size):
        """Instantiate the autoencoder and its Adam optimizer on DEVICE."""
        self.model = Model(input_size=input_size).to(DEVICE)
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.lr,
                                    weight_decay=self.weight_decay)

    def fit(self, X):
        """Train on feature matrix X with early stopping (patience 3).

        Returns self so calls can be chained (see fit_extract).
        """
        dataset = OutlierDataset(X)
        self.train_iter = DataLoader(dataset=dataset,
                                     batch_size=self.batch_size,
                                     shuffle=True)
        self.get_model(X.shape[1])
        wait = 0
        best_loss = 1e9
        iteration = tqdm(range(1, self.epochs + 1))
        for epoch in iteration:
            epoch_status = self.train(epoch)
            if best_loss > epoch_status['total_loss']:
                best_loss = epoch_status['total_loss']
                wait = 0
            else:
                wait += 1
                if wait > 3:
                    break
        return self

    def extract(self, X):
        """Return row indices of X whose reconstruction error exceeds the threshold."""
        dataset = OutlierDataset(X)
        outlier_iter = DataLoader(dataset=dataset,
                                  batch_size=self.batch_size)
        outlier_idxs = []
        self.model.eval()
        with torch.no_grad():
            for batch in outlier_iter:
                idx, x = batch
                inputs = x.to(DEVICE)
                outputs = self.model(inputs)
                recon_err = self.cal_recon_err(outputs, inputs)
                outlier_idx = recon_err > self.threshold
                outlier_idxs += idx[outlier_idx].tolist()
        return outlier_idxs

    def fit_extract(self, X, **fit_params):
        """Convenience wrapper: fit on X, then extract outlier indices from X."""
        return self.fit(X, **fit_params).extract(X)
class OutlierDetector(object):
    """End-to-end pipeline: load a CSV, detect outlier rows, write results."""

    def __init__(self, input_fname, result_path):
        self.get_data(input_fname)
        self.input_fname = input_fname
        self.result_path = result_path

    def get_data(self, input_fname):
        """Load the CSV and record numeric vs. categorical column positions."""
        frame = pd.read_csv(input_fname)
        numeric_cols = frame.dtypes[frame.dtypes != 'object'].index
        numeric_positions = [frame.columns.get_loc(col) for col in numeric_cols]
        all_positions = set(range(frame.shape[1]))
        self.data = frame
        self.num_vars = numeric_positions
        self.cat_vars = list(all_positions - set(numeric_positions))

    def write_json(self, outlier_idxs):
        """Dump outlier count and indices to <result_path>/result.json."""
        payload = {
            "result": {
                "num_outliers": len(outlier_idxs),
                "outlier_indices": outlier_idxs,
            }
        }
        target = os.path.join(self.result_path, "result.json")
        with open(target, "w") as json_file:
            json.dump(payload, json_file)

    def run(self):
        """Fit the detector on the scaled numeric columns, then write the
        outlier report plus a cleaned CSV with the outlier rows removed."""
        if not os.path.isdir(self.result_path):
            os.makedirs(self.result_path)
        scaled = StandardScaler().fit_transform(self.data.iloc[:, self.num_vars])
        outlier_idxs = Detector().fit_extract(scaled)
        self.write_json(outlier_idxs)
        total_rows = self.data.shape[0]
        clear_idxs = list(set(range(total_rows)) - set(outlier_idxs))
        csv_target = os.path.join(self.result_path, 'result.csv')
        self.data.iloc[clear_idxs, :].to_csv(csv_target, index=False)
if __name__ == '__main__':
    # CLI entry point: choose the input CSV and the output directory,
    # then run the full outlier-detection pipeline.
    cli = argparse.ArgumentParser()
    cli.add_argument('--input_fname', type=str, default='bank.csv')
    cli.add_argument('--result_path', type=str, default='bank_outlier')
    options = cli.parse_args()
    OutlierDetector(input_fname=options.input_fname,
                    result_path=options.result_path).run()
| [
"torch.nn.Dropout",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.relu",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"numpy.mean",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"numpy.std",
"random.seed",
"torch.nn.Linear",
"json.dump",
"torch.ma... | [((441, 458), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (452, 458), False, 'import random\n'), ((463, 483), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (477, 483), True, 'import numpy as np\n'), ((488, 511), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (505, 511), False, 'import torch\n'), ((519, 544), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (542, 544), False, 'import torch\n'), ((6694, 6719), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6717, 6719), False, 'import argparse\n'), ((377, 402), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (400, 402), False, 'import torch\n'), ((554, 586), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (580, 586), False, 'import torch\n'), ((763, 799), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (775, 799), False, 'import torch\n'), ((1129, 1154), 'torch.nn.Linear', 'nn.Linear', (['input_size', '(64)'], {}), '(input_size, 64)\n', (1138, 1154), True, 'import torch.nn as nn\n'), ((1180, 1197), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (1189, 1197), True, 'import torch.nn as nn\n'), ((1223, 1240), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(64)'], {}), '(32, 64)\n', (1232, 1240), True, 'import torch.nn as nn\n'), ((1266, 1291), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'input_size'], {}), '(64, input_size)\n', (1275, 1291), True, 'import torch.nn as nn\n'), ((1365, 1378), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (1375, 1378), False, 'import torch\n'), ((1421, 1434), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (1431, 1434), False, 'import torch\n'), ((1578, 1591), 'torch.relu', 'torch.relu', (['x'], {}), '(x)\n', (1588, 1591), False, 'import torch\n'), ((3329, 3348), 'numpy.mean', 'np.mean', (['train_errs'], {}), '(train_errs)\n', (3336, 
3348), True, 'import numpy as np\n'), ((3363, 3381), 'numpy.std', 'np.std', (['train_errs'], {}), '(train_errs)\n', (3369, 3381), True, 'import numpy as np\n'), ((3773, 3842), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'self.batch_size', 'shuffle': '(True)'}), '(dataset=dataset, batch_size=self.batch_size, shuffle=True)\n', (3783, 3842), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4438, 4493), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'self.batch_size'}), '(dataset=dataset, batch_size=self.batch_size)\n', (4448, 4493), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5354, 5378), 'pandas.read_csv', 'pd.read_csv', (['input_fname'], {}), '(input_fname)\n', (5365, 5378), True, 'import pandas as pd\n'), ((5890, 5935), 'os.path.join', 'os.path.join', (['self.result_path', '"""result.json"""'], {}), "(self.result_path, 'result.json')\n", (5902, 5935), False, 'import os\n'), ((6531, 6575), 'os.path.join', 'os.path.join', (['self.result_path', '"""result.csv"""'], {}), "(self.result_path, 'result.csv')\n", (6543, 6575), False, 'import os\n'), ((1075, 1094), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1085, 1094), True, 'import torch.nn as nn\n'), ((4584, 4599), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4597, 4599), False, 'import torch\n'), ((6004, 6029), 'json.dump', 'json.dump', (['obj', 'json_file'], {}), '(obj, json_file)\n', (6013, 6029), False, 'import json\n'), ((6070, 6101), 'os.path.isdir', 'os.path.isdir', (['self.result_path'], {}), '(self.result_path)\n', (6083, 6101), False, 'import os\n'), ((6115, 6144), 'os.makedirs', 'os.makedirs', (['self.result_path'], {}), '(self.result_path)\n', (6126, 6144), False, 'import os\n'), ((2146, 2190), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['preds', 'targets'], {'reduction': '"""none"""'}), "(preds, targets, reduction='none')\n", (2156, 2190), True, 'import 
torch.nn.functional as F\n'), ((6223, 6239), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6237, 6239), False, 'from sklearn.preprocessing import StandardScaler\n')] |
#! /usr/bin/env python
# coding=utf-8
# Copyright © 2016 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import json
import pandas as pd
import numpy as np
import scipy.sparse as sp
import os.path
import sys
import itertools
from collections import defaultdict
def symmetrize(a):
    """Mirror a square matrix across its diagonal (A + A.T with the
    diagonal counted only once)."""
    diagonal_part = np.diag(a.diagonal())
    return a + a.T - diagonal_part
class Tables:
    """Loads the pre-extracted Swiss-parliament CSV tables from data/ and
    derives collaboration / interest statistics, writing JSON files that
    feed the visualization.

    Attributes:
        needed_tables: names of the required CSV files (without extension).
        df: dict mapping each table name to its loaded DataFrame.
    """

    def check_data_exists(self):
        # Abort early with an explicit message if any required CSV is missing.
        for table_name in self.needed_tables:
            if not os.path.isfile('data/' + table_name + '.csv'):
                sys.exit('[ERROR] Needed table ' + table_name)

    def __init__(self):
        self.needed_tables = ['Business',
                              'MemberCouncil',
                              'Tags',
                              'BusinessRole',
                              'Active_People',
                              'Transcript',
                              'Session',
                              'Person',
                              'adj']
        self.check_data_exists()
        # One DataFrame per table, keyed by table name.
        self.df = {}
        for table in self.needed_tables:
            self.df[table] = pd.read_csv('data/' + table + '.csv')

    def get_friends(self, adj):
        # sorts a person's friends in decreasing order of collaboration
        # Returns {PersonIdCode: [{'friend': id, 'number': weight}, ...]}.
        # NOTE(review): uses the deprecated Series.iteritems() — pandas < 2.0 only.
        dico = {}
        for i in adj.index:
            row = adj.loc[i].sort_values(ascending=False)
            friends = []
            for j, k in row.iteritems():
                if k.item() > 0:
                    sub_dico = {'friend': j, 'number': k.item()}
                    friends.append(sub_dico)
            dico[i.item()] = friends
        return dico

    def cosigner(self):
        """Build and export the co-signature adjacency matrix and the
        per-person sorted friends list (adj_cosign.json / friends_cosign.json)."""
        # `friends` is first a sparse co-sign count matrix (PersonNumber
        # indexed), then rebound below to the sorted-friends dict.
        friends = self.relation_between_person('Auteur', 'Cosignataire')
        adj_name = 'adj_cosign'
        friends_name = 'friends_cosign'
        def get_cosign_friends(matrix, person_number):
            # Non-zero co-sign counts for one person, sorted descending.
            x = matrix[person_number,:].nonzero()[1]
            y = matrix[person_number,:].data[0]
            df = pd.DataFrame({'Person_num':x, 'times_cosigner':y })
            df = df.sort_values(by='times_cosigner', ascending=False)
            df = df.reset_index(drop=True)
            return df
        zero_adj = self.df['adj'].set_index('PersonIdCode')
        people = self.df['Active_People'].set_index('PersonIdCode')
        def fill_adj(adj, people):
            # getting a list of active members (we're only interested in them)
            active = people.PersonNumber.tolist()
            print(active)
            # going through an empty adj matrix with PersonIdCodes as rows and columns
            for row in adj.iterrows():
                person_id = int(row[0])
                # converting from PersonIdCode to PersonNumber for friends search
                person_number = people.loc[person_id].PersonNumber
                # searching co-sign friends w/ the function defined above
                friends_matrix = get_cosign_friends(friends, person_number)
                # looping through friends to fill the matrix
                for friend in friends_matrix.iterrows():
                    # checking if active
                    if friend[1].Person_num in active:
                        # converting from PersonNumber to PersonIdCode
                        friend_id = int(people.loc[people.PersonNumber == friend[1].Person_num].index.tolist()[0])
                        # Updating matrix
                        adj.loc[person_id, str(friend_id)] = friend[1].times_cosigner
            return adj
        adj = fill_adj(zero_adj, people)
        friends = self.get_friends(adj)
        filepath = 'data/'+friends_name+'.json'
        with open(filepath, 'w') as fp:
            json.dump(friends, fp)
        print("[INFO] JSON created in file ", filepath)
        filepath = 'data/' + adj_name + '.json'
        # NOTE(review): debug CSV dump left in — consider removing.
        adj.to_csv('data/bitch.csv')
        adj.to_json(filepath, orient='index')
        print("[INFO] JSON created in file ", filepath)

    def opponent(self):
        """Sparse matrix of author/opponent pairings (same shape as cosigner's)."""
        opponents = self.relation_between_person('Auteur', 'Opposant(e)')
        return opponents

    def relation_between_person(self, role_1, role_2):
        """ Number of time two person have an author-cosigner relation"""
        # TODO: fill the matrix in a symmetric way (upper right corner first) and then use symmetrize function
        # NOTE(review): the local names are swapped relative to the roles:
        # `cosigners` is filtered on role_1 and `authors` on role_2.  The
        # resulting counts are added symmetrically, so the matrix is the
        # same either way — but confirm before relying on row semantics.
        df_role = self.df['BusinessRole']
        df_member = self.df['MemberCouncil']
        # create cosigners table
        cosigners = df_role.loc[(df_role.MemberCouncilNumber.notnull()) & (df_role.RoleName == role_1)]
        cosigners = cosigners[['BusinessNumber', 'MemberCouncilNumber']]
        cosigners = cosigners.astype(int)
        print(role_1 + "table shape: ", cosigners.shape)
        # create authors table
        authors = df_role.loc[(df_role.MemberCouncilNumber.notnull()) & (df_role.RoleName == role_2)]
        authors = authors[['BusinessNumber', 'MemberCouncilNumber']]
        authors = authors.astype(int)
        print(role_2 + "table shape: ", authors.shape)
        # create the sparse matrix of right size
        max_id = df_member.PersonNumber.max()
        friends = sp.lil_matrix((max_id, max_id), dtype=np.int32)
        # fill the sparse matrix
        def add_to_friend(author, cosigners):
            # Symmetric increment: both (a, c) and (c, a) get +1.
            for cosigner in cosigners:
                friends[author, cosigner] += 1
                friends[cosigner, author] += 1
        def fill_matrix(authors, table_cosigners):
            for (auteur_num, business_num) in zip(authors.MemberCouncilNumber, authors.BusinessNumber):
                cosigners = table_cosigners.loc[table_cosigners.BusinessNumber == business_num]['MemberCouncilNumber']
                if cosigners.size != 0:
                    add_to_friend(auteur_num, cosigners)
        fill_matrix(authors, cosigners)
        print("Matrix created of size ", friends.nonzero())
        return friends

    def author(self):
        """ Number of times a member of council is author of an initiative"""
        df_role = self.df['BusinessRole']
        number_initiative = df_role.loc[
            (df_role.MemberCouncilNumber.notnull()) & (df_role.RoleName == 'Auteur')
        ].groupby('MemberCouncilNumber').size()
        number_initiative = number_initiative.to_frame(name='author')
        number_initiative = number_initiative.reset_index().astype(int)
        return number_initiative

    def interest(self, cosign=True, auth=True):
        """ Most frequent tags in redacted or signed initiative """
        # NOTE(review): DataFrame.get_value/set_value below are removed in
        # pandas >= 1.0 — this method requires an old pandas.
        df_business = self.df['Business']
        df_role = self.df['BusinessRole']
        temp = self.df['Tags']
        # get tag for each business
        df_business_tags = df_business[['ID', 'Tags']].set_index('ID')
        # "2500" is used as the fallback tag for untagged business items.
        df_business_tags['Tags'].fillna("2500", inplace=True)
        df_business_tags = df_business_tags.dropna(axis=0)
        # create a DataFrame business->tag filled with zero
        topics = pd.DataFrame(index=df_business_tags.index)
        for i in temp.ID:
            topics[i] = 0
        # split the tag "4|8|12" in a list(4, 8, 12)
        df_business_tags['Tags'] = df_business_tags['Tags'].apply(lambda x: x.split('|'))
        # fill the cell of topics table
        def fill_cell(business_number, tags_array):
            for tag in tags_array:
                previous_val = topics.get_value(int(business_number), int(tag))
                topics.set_value(int(business_number), int(tag), int(previous_val + 1))
        for i, tags in zip(df_business_tags.index, df_business_tags.Tags):
            fill_cell(i, tags)
        # ensure topics has type integer
        topics = topics.astype(int)
        # Get a table with (author, co-signer) and the related business
        authors = df_role.loc[(df_role.MemberCouncilNumber.notnull())]
        if cosign == True and auth == True:
            authors = authors.loc[(authors.RoleName == 'Auteur') | (authors.RoleName == 'Cosignataire')]
        elif cosign == True and auth == False:
            authors = authors.loc[(authors.RoleName == 'Cosignataire')]
        else:
            authors = authors.loc[(authors.RoleName == 'Auteur')]
        authors = authors[['MemberCouncilNumber', 'BusinessNumber']]
        authors = authors.astype(int)
        # Finally join the table of tagged business and author (or co-signer) to check their interest
        interest = authors.join(topics, on='BusinessNumber', how='inner')
        interest = interest.sort_values(by='MemberCouncilNumber')
        interest = interest.reset_index(drop=True)
        interest = interest.groupby('MemberCouncilNumber', as_index=True)[temp.ID].agg(lambda x: x.sum())
        # Some decorations
        interest.columns = temp.TagName
        interest = interest.reset_index()
        return interest

    def active_interest(self, cosign=True, auth=True):
        """Interest table restricted to active members, with zero rows added
        for active members who never authored/co-signed anything."""
        df_interest = self.interest(cosign, auth)
        df_active = self.df['Active_People']
        df_interest = df_interest[df_interest.MemberCouncilNumber.isin(df_active.PersonNumber)]
        # Active members missing from the interest table get an all-zero row.
        missing = np.array(df_active.PersonNumber[~df_active.PersonNumber.isin(df_interest.MemberCouncilNumber)])
        if len(missing) > 0:
            n = len(df_interest.columns)
            for i in missing:
                arr = np.zeros(n)
                arr[0] = i
                df_interest.loc[-1] = arr
                df_interest.index = df_interest.index + 1
        # Align by sorted person number, then swap in the PersonIdCode.
        # NOTE(review): the PersonIdCode assignment relies on positional
        # alignment after both sorts — verify both frames share row order.
        df_active = df_active.sort_values(by='PersonNumber')
        df_interest = df_interest.sort_values(by='MemberCouncilNumber')
        df_interest = df_interest.reset_index()
        df_interest['PersonIdCode'] = df_active.PersonIdCode
        df_interest = df_interest.drop(['index', 'MemberCouncilNumber'], axis=1)
        return df_interest

    def get_short_transcripts(self, limit):
        # returns a transcript sub-table with only
        # transcripts longer than limit
        def word_counter(df):
            # NaN Text cells arrive as float; count them as zero words.
            if type(df['Text']) == float:
                return 0
            else:
                return len(df['Text'].split())
        transcripts = self.df['Transcript']
        # filter transcription with PersonIdField
        transcripts = transcripts[np.isfinite(transcripts['PersonNumber'])]
        transcripts['PersonNumber'] = transcripts['PersonNumber'].astype(int)
        # filter long intervention
        transcripts['NumberWord'] = transcripts.apply(word_counter, axis=1)
        return transcripts.loc[transcripts.NumberWord > limit]

    def interventions(self, filename=None):
        """Per-person yearly intervention counts (with yearly medians),
        exported to data/<filename>.json and also returned as a dict."""
        transcripts = self.df['Transcript']
        sessions = self.df['Session']
        persons = self.df['Person']
        if filename is None:
            filename = 'interventions'
        def define_year(df):
            return year_dict[df['IdSession']]
        def get_year(df):
            return int(df.StartDate[:4])
        # Only interventions longer than 30 words count.
        df_long = self.get_short_transcripts(30)
        # link session number to year
        sessions.set_index('ID', inplace=True)
        sessions['StartYear'] = sessions.apply(get_year, axis=1)
        year_dict = sessions['StartYear'].to_dict()
        # Unknown sessions fall back to year 0.
        year_dict = defaultdict(lambda: 0, year_dict)
        # add year field
        df_long['year'] = df_long.apply(define_year, axis=1)
        # table : PersonNumber, year, interventions
        interventions = pd.DataFrame(df_long.groupby(['PersonNumber', 'year']).size().rename('Counts'))
        interventions = interventions.reset_index()
        print(interventions)
        # table : year, median intervention per person
        median = interventions.groupby('year').agg('median')
        median = median['Counts'].rename('median').astype(int)
        # table personNumber, PersonIdCode
        persons = persons.dropna(axis=0, subset=['PersonNumber', 'PersonIdCode'])
        persons.PersonIdCode = persons.PersonIdCode.astype(int)
        persons = persons.set_index('PersonNumber')
        persons = persons['PersonIdCode']
        # in main table, link to PersonIdCode and median per year
        interventions = interventions.join(persons, how='inner', on='PersonNumber')
        interventions = interventions.join(median, how='inner', on='year')
        # create the dictionnary for Json
        interventions.sort_values(by='PersonIdCode', inplace=True)
        persons_id = interventions.PersonIdCode.unique()
        dic = {}
        for person_id in persons_id:
            arr = list()
            for row in interventions.loc[interventions.PersonIdCode == person_id].values:
                internal_dic = {'year': int(row[1]), 'int': int(row[2]), 'median': int(row[4])}
                arr.append(internal_dic)
            dic[str(person_id)] = arr
        # ensure you have all the person, even the one without intervention
        # NOTE(review): the (2016, 0, 18) placeholder is hard-coded — confirm
        # it matches the expected default on the viz side.
        for id in persons:
            if str(id) not in dic:
                dic[str(id)] = [{'year': int(2016), 'int': int(0), 'median': int(18)}]
        filepath = 'data/' + filename + '.json'
        with open(filepath, 'w') as fp:
            json.dump(dic, fp)
        print("[INFO] JSON created in file ", filepath)
        return dic

    def adj_interventions(self):
        # outputs an adjacency matrix for common interventions
        # as a json file and also a sorted list of friends as
        # a json, both are ready for viz
        def person_number_to_id(active_ids, ppl):
            # builds one PersonNumber PersonIdCode map
            # instead of querying
            dico = {}
            active_numbers = []
            for row in ppl.iterrows():
                row = row[1]
                id_code = int(row['PersonIdCode'])
                number = row['PersonNumber']
                if id_code in active_ids:
                    dico[number] = id_code
                    active_numbers.append(number)
            return dico, active_numbers
        def combine(l, n):
            # gets all permutated n-uples out of the list l
            return list(itertools.combinations(l, n))
        def update_adj(adj, pair):
            # updates adjacency matrix
            # creates weighted adj matrix
            try:
                adj.loc[pair[0], str(pair[1])] += 1
                adj.loc[pair[1], str(pair[0])] += 1
                #print('adj updated')
            except:
                #print('adj: not an active pair')
                pass
            #print(' ')
        def populate_adj(adj, df, dico, active_numbers, subjects):
            # For each debated subject, every pair of active speakers gets +1.
            for subj in subjects:
                one = df.loc[df.IdSubject == subj]
                people = one.PersonNumber.unique().tolist()
                pairs = combine(people, 2)
                if len(pairs) > 0:
                    for pair in pairs:
                        if (pair[0] in active_numbers) and (pair[1] in active_numbers):
                            pair = [int(dico[pair[0]]), int(dico[pair[1]])]
                            #print(pair)
                            update_adj(adj, pair)
                #print('subject '+str(subj)+' done!')
            return adj
        adj_name = 'adj'
        friends_name = 'friends'
        zero_adj = self.df['adj'].set_index('PersonIdCode')
        active_ids = list(zero_adj.index)
        ppl = self.df['Person'].dropna(axis=0, subset=['PersonNumber', 'PersonIdCode'])
        transcripts = self.get_short_transcripts(30)
        subjects = transcripts['IdSubject'].unique().tolist()
        dico, active_numbers = person_number_to_id(active_ids, ppl)
        adja = populate_adj(zero_adj, transcripts, dico, active_numbers, subjects)
        friends = self.get_friends(adja)
        filepath = 'data/'+friends_name+'.json'
        with open(filepath, 'w') as fp:
            json.dump(friends, fp)
        filepath = 'data/' + adj_name + '.json'
        adja.to_json(filepath, orient='index')
        print("[INFO] JSON created in file ", filepath)
        return adja
| [
"pandas.DataFrame",
"json.dump",
"pandas.read_csv",
"numpy.zeros",
"numpy.isfinite",
"collections.defaultdict",
"itertools.combinations",
"scipy.sparse.lil_matrix",
"sys.exit"
] | [((5104, 5151), 'scipy.sparse.lil_matrix', 'sp.lil_matrix', (['(max_id, max_id)'], {'dtype': 'np.int32'}), '((max_id, max_id), dtype=np.int32)\n', (5117, 5151), True, 'import scipy.sparse as sp\n'), ((6897, 6939), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df_business_tags.index'}), '(index=df_business_tags.index)\n', (6909, 6939), True, 'import pandas as pd\n'), ((11098, 11132), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)', 'year_dict'], {}), '(lambda : 0, year_dict)\n', (11109, 11132), False, 'from collections import defaultdict\n'), ((1099, 1136), 'pandas.read_csv', 'pd.read_csv', (["('data/' + table + '.csv')"], {}), "('data/' + table + '.csv')\n", (1110, 1136), True, 'import pandas as pd\n'), ((1957, 2009), 'pandas.DataFrame', 'pd.DataFrame', (["{'Person_num': x, 'times_cosigner': y}"], {}), "({'Person_num': x, 'times_cosigner': y})\n", (1969, 2009), True, 'import pandas as pd\n'), ((3663, 3685), 'json.dump', 'json.dump', (['friends', 'fp'], {}), '(friends, fp)\n', (3672, 3685), False, 'import json\n'), ((10154, 10194), 'numpy.isfinite', 'np.isfinite', (["transcripts['PersonNumber']"], {}), "(transcripts['PersonNumber'])\n", (10165, 10194), True, 'import numpy as np\n'), ((12986, 13004), 'json.dump', 'json.dump', (['dic', 'fp'], {}), '(dic, fp)\n', (12995, 13004), False, 'import json\n'), ((15675, 15697), 'json.dump', 'json.dump', (['friends', 'fp'], {}), '(friends, fp)\n', (15684, 15697), False, 'import json\n'), ((521, 567), 'sys.exit', 'sys.exit', (["('[ERROR] Needed table ' + table_name)"], {}), "('[ERROR] Needed table ' + table_name)\n", (529, 567), False, 'import sys\n'), ((9236, 9247), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (9244, 9247), True, 'import numpy as np\n'), ((13931, 13959), 'itertools.combinations', 'itertools.combinations', (['l', 'n'], {}), '(l, n)\n', (13953, 13959), False, 'import itertools\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modifications copyright (C) 2017 <NAME>
# ==============================================================================
import time
import numpy
from tensorflow.python.framework import random_seed
from tensorflow.contrib.learn.python.learn.datasets import base
try:
import cPickle as pickle
except:
import pickle as pickle
class DataSet(object):
    """In-memory dataset with epoch-aware mini-batch iteration
    (TensorFlow MNIST-tutorial style).

    Internal state:
        _features, _labels: the (optionally scaled/shuffled) data arrays.
        _epochs_completed: number of full passes made so far.
        _index_in_epoch: cursor into the current epoch.
    """

    def __init__(self,
                 features,
                 labels,
                 fake_data=False,
                 one_hot=False,
                 scaling=False,
                 seed=None):
        """Construct a DataSet.
        one_hot arg is used only if fake_data is true.  When `scaling` is true,
        it scales the input from `[0, 255]` into `[0, 1]`.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert features.shape[0] == labels.shape[0], (
                'features.shape: %s labels.shape: %s' % (features.shape, labels.shape))
            self._num_examples = features.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns*depth] (assuming depth == 1)
            if scaling:
                # Convert from [0, 255] -> [0.0, 1.0].
                features = features.astype(numpy.float32)
                features = numpy.multiply(features, 1.0 / 255.0)
        self._features = features
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def features(self):
        return self._features

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, withoutMixWithNextEpoch=False, shuffle=True):
        """Return the next `batch_size` examples from this data set.

        When the epoch boundary is crossed, the default behavior pads the
        batch with the first examples of the (re-shuffled) next epoch;
        with `withoutMixWithNextEpoch=True` the possibly-short remainder
        of the current epoch is returned instead.
        """
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._features = self.features[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            features_rest_part = self._features[start:self._num_examples]
            labels_rest_part = self._labels[start:self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._features = self.features[perm]
                self._labels = self.labels[perm]
            if withoutMixWithNextEpoch:
                # Return only the tail of this epoch; restart cleanly next call.
                self._index_in_epoch = 0
                return features_rest_part, labels_rest_part
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            features_new_part = self._features[start:end]
            labels_new_part = self._labels[start:end]
            return numpy.concatenate((features_rest_part, features_new_part), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._features[start:end], self._labels[start:end]
def svm_read_problem(data_file_name, num_features, return_scipy=False):
    """Read LIBSVM-format data into dense rows.

    svm_read_problem(data_file_name, num_features) -> (y, x)
    where y is a list of float labels and x is a list of dense feature
    rows (each a zero-filled list of ``num_features`` floats).

    Args:
        data_file_name: path to a LIBSVM-format text file with one
            ``label idx1:val1 idx2:val2 ...`` instance per line
            (feature indices are 1-based).
        num_features: width of each dense row.
        return_scipy: kept for API compatibility; dense lists are
            always returned here.

    Returns:
        Tuple (prob_y, prob_x).

    BUGFIX: the original index-walking loop copied a feature's value
    into every column skipped between two sparse entries (e.g. for
    "1:a 3:c" it also wrote c into column 2).  Each value is now
    written only to its own 1-based column.  Unused accumulators
    (row_ptr, col_idx, the line counter) were removed.
    """
    prob_y = []
    prob_x = []
    with open(data_file_name) as fp:
        for line in fp:
            parts = line.split(None, 1)
            # In case an instance with all zero features
            if len(parts) == 1:
                parts += ['']
            label, features = parts
            xi = [0.0] * num_features
            for e in features.split():
                ind, val = e.split(":")
                xi[int(ind) - 1] = float(val)
            prob_x.append(xi)
            prob_y.append(float(label))
    return (prob_y, prob_x)
def read_dataset(dataset_name,
                 train_path,
                 test_path,
                 num_classes,
                 num_features,
                 one_hot=False,
                 y_label_offset=0,
                 scaling=False,
                 validation_size=0,
                 seed=None):
    """Load train/test data (pickled cache if present, else LIBSVM text),
    optionally one-hot encode labels, split off a validation set, and
    return a base.Datasets(train, validation, test) namedtuple.

    NOTE(review): the bare ``except:`` clauses below treat *any* failure
    (not just a missing pickle) as a cache miss and fall back to parsing
    the LIBSVM file.
    """
    # Read LIBSVM data
    try:
        print('Read data from `../data/' + dataset_name + '/train_data.pkl`...')
        with open('../data/' + dataset_name + '/train_data.pkl', 'rb') as filehandler:
            (y_train, x_train) = pickle.load(filehandler)
    except:
        print('(No such file or directory: `../data/' + dataset_name + '/train_data.pkl`)')
        print('Read data from ' + train_path + '...')
        y_train, x_train = svm_read_problem(train_path, num_features)
        x_train = numpy.array(x_train); y_train = numpy.array(y_train)
    try:
        print('Read data from `../data/' + dataset_name + '/test_data.pkl`...')
        with open('../data/' + dataset_name + '/test_data.pkl', 'rb') as filehandler:
            (y_test, x_test) = pickle.load(filehandler)
    except:
        print('(No such file or directory: `../data/' + dataset_name + '/test_data.pkl`)')
        print('Read data from ' + test_path + '...')
        y_test, x_test = svm_read_problem(test_path, num_features)
        x_test = numpy.array(x_test); y_test = numpy.array(y_test)
    # NOTE(review): only y_test is shifted by y_label_offset; y_train is
    # left untouched — confirm train labels are already zero-based.
    if y_label_offset != 0:
        y_test -= y_label_offset
    if one_hot:
        y_train = dense_to_one_hot(y_train, num_classes)
        y_test = dense_to_one_hot(y_test, num_classes)
    if not 0 <= validation_size <= len(x_train):
        raise ValueError(
            'Validation size should be between 0 and {}. Received: {}.'
            .format(len(x_train), validation_size))
    # The validation split is taken from the head of the training data.
    validation_features = x_train[:validation_size]
    validation_labels = y_train[:validation_size]
    x_train = x_train[validation_size:]
    y_train = y_train[validation_size:]
    options = dict(scaling=scaling, seed=seed)
    train = DataSet(x_train, y_train, **options)
    validation = DataSet(validation_features, validation_labels, **options)
    test = DataSet(x_test, y_test, **options)
    return base.Datasets(train=train, validation=validation, test=test)
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[list(index_offset + labels_dense.ravel())] = 1
return labels_one_hot
if __name__=='__main__':
num_inst = 10000
CHANNEL = 1
HEIGHT = 28
WIDTH = 28
NUM_CLASSES = 10
TEST_SIZE = 10000.0
train_path = '/home/loong/data/mnist.scale'
test_path = '/home/loong/data/mnist.scale.t'
dataset = read_dataset(train_path, test_path, NUM_CLASSES, CHANNEL, HEIGHT, WIDTH, one_hot=True)
| [
"numpy.random.seed",
"numpy.multiply",
"tensorflow.contrib.learn.python.learn.datasets.base.Datasets",
"numpy.concatenate",
"numpy.zeros",
"pickle.load",
"numpy.arange",
"numpy.array",
"tensorflow.python.framework.random_seed.get_seed",
"numpy.random.shuffle"
] | [((7577, 7637), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (7590, 7637), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((7862, 7900), 'numpy.zeros', 'numpy.zeros', (['(num_labels, num_classes)'], {}), '((num_labels, num_classes))\n', (7873, 7900), False, 'import numpy\n'), ((1345, 1371), 'tensorflow.python.framework.random_seed.get_seed', 'random_seed.get_seed', (['seed'], {}), '(seed)\n', (1365, 1371), False, 'from tensorflow.python.framework import random_seed\n'), ((1453, 1504), 'numpy.random.seed', 'numpy.random.seed', (['(seed1 if seed is None else seed2)'], {}), '(seed1 if seed is None else seed2)\n', (1470, 1504), False, 'import numpy\n'), ((7804, 7828), 'numpy.arange', 'numpy.arange', (['num_labels'], {}), '(num_labels)\n', (7816, 7828), False, 'import numpy\n'), ((2754, 2786), 'numpy.arange', 'numpy.arange', (['self._num_examples'], {}), '(self._num_examples)\n', (2766, 2786), False, 'import numpy\n'), ((2793, 2820), 'numpy.random.shuffle', 'numpy.random.shuffle', (['perm0'], {}), '(perm0)\n', (2813, 2820), False, 'import numpy\n'), ((5923, 5947), 'pickle.load', 'pickle.load', (['filehandler'], {}), '(filehandler)\n', (5934, 5947), True, 'import pickle as pickle\n'), ((6194, 6214), 'numpy.array', 'numpy.array', (['x_train'], {}), '(x_train)\n', (6205, 6214), False, 'import numpy\n'), ((6226, 6246), 'numpy.array', 'numpy.array', (['y_train'], {}), '(y_train)\n', (6237, 6246), False, 'import numpy\n'), ((6454, 6478), 'pickle.load', 'pickle.load', (['filehandler'], {}), '(filehandler)\n', (6465, 6478), True, 'import pickle as pickle\n'), ((6719, 6738), 'numpy.array', 'numpy.array', (['x_test'], {}), '(x_test)\n', (6730, 6738), False, 'import numpy\n'), ((6749, 6768), 'numpy.array', 'numpy.array', (['y_test'], {}), '(y_test)\n', (6760, 6768), False, 'import numpy\n'), 
((2042, 2079), 'numpy.multiply', 'numpy.multiply', (['features', '(1.0 / 255.0)'], {}), '(features, 1.0 / 255.0)\n', (2056, 2079), False, 'import numpy\n'), ((3324, 3356), 'numpy.arange', 'numpy.arange', (['self._num_examples'], {}), '(self._num_examples)\n', (3336, 3356), False, 'import numpy\n'), ((3365, 3391), 'numpy.random.shuffle', 'numpy.random.shuffle', (['perm'], {}), '(perm)\n', (3385, 3391), False, 'import numpy\n'), ((3844, 3910), 'numpy.concatenate', 'numpy.concatenate', (['(features_rest_part, features_new_part)'], {'axis': '(0)'}), '((features_rest_part, features_new_part), axis=0)\n', (3861, 3910), False, 'import numpy\n'), ((3913, 3975), 'numpy.concatenate', 'numpy.concatenate', (['(labels_rest_part, labels_new_part)'], {'axis': '(0)'}), '((labels_rest_part, labels_new_part), axis=0)\n', (3930, 3975), False, 'import numpy\n')] |
"""
Tests for code generation from SymPy/SymEngine objects.
"""
import pickle
import pytest
import numpy as np
from symengine.lib.symengine_wrapper import LambdaDouble, LLVMDouble
from symengine import zoo
from pycalphad import Model, variables as v
from pycalphad.codegen.callables import build_phase_records
from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions
from pycalphad.tests.fixtures import select_database, load_database
@select_database("alnipt.tdb")
def test_build_functions_options(load_database):
"""The correct SymEngine backend can be chosen for build_functions"""
dbf = load_database()
mod = Model(dbf, ['AL'], 'LIQUID')
int_cons = mod.get_internal_constraints()
backend = 'lambda'
fs_lambda = build_functions(mod.GM, mod.GM.free_symbols,
include_obj=True, func_options={'backend': backend},
include_grad=True, grad_options={'backend': backend},
include_hess=True, hess_options={'backend': backend})
assert isinstance(fs_lambda.func, LambdaDouble)
assert isinstance(fs_lambda.grad, LambdaDouble)
assert isinstance(fs_lambda.hess, LambdaDouble)
cfs_lambda = build_constraint_functions(mod.GM.free_symbols, int_cons,
func_options={'backend': backend},
jac_options={'backend': backend},
hess_options={'backend': backend})
assert isinstance(cfs_lambda.cons_func, LambdaDouble)
assert isinstance(cfs_lambda.cons_jac, LambdaDouble)
assert isinstance(cfs_lambda.cons_hess, LambdaDouble)
backend = 'llvm'
fs_llvm = build_functions(mod.GM, mod.GM.free_symbols,
include_obj=True, func_options={'backend': backend},
include_grad=True, grad_options={'backend': backend},
include_hess=True, hess_options={'backend': backend})
print(fs_llvm.func)
print(fs_lambda.func)
assert isinstance(fs_llvm.func, LLVMDouble)
assert isinstance(fs_llvm.grad, LLVMDouble)
assert isinstance(fs_llvm.hess, LLVMDouble)
cfs_llvm = build_constraint_functions(mod.GM.free_symbols, int_cons,
func_options={'backend': backend},
jac_options={'backend': backend},
hess_options={'backend': backend})
assert isinstance(cfs_llvm.cons_func, LLVMDouble)
assert isinstance(cfs_llvm.cons_jac, LLVMDouble)
assert isinstance(cfs_llvm.cons_hess, LLVMDouble)
@select_database("alnipt.tdb")
def test_phase_records_are_picklable(load_database):
dbf = load_database()
dof = np.array([300, 1.0])
mod = Model(dbf, ['AL'], 'LIQUID')
prxs = build_phase_records(dbf, [v.Species('AL')], ['LIQUID'], [v.T], {'LIQUID': mod}, build_gradients=True, build_hessians=True)
prx_liquid = prxs['LIQUID']
out = np.array([0.0])
prx_liquid.obj(out, dof)
prx_loaded = pickle.loads(pickle.dumps(prx_liquid))
out_unpickled = np.array([0.0])
prx_loaded.obj(out_unpickled, dof)
assert np.isclose(out_unpickled[0], -1037.653911)
assert np.all(out == out_unpickled)
@pytest.mark.xfail
@select_database("cfe_broshe.tdb")
def test_complex_infinity_can_build_callables_successfully(load_database):
"""Test that functions that containing complex infinity can be built with codegen."""
dbf = load_database()
mod = Model(dbf, ['C'], 'DIAMOND_A4')
mod_vars = [v.N, v.P, v.T] + mod.site_fractions
# Test builds functions only, since functions takes about 1 second to run.
# Both lambda and llvm backends take a few seconds to build the derivatives
# and are probably unnecessary to test.
# XXX: SymEngine does not produce a zoo for this case
assert zoo in list(mod.GM.atoms())
build_functions(mod.GM, mod_vars, include_obj=True, include_grad=False, include_hess=False)
int_cons = mod.get_internal_constraints()
build_constraint_functions(mod_vars, int_cons)
| [
"pycalphad.Model",
"pycalphad.codegen.sympydiff_utils.build_constraint_functions",
"numpy.all",
"pycalphad.codegen.sympydiff_utils.build_functions",
"numpy.isclose",
"numpy.array",
"pycalphad.variables.Species",
"pycalphad.tests.fixtures.select_database",
"pycalphad.tests.fixtures.load_database",
... | [((472, 501), 'pycalphad.tests.fixtures.select_database', 'select_database', (['"""alnipt.tdb"""'], {}), "('alnipt.tdb')\n", (487, 501), False, 'from pycalphad.tests.fixtures import select_database, load_database\n'), ((2713, 2742), 'pycalphad.tests.fixtures.select_database', 'select_database', (['"""alnipt.tdb"""'], {}), "('alnipt.tdb')\n", (2728, 2742), False, 'from pycalphad.tests.fixtures import select_database, load_database\n'), ((3364, 3397), 'pycalphad.tests.fixtures.select_database', 'select_database', (['"""cfe_broshe.tdb"""'], {}), "('cfe_broshe.tdb')\n", (3379, 3397), False, 'from pycalphad.tests.fixtures import select_database, load_database\n'), ((635, 650), 'pycalphad.tests.fixtures.load_database', 'load_database', ([], {}), '()\n', (648, 650), False, 'from pycalphad.tests.fixtures import select_database, load_database\n'), ((661, 689), 'pycalphad.Model', 'Model', (['dbf', "['AL']", '"""LIQUID"""'], {}), "(dbf, ['AL'], 'LIQUID')\n", (666, 689), False, 'from pycalphad import Model, variables as v\n'), ((776, 990), 'pycalphad.codegen.sympydiff_utils.build_functions', 'build_functions', (['mod.GM', 'mod.GM.free_symbols'], {'include_obj': '(True)', 'func_options': "{'backend': backend}", 'include_grad': '(True)', 'grad_options': "{'backend': backend}", 'include_hess': '(True)', 'hess_options': "{'backend': backend}"}), "(mod.GM, mod.GM.free_symbols, include_obj=True, func_options\n ={'backend': backend}, include_grad=True, grad_options={'backend':\n backend}, include_hess=True, hess_options={'backend': backend})\n", (791, 990), False, 'from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\n'), ((1252, 1423), 'pycalphad.codegen.sympydiff_utils.build_constraint_functions', 'build_constraint_functions', (['mod.GM.free_symbols', 'int_cons'], {'func_options': "{'backend': backend}", 'jac_options': "{'backend': backend}", 'hess_options': "{'backend': backend}"}), "(mod.GM.free_symbols, int_cons, func_options={\n 
'backend': backend}, jac_options={'backend': backend}, hess_options={\n 'backend': backend})\n", (1278, 1423), False, 'from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\n'), ((1755, 1969), 'pycalphad.codegen.sympydiff_utils.build_functions', 'build_functions', (['mod.GM', 'mod.GM.free_symbols'], {'include_obj': '(True)', 'func_options': "{'backend': backend}", 'include_grad': '(True)', 'grad_options': "{'backend': backend}", 'include_hess': '(True)', 'hess_options': "{'backend': backend}"}), "(mod.GM, mod.GM.free_symbols, include_obj=True, func_options\n ={'backend': backend}, include_grad=True, grad_options={'backend':\n backend}, include_hess=True, hess_options={'backend': backend})\n", (1770, 1969), False, 'from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\n'), ((2261, 2432), 'pycalphad.codegen.sympydiff_utils.build_constraint_functions', 'build_constraint_functions', (['mod.GM.free_symbols', 'int_cons'], {'func_options': "{'backend': backend}", 'jac_options': "{'backend': backend}", 'hess_options': "{'backend': backend}"}), "(mod.GM.free_symbols, int_cons, func_options={\n 'backend': backend}, jac_options={'backend': backend}, hess_options={\n 'backend': backend})\n", (2287, 2432), False, 'from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\n'), ((2806, 2821), 'pycalphad.tests.fixtures.load_database', 'load_database', ([], {}), '()\n', (2819, 2821), False, 'from pycalphad.tests.fixtures import select_database, load_database\n'), ((2832, 2852), 'numpy.array', 'np.array', (['[300, 1.0]'], {}), '([300, 1.0])\n', (2840, 2852), True, 'import numpy as np\n'), ((2864, 2892), 'pycalphad.Model', 'Model', (['dbf', "['AL']", '"""LIQUID"""'], {}), "(dbf, ['AL'], 'LIQUID')\n", (2869, 2892), False, 'from pycalphad import Model, variables as v\n'), ((3070, 3085), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (3078, 3085), True, 'import numpy as 
np\n'), ((3192, 3207), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (3200, 3207), True, 'import numpy as np\n'), ((3259, 3301), 'numpy.isclose', 'np.isclose', (['out_unpickled[0]', '(-1037.653911)'], {}), '(out_unpickled[0], -1037.653911)\n', (3269, 3301), True, 'import numpy as np\n'), ((3313, 3341), 'numpy.all', 'np.all', (['(out == out_unpickled)'], {}), '(out == out_unpickled)\n', (3319, 3341), True, 'import numpy as np\n'), ((3573, 3588), 'pycalphad.tests.fixtures.load_database', 'load_database', ([], {}), '()\n', (3586, 3588), False, 'from pycalphad.tests.fixtures import select_database, load_database\n'), ((3599, 3630), 'pycalphad.Model', 'Model', (['dbf', "['C']", '"""DIAMOND_A4"""'], {}), "(dbf, ['C'], 'DIAMOND_A4')\n", (3604, 3630), False, 'from pycalphad import Model, variables as v\n'), ((3988, 4083), 'pycalphad.codegen.sympydiff_utils.build_functions', 'build_functions', (['mod.GM', 'mod_vars'], {'include_obj': '(True)', 'include_grad': '(False)', 'include_hess': '(False)'}), '(mod.GM, mod_vars, include_obj=True, include_grad=False,\n include_hess=False)\n', (4003, 4083), False, 'from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\n'), ((4131, 4177), 'pycalphad.codegen.sympydiff_utils.build_constraint_functions', 'build_constraint_functions', (['mod_vars', 'int_cons'], {}), '(mod_vars, int_cons)\n', (4157, 4177), False, 'from pycalphad.codegen.sympydiff_utils import build_functions, build_constraint_functions\n'), ((3146, 3170), 'pickle.dumps', 'pickle.dumps', (['prx_liquid'], {}), '(prx_liquid)\n', (3158, 3170), False, 'import pickle\n'), ((2930, 2945), 'pycalphad.variables.Species', 'v.Species', (['"""AL"""'], {}), "('AL')\n", (2939, 2945), True, 'from pycalphad import Model, variables as v\n')] |
import numpy as np
from scipy import ndimage
import mahotas.center_of_mass
np.random.seed(2321)
def _mean_out(img, axis):
if len(img.shape) == 2: return img.mean(1-axis)
if axis == 0:
return _mean_out(img.mean(1), 0)
return _mean_out(img.mean(0), axis - 1)
def slow_center_of_mass(img):
'''
Returns the center of mass of img.
'''
xs = []
for axis,si in enumerate(img.shape):
xs.append(np.mean(_mean_out(img, axis) * np.arange(si)))
xs = np.array(xs)
xs /= img.mean()
return xs
def test_cmp_ndimage():
R = (255*np.random.rand(128,256)).astype(np.uint16)
R += np.arange(256, dtype=np.uint16)
m0,m1 = mahotas.center_of_mass(R)
n0,n1 = ndimage.center_of_mass(R)
assert np.abs(n0 - m0) < 1.
assert np.abs(n1 - m1) < 1.
def test_cmp_ndimage3():
R = (255*np.random.rand(32,128,8,16)).astype(np.uint16)
R += np.arange(16, dtype=np.uint16)
m = mahotas.center_of_mass(R)
n = ndimage.center_of_mass(R)
p = slow_center_of_mass(R)
assert np.abs(n - m).max() < 1.
assert np.abs(p - m).max() < 1.
def test_simple():
R = (255*np.random.rand(128,256)).astype(np.uint16)
R += np.arange(256, dtype=np.uint16)
m0,m1 = mahotas.center_of_mass(R)
assert 0 < m0 < 128
assert 0 < m1 < 256
def test_labels():
R = (255*np.random.rand(128,256)).astype(np.uint16)
labels = np.zeros(R.shape, np.intc)
labels[100:,:] += 1
labels[100:,100:] += 1
centres = mahotas.center_of_mass(R, labels)
for label,cm in enumerate(centres):
assert np.all(cm == mahotas.center_of_mass(R * (labels == label)))
def test_labels_not_intc():
img = np.arange(256).reshape((16,16))
labels = img.copy()
labels %= 3
cm = mahotas.center_of_mass(img, labels)
assert cm.shape == (3,2)
labels = labels.T.copy()
cm = mahotas.center_of_mass(img, labels.T)
assert cm.shape == (3,2)
labels = labels.T.copy()
labels = labels.astype(np.uint16)
cm = mahotas.center_of_mass(img, labels)
assert cm.shape == (3,2)
| [
"scipy.ndimage.center_of_mass",
"numpy.random.seed",
"numpy.abs",
"numpy.zeros",
"numpy.array",
"numpy.arange",
"numpy.random.rand"
] | [((76, 96), 'numpy.random.seed', 'np.random.seed', (['(2321)'], {}), '(2321)\n', (90, 96), True, 'import numpy as np\n'), ((491, 503), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (499, 503), True, 'import numpy as np\n'), ((630, 661), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': 'np.uint16'}), '(256, dtype=np.uint16)\n', (639, 661), True, 'import numpy as np\n'), ((712, 737), 'scipy.ndimage.center_of_mass', 'ndimage.center_of_mass', (['R'], {}), '(R)\n', (734, 737), False, 'from scipy import ndimage\n'), ((897, 927), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.uint16'}), '(16, dtype=np.uint16)\n', (906, 927), True, 'import numpy as np\n'), ((970, 995), 'scipy.ndimage.center_of_mass', 'ndimage.center_of_mass', (['R'], {}), '(R)\n', (992, 995), False, 'from scipy import ndimage\n'), ((1184, 1215), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': 'np.uint16'}), '(256, dtype=np.uint16)\n', (1193, 1215), True, 'import numpy as np\n'), ((1393, 1419), 'numpy.zeros', 'np.zeros', (['R.shape', 'np.intc'], {}), '(R.shape, np.intc)\n', (1401, 1419), True, 'import numpy as np\n'), ((749, 764), 'numpy.abs', 'np.abs', (['(n0 - m0)'], {}), '(n0 - m0)\n', (755, 764), True, 'import numpy as np\n'), ((781, 796), 'numpy.abs', 'np.abs', (['(n1 - m1)'], {}), '(n1 - m1)\n', (787, 796), True, 'import numpy as np\n'), ((1676, 1690), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (1685, 1690), True, 'import numpy as np\n'), ((578, 602), 'numpy.random.rand', 'np.random.rand', (['(128)', '(256)'], {}), '(128, 256)\n', (592, 602), True, 'import numpy as np\n'), ((841, 871), 'numpy.random.rand', 'np.random.rand', (['(32)', '(128)', '(8)', '(16)'], {}), '(32, 128, 8, 16)\n', (855, 871), True, 'import numpy as np\n'), ((1038, 1051), 'numpy.abs', 'np.abs', (['(n - m)'], {}), '(n - m)\n', (1044, 1051), True, 'import numpy as np\n'), ((1074, 1087), 'numpy.abs', 'np.abs', (['(p - m)'], {}), '(p - m)\n', (1080, 1087), True, 'import numpy as np\n'), ((1132, 1156), 
'numpy.random.rand', 'np.random.rand', (['(128)', '(256)'], {}), '(128, 256)\n', (1146, 1156), True, 'import numpy as np\n'), ((1337, 1361), 'numpy.random.rand', 'np.random.rand', (['(128)', '(256)'], {}), '(128, 256)\n', (1351, 1361), True, 'import numpy as np\n'), ((466, 479), 'numpy.arange', 'np.arange', (['si'], {}), '(si)\n', (475, 479), True, 'import numpy as np\n')] |
import numpy as np
import graphlearning as gl
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
from utils import peikonal_depth
import sys
k = 20
frac=0.05
alpha=2
#Plotting
numw = 16
numh = 10
for dataset in ['mnist','fashionmnist']:
f_bdy, axarr_bdy = plt.subplots(numh,numw,gridspec_kw={'wspace':0.1,'hspace':0.1})
f_peikonal_median, axarr_peikonal = plt.subplots(numh,numw,gridspec_kw={'wspace':0.1,'hspace':0.1})
f_bdy.suptitle('Boundary images')
f_peikonal_median.suptitle('peikonal Median images')
X, labels = gl.datasets.load(dataset)
pathID = np.zeros((10,200))
for label in range(10):
print("Digit %d..."%label)
#Subset labels
X_sub = X[labels==label,:]
num = X_sub.shape[0]
#KNN search
knn_ind, knn_dist = gl.weightmatrix.knnsearch(X_sub,30)
W = gl.weightmatrix.knn(X_sub,k,knn_data=(knn_ind,knn_dist))
G = gl.graph(W)
if not G.isconnected():
sys.exit('Graph is not connected')
d = np.max(knn_dist,axis=1)
kde = (d/d.max())**(-1)
median, depth = peikonal_depth(G, kde, frac, alpha)
depth = depth/np.max(depth)
depth = 1-depth
ind_boundary = np.argsort(+depth)
ind_peikonal = np.argsort(-depth)
b_indx = ind_boundary[0]
m_indx = ind_peikonal[0]
W = W.tocsr()
neigh_num = 20
b_indx_up = b_indx
pathID[label,0] = b_indx
maxItt = 1e2
dp = 0
cnt = 0
while (dp < 1) and (cnt < maxItt):
cnt += 1
#xnId = knn_ind[b_indx_up,1:neigh_num]
xnId = W[b_indx_up,:].nonzero()[1]
wnId = depth[xnId]
wnMx = np.argmax(wnId)
b_indx_up = xnId[wnMx]
pathID[label,cnt] = b_indx_up
dp = depth[b_indx_up]
print(dp)
#Visualization
for j in range(numw):
img = X_sub[ind_boundary[j],:]
m = int(np.sqrt(img.shape[0]))
img = np.reshape(img,(m,m))
if dataset.lower() == 'mnist':
img = np.transpose(img)
axarr_bdy[label,j].imshow(img,cmap='gray')
axarr_bdy[label,j].axis('off')
axarr_bdy[label,j].set_aspect('equal')
img = X_sub[ind_peikonal[j],:]
m = int(np.sqrt(img.shape[0]))
img = np.reshape(img,(m,m))
if dataset.lower() == 'mnist':
img = np.transpose(img)
axarr_peikonal[label,j].imshow(img,cmap='gray')
axarr_peikonal[label,j].axis('off')
axarr_peikonal[label,j].set_aspect('equal')
f_bdy.savefig('figures/'+dataset+'_boundary.png')
f_peikonal_median.savefig('figures/'+dataset+'_peikonal_median.png')
# path from boundary to median plots
columns = 1
for i in range(10):
x = pathID[i,:]
indx = np.nonzero(x)
digitIndx = indx[0]
lp = len(digitIndx)
if (lp > columns):
columns = lp
#Plotting
numw = columns
numh = 10
f_peikonal_path, axarr_peikonal_path = plt.subplots(numh,numw,gridspec_kw={'wspace':0.1,'hspace':0.1})
f_peikonal_path.suptitle('peikonal boundary to median images')
img = X_sub[0,:]
lm = img.shape[0]
for label in range(10):
x = pathID[label,:]
indx = np.nonzero(x)
digitIndx = indx[0]
lp = len(digitIndx)
path = pathID[label,digitIndx]
X_sub = X[labels==label,:]
#Visualization
for j in range(numw):
if (j < lp):
i = int(path[j])
img = X_sub[i,:]
m = int(np.sqrt(img.shape[0]))
img = np.reshape(img,(m,m))
if dataset.lower() == 'mnist':
img = np.transpose(img)
axarr_peikonal_path[label,j].imshow(img,cmap='gray')
else:
img = np.ones(lm)
m = int(np.sqrt(img.shape[0]))
img = np.reshape(img,(m,m))
axarr_peikonal_path[label,j].imshow(img,cmap='binary')
axarr_peikonal_path[label,j].axis('off')
axarr_peikonal_path[label,j].set_aspect('equal')
f_peikonal_path.savefig('figures/'+dataset+'_peikonal_path.png')
plt.show()
| [
"graphlearning.weightmatrix.knnsearch",
"matplotlib.pyplot.show",
"utils.peikonal_depth",
"numpy.argmax",
"numpy.zeros",
"graphlearning.datasets.load",
"numpy.transpose",
"numpy.ones",
"numpy.argsort",
"numpy.nonzero",
"numpy.max",
"numpy.reshape",
"sys.exit",
"matplotlib.pyplot.subplots",... | [((4400, 4410), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4408, 4410), True, 'import matplotlib.pyplot as plt\n'), ((282, 350), 'matplotlib.pyplot.subplots', 'plt.subplots', (['numh', 'numw'], {'gridspec_kw': "{'wspace': 0.1, 'hspace': 0.1}"}), "(numh, numw, gridspec_kw={'wspace': 0.1, 'hspace': 0.1})\n", (294, 350), True, 'import matplotlib.pyplot as plt\n'), ((386, 454), 'matplotlib.pyplot.subplots', 'plt.subplots', (['numh', 'numw'], {'gridspec_kw': "{'wspace': 0.1, 'hspace': 0.1}"}), "(numh, numw, gridspec_kw={'wspace': 0.1, 'hspace': 0.1})\n", (398, 454), True, 'import matplotlib.pyplot as plt\n'), ((563, 588), 'graphlearning.datasets.load', 'gl.datasets.load', (['dataset'], {}), '(dataset)\n', (579, 588), True, 'import graphlearning as gl\n'), ((602, 621), 'numpy.zeros', 'np.zeros', (['(10, 200)'], {}), '((10, 200))\n', (610, 621), True, 'import numpy as np\n'), ((3193, 3261), 'matplotlib.pyplot.subplots', 'plt.subplots', (['numh', 'numw'], {'gridspec_kw': "{'wspace': 0.1, 'hspace': 0.1}"}), "(numh, numw, gridspec_kw={'wspace': 0.1, 'hspace': 0.1})\n", (3205, 3261), True, 'import matplotlib.pyplot as plt\n'), ((822, 858), 'graphlearning.weightmatrix.knnsearch', 'gl.weightmatrix.knnsearch', (['X_sub', '(30)'], {}), '(X_sub, 30)\n', (847, 858), True, 'import graphlearning as gl\n'), ((870, 929), 'graphlearning.weightmatrix.knn', 'gl.weightmatrix.knn', (['X_sub', 'k'], {'knn_data': '(knn_ind, knn_dist)'}), '(X_sub, k, knn_data=(knn_ind, knn_dist))\n', (889, 929), True, 'import graphlearning as gl\n'), ((939, 950), 'graphlearning.graph', 'gl.graph', (['W'], {}), '(W)\n', (947, 950), True, 'import graphlearning as gl\n'), ((1042, 1066), 'numpy.max', 'np.max', (['knn_dist'], {'axis': '(1)'}), '(knn_dist, axis=1)\n', (1048, 1066), True, 'import numpy as np\n'), ((1131, 1166), 'utils.peikonal_depth', 'peikonal_depth', (['G', 'kde', 'frac', 'alpha'], {}), '(G, kde, frac, alpha)\n', (1145, 1166), False, 'from utils 
import peikonal_depth\n'), ((1260, 1278), 'numpy.argsort', 'np.argsort', (['(+depth)'], {}), '(+depth)\n', (1270, 1278), True, 'import numpy as np\n'), ((1302, 1320), 'numpy.argsort', 'np.argsort', (['(-depth)'], {}), '(-depth)\n', (1312, 1320), True, 'import numpy as np\n'), ((2967, 2980), 'numpy.nonzero', 'np.nonzero', (['x'], {}), '(x)\n', (2977, 2980), True, 'import numpy as np\n'), ((3440, 3453), 'numpy.nonzero', 'np.nonzero', (['x'], {}), '(x)\n', (3450, 3453), True, 'import numpy as np\n'), ((995, 1029), 'sys.exit', 'sys.exit', (['"""Graph is not connected"""'], {}), "('Graph is not connected')\n", (1003, 1029), False, 'import sys\n'), ((1190, 1203), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (1196, 1203), True, 'import numpy as np\n'), ((1784, 1799), 'numpy.argmax', 'np.argmax', (['wnId'], {}), '(wnId)\n', (1793, 1799), True, 'import numpy as np\n'), ((2087, 2110), 'numpy.reshape', 'np.reshape', (['img', '(m, m)'], {}), '(img, (m, m))\n', (2097, 2110), True, 'import numpy as np\n'), ((2446, 2469), 'numpy.reshape', 'np.reshape', (['img', '(m, m)'], {}), '(img, (m, m))\n', (2456, 2469), True, 'import numpy as np\n'), ((2046, 2067), 'numpy.sqrt', 'np.sqrt', (['img.shape[0]'], {}), '(img.shape[0])\n', (2053, 2067), True, 'import numpy as np\n'), ((2174, 2191), 'numpy.transpose', 'np.transpose', (['img'], {}), '(img)\n', (2186, 2191), True, 'import numpy as np\n'), ((2405, 2426), 'numpy.sqrt', 'np.sqrt', (['img.shape[0]'], {}), '(img.shape[0])\n', (2412, 2426), True, 'import numpy as np\n'), ((2533, 2550), 'numpy.transpose', 'np.transpose', (['img'], {}), '(img)\n', (2545, 2550), True, 'import numpy as np\n'), ((3807, 3830), 'numpy.reshape', 'np.reshape', (['img', '(m, m)'], {}), '(img, (m, m))\n', (3817, 3830), True, 'import numpy as np\n'), ((4029, 4040), 'numpy.ones', 'np.ones', (['lm'], {}), '(lm)\n', (4036, 4040), True, 'import numpy as np\n'), ((4110, 4133), 'numpy.reshape', 'np.reshape', (['img', '(m, m)'], {}), '(img, (m, m))\n', (4120, 4133), 
True, 'import numpy as np\n'), ((3762, 3783), 'numpy.sqrt', 'np.sqrt', (['img.shape[0]'], {}), '(img.shape[0])\n', (3769, 3783), True, 'import numpy as np\n'), ((3902, 3919), 'numpy.transpose', 'np.transpose', (['img'], {}), '(img)\n', (3914, 3919), True, 'import numpy as np\n'), ((4065, 4086), 'numpy.sqrt', 'np.sqrt', (['img.shape[0]'], {}), '(img.shape[0])\n', (4072, 4086), True, 'import numpy as np\n')] |
import time
import torch
import numpy as np
from pathlib import Path
from transformers import WEIGHTS_NAME, CONFIG_NAME
def init_seed():
seed_val = 42
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def save_model(model, output_dir):
output_dir = Path(output_dir)
# Step 1: Save a model, configuration and vocabulary that you have fine-tuned
# If we have a distributed model, save only the encapsulated model
# (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
model_to_save = model.module if hasattr(model, 'module') else model
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = output_dir / WEIGHTS_NAME
output_config_file = output_dir / CONFIG_NAME
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
#src_tokenizer.save_vocabulary(output_dir)
def load_model():
pass
# Function to calculate the accuracy of our predictions vs labels
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=2).flatten()
labels_flat = labels.flatten()
#print (f'preds: {pred_flat}')
#print (f'labels: {labels_flat}')
return np.sum(np.equal(pred_flat, labels_flat)) / len(labels_flat)
import pytorch_lightning as pl
from pytorch_lightning import Trainer
class MyLightninModule(pl.LightningModule):
def __init__(self, num_class):
super(MyLightninModule, self).__init__()
self.model = get_model(num_class=num_class)
self.criterion = get_criterion()
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
# REQUIRED
x, y = batch
y_hat = self.forward(x)
loss = self.criterion(y_hat, y)
logs = {'train_loss': loss}
return {'loss': loss, 'log': logs, 'progress_bar': logs}
def validation_step(self, batch, batch_idx):
# OPTIONAL
x, y = batch
y_hat = self.forward(x)
preds = torch.argmax(y_hat, dim=1)
return {'val_loss': self.criterion(y_hat, y), 'correct': (preds == y).float()}
def validation_end(self, outputs):
# OPTIONAL
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
acc = torch.cat([x['correct'] for x in outputs]).mean()
logs = {'val_loss': avg_loss, 'val_acc': acc}
return {'avg_val_loss': avg_loss, 'log': logs}
def configure_optimizers(self):
# REQUIRED
optimizer, scheduler = get_optimizer(model=self.model)
return [optimizer], [scheduler]
@pl.data_loader
def train_dataloader(self):
# REQUIRED
return get_loaders()[0]
@pl.data_loader
def val_dataloader(self):
# OPTIONAL
return get_loaders()[1]
def run_train(config, model, train_loader, eval_loader, writer):
init_seed()
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader), eta_min=config.lr)
training_loss_values = []
validation_loss_values = []
validation_accuracy_values = []
for epoch in range(config.epochs):
model.train()
print('======== Epoch {:} / {:} ========'.format(epoch + 1, config.epochs))
start_time = time.time()
total_loss = 0
for batch_no, batch in enumerate(train_loader):
source = batch[0].to(device)
target = batch[1].to(device)
model.zero_grad()
loss, logits = model(source, target)
total_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = target.to('cpu').numpy()
loss.backward()
optimizer.step()
scheduler.step()
#Logging the loss and accuracy (below) in Tensorboard
avg_train_loss = total_loss / len(train_loader)
training_loss_values.append(avg_train_loss)
for name, weights in model.named_parameters():
writer.add_histogram(name, weights, epoch)
writer.add_scalar('Train/Loss', avg_train_loss, epoch)
print("Average training loss: {0:.2f}".format(avg_train_loss))
print("Running Validation...")
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps = 0
for batch_no, batch in enumerate(eval_loader):
source = batch[0].to(device)
target = batch[1].to(device)
with torch.no_grad():
loss, logits = model(source, target)
logits = logits.detach().cpu().numpy()
label_ids = target.to('cpu').numpy()
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
eval_loss += loss
nb_eval_steps += 1
avg_valid_acc = eval_accuracy/nb_eval_steps
avg_valid_loss = eval_loss/nb_eval_steps
validation_loss_values.append(avg_valid_loss)
validation_accuracy_values.append(avg_valid_acc)
writer.add_scalar('Valid/Loss', avg_valid_loss, epoch)
writer.add_scalar('Valid/Accuracy', avg_valid_acc, epoch)
writer.flush()
print("Avg Val Accuracy: {0:.2f}".format(avg_valid_acc))
print("Average Val Loss: {0:.2f}".format(avg_valid_loss))
print("Time taken by epoch: {0:.2f}".format(time.time() - start_time))
return training_loss_values, validation_loss_values, validation_accuracy_values
| [
"numpy.random.seed",
"torch.stack",
"numpy.argmax",
"torch.manual_seed",
"torch.argmax",
"torch.cat",
"time.time",
"numpy.equal",
"torch.cuda.manual_seed_all",
"pathlib.Path",
"torch.cuda.is_available",
"torch.device",
"torch.no_grad"
] | [((170, 194), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (184, 194), True, 'import numpy as np\n'), ((200, 227), 'torch.manual_seed', 'torch.manual_seed', (['seed_val'], {}), '(seed_val)\n', (217, 227), False, 'import torch\n'), ((233, 269), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_val'], {}), '(seed_val)\n', (259, 269), False, 'import torch\n'), ((308, 333), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (331, 333), False, 'import torch\n'), ((284, 304), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (296, 304), False, 'import torch\n'), ((339, 358), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (351, 358), False, 'import torch\n'), ((417, 433), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (421, 433), False, 'from pathlib import Path\n'), ((2246, 2272), 'torch.argmax', 'torch.argmax', (['y_hat'], {'dim': '(1)'}), '(y_hat, dim=1)\n', (2258, 2272), False, 'import torch\n'), ((3597, 3608), 'time.time', 'time.time', ([], {}), '()\n', (3606, 3608), False, 'import time\n'), ((1251, 1275), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(2)'}), '(preds, axis=2)\n', (1260, 1275), True, 'import numpy as np\n'), ((1418, 1450), 'numpy.equal', 'np.equal', (['pred_flat', 'labels_flat'], {}), '(pred_flat, labels_flat)\n', (1426, 1450), True, 'import numpy as np\n'), ((2443, 2488), 'torch.stack', 'torch.stack', (["[x['val_loss'] for x in outputs]"], {}), "([x['val_loss'] for x in outputs])\n", (2454, 2488), False, 'import torch\n'), ((2511, 2553), 'torch.cat', 'torch.cat', (["[x['correct'] for x in outputs]"], {}), "([x['correct'] for x in outputs])\n", (2520, 2553), False, 'import torch\n'), ((4828, 4843), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4841, 4843), False, 'import torch\n'), ((5747, 5758), 'time.time', 'time.time', ([], {}), '()\n', (5756, 5758), False, 'import time\n')] |
import cv2
import urllib.request
import numpy as np
import sys

# Stream an MJPEG feed (e.g. from an IP-webcam app) and display frames until
# ESC is pressed. Ported to Python 3: urllib.request replaces urllib2, the
# rolling buffer is real bytes, np.frombuffer replaces the deprecated
# np.fromstring, and cv2.IMREAD_COLOR replaces the removed
# cv2.CV_LOAD_IMAGE_COLOR constant.

# JPEG start-of-image / end-of-image markers delimit frames in an MJPEG stream.
JPEG_SOI = b'\xff\xd8'
JPEG_EOI = b'\xff\xd9'

host = "192.168.0.220:8080"
if len(sys.argv) > 1:
    host = sys.argv[1]
hoststr = 'http://' + host + '/video'
print('Streaming ' + hoststr)
stream = urllib.request.urlopen(hoststr)

buf = b''  # rolling byte buffer (renamed from `bytes`, which shadowed the builtin)
while True:
    buf += stream.read(1024)
    a = buf.find(JPEG_SOI)
    b = buf.find(JPEG_EOI)
    if a != -1 and b != -1:
        # Extract one complete JPEG frame and drop it from the buffer.
        jpg = buf[a:b + 2]
        buf = buf[b + 2:]
        frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow(hoststr, frame)
        if cv2.waitKey(1) == 27:  # ESC quits
            sys.exit(0)
"cv2.waitKey",
"cv2.imshow",
"urllib2.urlopen",
"numpy.fromstring"
] | [((206, 230), 'urllib2.urlopen', 'urllib2.urlopen', (['hoststr'], {}), '(hoststr)\n', (221, 230), False, 'import urllib2\n'), ((515, 537), 'cv2.imshow', 'cv2.imshow', (['hoststr', 'i'], {}), '(hoststr, i)\n', (525, 537), False, 'import cv2\n'), ((447, 481), 'numpy.fromstring', 'np.fromstring', (['jpg'], {'dtype': 'np.uint8'}), '(jpg, dtype=np.uint8)\n', (460, 481), True, 'import numpy as np\n'), ((548, 562), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (559, 562), False, 'import cv2\n')] |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
class WindowGenerator:
    """Slice time-series DataFrames into (input, label) windows.

    Follows the TensorFlow time-series tutorial pattern: the generator holds
    train/validation/test DataFrames and exposes each as a ``tf.data.Dataset``
    of ``(inputs, labels)`` batches, where each element is a contiguous
    window of rows.
    """
    def __init__(
        self,
        input_width,
        label_width,
        shift,
        train_df,
        validation_df,
        test_df,
        stride=10,
        label_columns=None,
    ):
        """Configure the window geometry and remember the source DataFrames.

        Args:
            input_width: Number of time steps fed to the model.
            label_width: Number of time steps to predict.
            shift: Offset between the end of the input window and the end of
                the label window; total window = input_width + shift.
            train_df: Training DataFrame (rows are time steps, columns are
                features).
            validation_df: Validation DataFrame.
            test_df: Test DataFrame.
            stride: Step between the starts of consecutive windows when
                building a dataset (see make_dataset).
            label_columns: Optional list of column names used as labels.
                NOTE(review): make_dataset indexes ``data[self.label_columns]``,
                so it appears this must not be None when datasets are built —
                confirm with callers.
        """
        # Store the raw data.
        self.train_df = train_df
        self.validation_df = validation_df
        self.test_df = test_df
        # Work out the label column indices.
        self.label_columns = label_columns
        if label_columns is not None:
            self.label_columns_indices = {
                name: i for i, name in enumerate(label_columns)
            }
        self.column_indices = {name: i for i, name in enumerate(train_df.columns)}
        # Work out the window parameters.
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        # Inputs occupy the first input_width steps of each window.
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        # Labels occupy the last label_width steps of each window.
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
        # data pre processing
        self.shuffle = True  # windows are shuffled when datasets are built
        self.stride = stride
    def __repr__(self):
        """Return a human-readable summary of the window layout."""
        return "\n".join(
            [
                f"Total window size: {self.total_window_size}",
                f"Input indices: {self.input_indices}",
                f"Label indices: {self.label_indices}",
                f"Label column name(s): {self.label_columns}",
            ]
        )
    def split_window(self, features):
        """Split a batch of full windows into (inputs, labels) tensors.

        Args:
            features: Batched tensor of complete windows; the time axis has
                length total_window_size.

        Returns:
            Tuple ``(inputs, labels)`` whose time dimensions are statically
            set to input_width and label_width respectively.
        """
        inputs = features[:, self.input_slice, :]
        labels = features[:, self.labels_slice, :]
        if self.label_columns is not None:
            # Keep only the requested label columns, in the given order.
            labels = tf.stack(
                [
                    labels[:, :, self.column_indices[name]]
                    for name in self.label_columns
                ],
                axis=-1,
            )
        # Slicing doesn't preserve static shape information, so set the shapes
        # manually. This way the `tf.data.Datasets` are easier to inspect.
        inputs.set_shape([None, self.input_width, None])
        labels.set_shape([None, self.label_width, None])
        return inputs, labels
    def make_dataset(self, data) -> tf.data.Dataset:
        """Turn a DataFrame into a batched dataset of (inputs, labels) pairs.

        Only ``self.label_columns`` are kept as features; windows of
        ``total_window_size`` rows are drawn every ``self.stride`` rows,
        shuffled per ``self.shuffle``, batched by 32, and split with
        ``split_window``.
        """
        data = np.array(data[self.label_columns], dtype=np.float32)
        dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
            data=data,
            targets=None,
            sequence_length=self.total_window_size,
            sequence_stride=self.stride,
            shuffle=self.shuffle,
            batch_size=32,
        )
        dataset = dataset.map(self.split_window)
        return dataset
    @property
    def train(self):
        """Windowed dataset built from the training DataFrame."""
        return self.make_dataset(data=self.train_df)
    @property
    def validation(self):
        """Windowed dataset built from the validation DataFrame."""
        return self.make_dataset(data=self.validation_df)
    @property
    def test(self):
        """Windowed dataset built from the test DataFrame."""
        return self.make_dataset(data=self.test_df)
    def plot(self, inputs, labels, label_column, model=None, max_subplots=5):
        """Plot inputs, labels and optional predictions for a batch.

        Args:
            inputs: Batch of input windows (first axis indexes examples).
            labels: Batch of matching label windows.
            label_column: Column name plotted on the y-axis.
            model: Optional callable; if given, ``model(inputs)`` is drawn as
                prediction points.
            max_subplots: Maximum number of batch elements to draw.
        """
        plt.figure(figsize=(12, 8))
        plot_col_index = self.column_indices[label_column]
        max_n = min(max_subplots, len(inputs))
        for n in range(max_n):
            plt.subplot(max_n, 1, n + 1)
            plt.ylabel(f"{label_column} [normed]")
            plt.plot(
                self.input_indices,
                inputs[n, :, plot_col_index],
                label="Inputs",
                marker=".",
                zorder=-10,
            )
            if self.label_columns:
                label_col_index = self.label_columns_indices.get(label_column, None)
            else:
                label_col_index = plot_col_index
            if label_col_index is None:
                # Requested column is not among the labels; nothing to plot.
                continue
            plt.scatter(
                self.label_indices,
                labels[n, :, label_col_index],
                edgecolors="k",
                label="Labels",
                c="#2ca02c",
                s=64,
            )
            if model is not None:
                predictions = model(inputs)
                if predictions.shape[2] == 1:
                    # Single-output model: its only channel is the label.
                    label_col_index = 0
                plt.scatter(
                    self.label_indices,
                    predictions[n, :, label_col_index],
                    marker="X",
                    edgecolors="k",
                    label="Predictions",
                    c="#ff7f0e",
                    s=64,
                )
            if n == 0:
                plt.legend()
        plt.xlabel("Time 100ms")
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"tensorflow.stack",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.preprocessing.timeseries_datase... | [((2514, 2566), 'numpy.array', 'np.array', (['data[self.label_columns]'], {'dtype': 'np.float32'}), '(data[self.label_columns], dtype=np.float32)\n', (2522, 2566), True, 'import numpy as np\n'), ((2585, 2778), 'tensorflow.keras.preprocessing.timeseries_dataset_from_array', 'tf.keras.preprocessing.timeseries_dataset_from_array', ([], {'data': 'data', 'targets': 'None', 'sequence_length': 'self.total_window_size', 'sequence_stride': 'self.stride', 'shuffle': 'self.shuffle', 'batch_size': '(32)'}), '(data=data, targets=\n None, sequence_length=self.total_window_size, sequence_stride=self.\n stride, shuffle=self.shuffle, batch_size=32)\n', (2637, 2778), True, 'import tensorflow as tf\n'), ((3333, 3360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (3343, 3360), True, 'import matplotlib.pyplot as plt\n'), ((4828, 4852), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time 100ms"""'], {}), "('Time 100ms')\n", (4838, 4852), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1099), 'numpy.arange', 'np.arange', (['self.total_window_size'], {}), '(self.total_window_size)\n', (1075, 1099), True, 'import numpy as np\n'), ((1275, 1308), 'numpy.arange', 'np.arange', (['self.total_window_size'], {}), '(self.total_window_size)\n', (1284, 1308), True, 'import numpy as np\n'), ((1948, 2044), 'tensorflow.stack', 'tf.stack', (['[labels[:, :, self.column_indices[name]] for name in self.label_columns]'], {'axis': '(-1)'}), '([labels[:, :, self.column_indices[name]] for name in self.\n label_columns], axis=-1)\n', (1956, 2044), True, 'import tensorflow as tf\n'), ((3511, 3539), 'matplotlib.pyplot.subplot', 'plt.subplot', (['max_n', '(1)', '(n + 1)'], {}), '(max_n, 1, n + 1)\n', (3522, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3552, 3590), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{label_column} [normed]"""'], {}), "(f'{label_column} [normed]')\n", (3562, 3590), 
True, 'import matplotlib.pyplot as plt\n'), ((3603, 3705), 'matplotlib.pyplot.plot', 'plt.plot', (['self.input_indices', 'inputs[n, :, plot_col_index]'], {'label': '"""Inputs"""', 'marker': '"""."""', 'zorder': '(-10)'}), "(self.input_indices, inputs[n, :, plot_col_index], label='Inputs',\n marker='.', zorder=-10)\n", (3611, 3705), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4182), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.label_indices', 'labels[n, :, label_col_index]'], {'edgecolors': '"""k"""', 'label': '"""Labels"""', 'c': '"""#2ca02c"""', 's': '(64)'}), "(self.label_indices, labels[n, :, label_col_index], edgecolors=\n 'k', label='Labels', c='#2ca02c', s=64)\n", (4075, 4182), True, 'import matplotlib.pyplot as plt\n'), ((4471, 4611), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.label_indices', 'predictions[n, :, label_col_index]'], {'marker': '"""X"""', 'edgecolors': '"""k"""', 'label': '"""Predictions"""', 'c': '"""#ff7f0e"""', 's': '(64)'}), "(self.label_indices, predictions[n, :, label_col_index], marker=\n 'X', edgecolors='k', label='Predictions', c='#ff7f0e', s=64)\n", (4482, 4611), True, 'import matplotlib.pyplot as plt\n'), ((4806, 4818), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4816, 4818), True, 'import matplotlib.pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.